diff --git a/.buildkite/pipelines/intake.template.yml b/.buildkite/pipelines/intake.template.yml index 32b0a12f06a0e..66b989d94455c 100644 --- a/.buildkite/pipelines/intake.template.yml +++ b/.buildkite/pipelines/intake.template.yml @@ -32,6 +32,14 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n1-standard-32 buildDirectory: /dev/shm/bk + - label: part4 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart4 + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: n1-standard-32 + buildDirectory: /dev/shm/bk - group: bwc-snapshots steps: - label: "{{matrix.BWC_VERSION}} / bwc-snapshots" diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index 7a50745a933ae..49c2d34df7e31 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -33,6 +33,14 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n1-standard-32 buildDirectory: /dev/shm/bk + - label: part4 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart4 + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: n1-standard-32 + buildDirectory: /dev/shm/bk - group: bwc-snapshots steps: - label: "{{matrix.BWC_VERSION}} / bwc-snapshots" @@ -40,7 +48,7 @@ steps: timeout_in_minutes: 300 matrix: setup: - BWC_VERSION: ["7.17.15", "8.10.5", "8.11.0", "8.12.0"] + BWC_VERSION: ["7.17.16", "8.11.2", "8.12.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/pipelines/periodic-packaging.template.yml b/.buildkite/pipelines/periodic-packaging.template.yml index 1c626ffc53bfe..a5fe3a6c9e3d0 100644 --- a/.buildkite/pipelines/periodic-packaging.template.yml +++ b/.buildkite/pipelines/periodic-packaging.template.yml @@ -19,6 +19,7 @@ steps: - ubuntu-2004 - ubuntu-2204 - rocky-8 + - rocky-9 - rhel-7 - rhel-8 - rhel-9 diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index dcf220c32ce9a..fab90c8ed6d17 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -20,6 +20,7 @@ steps: - ubuntu-2004 - ubuntu-2204 - rocky-8 + - rocky-9 - rhel-7 - rhel-8 - rhel-9 @@ -1072,6 +1073,22 @@ steps: env: BWC_VERSION: 7.17.15 + - label: "{{matrix.image}} / 7.17.16 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.16 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.16 + - label: "{{matrix.image}} / 8.0.0 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.0.0 timeout_in_minutes: 300 @@ -1680,8 +1697,24 @@ steps: env: BWC_VERSION: 8.10.4 - - label: "{{matrix.image}} / 8.10.5 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.10.5 + - label: "{{matrix.image}} / 8.11.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.11.0 + 
timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.11.0 + + - label: "{{matrix.image}} / 8.11.1 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.11.1 timeout_in_minutes: 300 matrix: setup: @@ -1694,10 +1727,10 @@ steps: machineType: custom-16-32768 buildDirectory: /dev/shm/bk env: - BWC_VERSION: 8.10.5 + BWC_VERSION: 8.11.1 - - label: "{{matrix.image}} / 8.11.0 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.11.0 + - label: "{{matrix.image}} / 8.11.2 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.11.2 timeout_in_minutes: 300 matrix: setup: @@ -1710,7 +1743,7 @@ steps: machineType: custom-16-32768 buildDirectory: /dev/shm/bk env: - BWC_VERSION: 8.11.0 + BWC_VERSION: 8.11.2 - label: "{{matrix.image}} / 8.12.0 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.12.0 diff --git a/.buildkite/pipelines/periodic-platform-support.yml b/.buildkite/pipelines/periodic-platform-support.yml index 8e43bd4ad5a12..b52f8506885c9 100644 --- a/.buildkite/pipelines/periodic-platform-support.yml +++ b/.buildkite/pipelines/periodic-platform-support.yml @@ -19,6 +19,7 @@ steps: - ubuntu-2004 - ubuntu-2204 - rocky-8 + - rocky-9 - rhel-7 - rhel-8 - rhel-9 diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 174a8a3b8c3ec..213bbff8e029c 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -652,6 +652,16 @@ steps: buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.15 + - label: 7.17.16 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.16#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.16 - label: 8.0.0 / bwc command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.0.0#bwcTest timeout_in_minutes: 300 @@ -1032,8 +1042,8 @@ steps: buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.10.4 - - label: 8.10.5 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.10.5#bwcTest + - label: 8.11.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.11.0#bwcTest timeout_in_minutes: 300 agents: provider: gcp @@ -1041,9 +1051,9 @@ steps: machineType: custom-32-98304 buildDirectory: /dev/shm/bk env: - BWC_VERSION: 8.10.5 - - label: 8.11.0 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.11.0#bwcTest + BWC_VERSION: 8.11.0 + - label: 8.11.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.11.1#bwcTest timeout_in_minutes: 300 agents: provider: gcp @@ -1051,7 +1061,17 @@ steps: machineType: custom-32-98304 buildDirectory: /dev/shm/bk env: - BWC_VERSION: 8.11.0 + BWC_VERSION: 8.11.1 + - label: 8.11.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.11.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.11.2 - label: 8.12.0 / bwc command: .ci/scripts/run-gradle.sh 
-Dbwc.checkout.align=true v8.12.0#bwcTest timeout_in_minutes: 300 diff --git a/.buildkite/pipelines/pull-request/packaging-tests-unix.yml b/.buildkite/pipelines/pull-request/packaging-tests-unix.yml index ec2e29b284781..d5c937aa4b5a2 100644 --- a/.buildkite/pipelines/pull-request/packaging-tests-unix.yml +++ b/.buildkite/pipelines/pull-request/packaging-tests-unix.yml @@ -3,9 +3,9 @@ config: steps: - group: packaging-tests-unix steps: - - label: "{{matrix.image}} / {{matrix.PACKAGING_TASK}} / packaging-tests-unix" - key: "packaging-tests-unix" - command: ./.ci/scripts/packaging-test.sh $$PACKAGING_TASK + - label: "{{matrix.image}} / docker / packaging-tests-unix" + key: "packaging-tests-unix-docker" + command: ./.ci/scripts/packaging-test.sh destructiveDistroTest.docker timeout_in_minutes: 300 matrix: setup: @@ -22,18 +22,71 @@ steps: - ubuntu-2004 - ubuntu-2204 - rocky-8 + - rocky-9 + - rhel-7 + - rhel-8 + - rhel-9 + - almalinux-8 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + diskSizeGb: 350 + machineType: custom-16-32768 + - label: "{{matrix.image}} / packages / packaging-tests-unix" + key: "packaging-tests-unix-packages" + command: ./.ci/scripts/packaging-test.sh destructiveDistroTest.packages + timeout_in_minutes: 300 + matrix: + setup: + image: + - centos-7 + - debian-10 + - debian-11 + - opensuse-leap-15 + - oraclelinux-7 + - oraclelinux-8 + - sles-12 + - sles-15 + - ubuntu-1804 + - ubuntu-2004 + - ubuntu-2204 + - rocky-8 + - rocky-9 + - rhel-7 + - rhel-8 + - rhel-9 + - almalinux-8 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + diskSizeGb: 350 + machineType: custom-16-32768 + - label: "{{matrix.image}} / archives / packaging-tests-unix" + key: "packaging-tests-unix-archives" + command: ./.ci/scripts/packaging-test.sh destructiveDistroTest.archives + timeout_in_minutes: 300 + matrix: + setup: + image: + - centos-7 + - debian-10 + - debian-11 + - opensuse-leap-15 + - oraclelinux-7 + - oraclelinux-8 + - sles-12 + - sles-15 + - ubuntu-1804 + - ubuntu-2004 + - ubuntu-2204 + - rocky-8 + - rocky-9 - rhel-7 - rhel-8 - rhel-9 - almalinux-8 - PACKAGING_TASK: - - destructiveDistroTest.docker - - destructiveDistroTest.packages - - destructiveDistroTest.archives agents: provider: gcp image: family/elasticsearch-{{matrix.image}} diskSizeGb: 350 machineType: custom-16-32768 - env: - PACKAGING_TASK: "{{matrix.PACKAGING_TASK}}" diff --git a/.buildkite/pipelines/pull-request/part-4-fips.yml b/.buildkite/pipelines/pull-request/part-4-fips.yml new file mode 100644 index 0000000000000..11a50456ca4c0 --- /dev/null +++ b/.buildkite/pipelines/pull-request/part-4-fips.yml @@ -0,0 +1,11 @@ +config: + allow-labels: "Team:Security" +steps: + - label: part-4-fips + command: .ci/scripts/run-gradle.sh -Dignore.tests.seed -Dtests.fips.enabled=true checkPart4 + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk diff --git a/.buildkite/pipelines/pull-request/part-4-windows.yml b/.buildkite/pipelines/pull-request/part-4-windows.yml new file mode 100644 index 0000000000000..0493e8af0cf8f --- /dev/null +++ b/.buildkite/pipelines/pull-request/part-4-windows.yml @@ -0,0 +1,14 @@ +config: + allow-labels: "test-windows" +steps: + - label: part-4-windows + command: .\.buildkite\scripts\run-script.ps1 bash .buildkite/scripts/windows-run-gradle.sh + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-windows-2022 + machineType: custom-32-98304 
+ diskType: pd-ssd + diskSizeGb: 350 + env: + GRADLE_TASK: checkPart4 diff --git a/.buildkite/pipelines/pull-request/part-4.yml b/.buildkite/pipelines/pull-request/part-4.yml new file mode 100644 index 0000000000000..af11f08953d07 --- /dev/null +++ b/.buildkite/pipelines/pull-request/part-4.yml @@ -0,0 +1,11 @@ +config: + skip-target-branches: "7.17" +steps: + - label: part-4 + command: .ci/scripts/run-gradle.sh -Dignore.tests.seed checkPart4 + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk diff --git a/.buildkite/pull-requests.json b/.buildkite/pull-requests.json index 456fce6aba519..b59bdc79ad293 100644 --- a/.buildkite/pull-requests.json +++ b/.buildkite/pull-requests.json @@ -12,9 +12,6 @@ "build_on_commit": true, "build_on_comment": true, "trigger_comment_regex": "run\\W+elasticsearch-ci.+", - "labels": [ - "buildkite-opt-in" - ], "cancel_intermediate_builds": true, "cancel_intermediate_builds_on_comment": false }, diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 988e7d1e0b453..581ec2f1565b6 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -64,6 +64,7 @@ BWC_VERSION: - "7.17.13" - "7.17.14" - "7.17.15" + - "7.17.16" - "8.0.0" - "8.0.1" - "8.1.0" @@ -102,6 +103,7 @@ BWC_VERSION: - "8.10.2" - "8.10.3" - "8.10.4" - - "8.10.5" - "8.11.0" + - "8.11.1" + - "8.11.2" - "8.12.0" diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+build-benchmark-part1.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+build-benchmark-part1.yml index a3f1345a07f13..173c8dbf805c0 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+build-benchmark-part1.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+build-benchmark-part1.yml @@ -23,7 +23,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/build-bench.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/build-bench.*' github-hooks: true status-context: elasticsearch-ci/build-benchmark-part1 cancel-builds-on-update: true @@ -32,21 +33,17 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - white-list-labels: - - 'build-benchmark' - black-list-labels: - - 'buildkite-opt-in' builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA JAVA8_HOME=$HOME/.java/java8 JAVA11_HOME=$HOME/.java/java11 - shell: | - #!/usr/local/bin/runbld --redirect-stderr - $WORKSPACE/.ci/scripts/run-gradle.sh :build-tools-internal:bootstrapPerformanceTests - $WORKSPACE/.ci/scripts/install-gradle-profiler.sh - $WORKSPACE/.ci/scripts/run-gradle-profiler.sh --benchmark --scenario-file build-tools-internal/build/performanceTests/elasticsearch-build-benchmark-part1.scenarios --project-dir . --output-dir profile-out - mkdir $WORKSPACE/build - tar -czf $WORKSPACE/build/${BUILD_NUMBER}.tar.bz2 profile-out + #!/usr/local/bin/runbld --redirect-stderr + $WORKSPACE/.ci/scripts/run-gradle.sh :build-tools-internal:bootstrapPerformanceTests + $WORKSPACE/.ci/scripts/install-gradle-profiler.sh + $WORKSPACE/.ci/scripts/run-gradle-profiler.sh --benchmark --scenario-file build-tools-internal/build/performanceTests/elasticsearch-build-benchmark-part1.scenarios --project-dir . 
--output-dir profile-out + mkdir $WORKSPACE/build + tar -czf $WORKSPACE/build/${BUILD_NUMBER}.tar.bz2 profile-out diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+build-benchmark-part2.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+build-benchmark-part2.yml index f1b11ab1ec75a..5f25c9153040e 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+build-benchmark-part2.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+build-benchmark-part2.yml @@ -23,7 +23,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/build-bench.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/build-bench.*' github-hooks: true status-context: elasticsearch-ci/build-benchmark-part2 cancel-builds-on-update: true @@ -32,21 +33,17 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - white-list-labels: - - 'build-benchmark' - black-list-labels: - - 'buildkite-opt-in' builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA JAVA8_HOME=$HOME/.java/java8 JAVA11_HOME=$HOME/.java/java11 - shell: | - #!/usr/local/bin/runbld --redirect-stderr - $WORKSPACE/.ci/scripts/run-gradle.sh :build-tools-internal:bootstrapPerformanceTests - $WORKSPACE/.ci/scripts/install-gradle-profiler.sh - $WORKSPACE/.ci/scripts/run-gradle-profiler.sh --benchmark --scenario-file build-tools-internal/build/performanceTests/elasticsearch-build-benchmark-part2.scenarios --project-dir . --output-dir profile-out - mkdir $WORKSPACE/build - tar -czf $WORKSPACE/build/${BUILD_NUMBER}.tar.bz2 profile-out + #!/usr/local/bin/runbld --redirect-stderr + $WORKSPACE/.ci/scripts/run-gradle.sh :build-tools-internal:bootstrapPerformanceTests + $WORKSPACE/.ci/scripts/install-gradle-profiler.sh + $WORKSPACE/.ci/scripts/run-gradle-profiler.sh --benchmark --scenario-file build-tools-internal/build/performanceTests/elasticsearch-build-benchmark-part2.scenarios --project-dir . 
--output-dir profile-out + mkdir $WORKSPACE/build + tar -czf $WORKSPACE/build/${BUILD_NUMBER}.tar.bz2 profile-out diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+bwc-snapshots-windows.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+bwc-snapshots-windows.yml index c0ed9bf998159..1a0652204b2f2 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+bwc-snapshots-windows.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+bwc-snapshots-windows.yml @@ -16,7 +16,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/bwc-snapshots-windows.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/bwc-snapshots-windows.*' github-hooks: true status-context: elasticsearch-ci/bwc-snapshots-windows cancel-builds-on-update: true @@ -25,11 +26,6 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - white-list-labels: - - 'test-windows' - black-list-labels: - - '>test-mute' - - 'buildkite-opt-in' axes: - axis: type: slave @@ -42,7 +38,7 @@ name: "BWC_VERSION" builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$USERPROFILE\\.java\\$ES_BUILD_JAVA JAVA11_HOME=$USERPROFILE\\.java\\java11 diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+bwc-snapshots.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+bwc-snapshots.yml index 676f5f6f629b7..9a20115a72f1c 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+bwc-snapshots.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+bwc-snapshots.yml @@ -16,17 +16,14 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/bwc.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/bwc.*' github-hooks: true status-context: elasticsearch-ci/bwc cancel-builds-on-update: true excluded-regions: - ^docs/.* - ^x-pack/docs/.* - black-list-labels: - - '>test-mute' - - 'test-full-bwc' - - 'buildkite-opt-in' axes: - axis: type: slave @@ -39,7 +36,7 @@ name: "BWC_VERSION" builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA JAVA8_HOME=$HOME/.java/java8 diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+cloud-deploy.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+cloud-deploy.yml index 24548954d8a10..a6f42c147dbeb 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+cloud-deploy.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+cloud-deploy.yml @@ -15,7 +15,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/cloud-deploy.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/cloud-deploy.*' github-hooks: true status-context: elasticsearch-ci/cloud-deploy cancel-builds-on-update: true @@ -24,13 +25,9 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - white-list-labels: - - 'cloud-deploy' - black-list-labels: - - 'buildkite-opt-in' builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA - shell: | diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+docs-check.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+docs-check.yml index c766b4379a1f6..58b273de2beb9 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+docs-check.yml +++ 
b/.ci/jobs.t/elastic+elasticsearch+pull-request+docs-check.yml @@ -14,19 +14,17 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/docs-check.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/docs-check.*' github-hooks: true status-context: elasticsearch-ci/docs-check cancel-builds-on-update: true included-regions: - ^docs/.* - ^x-pack/docs/.* - black-list-labels: - - '>test-mute' - - 'buildkite-opt-in' builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA JAVA8_HOME=$HOME/.java/java8 diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+eql-correctness.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+eql-correctness.yml index 0b9eea62ad9bf..c1789e3b8595a 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+eql-correctness.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+eql-correctness.yml @@ -14,7 +14,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/eql-correctness.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/eql-correctness.*' github-hooks: true status-context: elasticsearch-ci/eql-correctness cancel-builds-on-update: true @@ -23,12 +24,9 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - black-list-labels: - - '>test-mute' - - 'buildkite-opt-in' builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA - shell: | diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+example-plugins.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+example-plugins.yml index 320a9c6176d5f..339fcd17ec77c 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+example-plugins.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+example-plugins.yml @@ -14,7 +14,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/example-plugins.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/example-plugins.*' github-hooks: true status-context: elasticsearch-ci/example-plugins cancel-builds-on-update: true @@ -23,11 +24,9 @@ - build-tools/.* - build-tools-internal/.* - plugins/examples/.* - black-list-labels: - - 'buildkite-opt-in' builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA JAVA8_HOME=$HOME/.java/java8 diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+full-bwc.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+full-bwc.yml index 2a7920e4bae89..4bb38a810e8f1 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+full-bwc.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+full-bwc.yml @@ -16,18 +16,14 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/full-bwc.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/full-bwc.*' github-hooks: true status-context: elasticsearch-ci/full-bwc cancel-builds-on-update: true excluded-regions: - ^docs/.* - ^x-pack/docs/.* - white-list-labels: - - 'test-full-bwc' - black-list-labels: - - '>test-mute' - - 'buildkite-opt-in' axes: - axis: type: slave @@ -40,7 +36,7 @@ name: "BWC_VERSION" builders: - inject: - 
properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA JAVA8_HOME=$HOME/.java/java8 diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix-sample.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix-sample.yml index 2d4f372142512..23d94e665f8a3 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix-sample.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix-sample.yml @@ -15,7 +15,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/packaging-tests-unix-sample.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/packaging-tests-unix-sample.*' github-hooks: true status-context: elasticsearch-ci/packaging-tests-unix-sample cancel-builds-on-update: true @@ -24,10 +25,6 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - black-list-labels: - - ">test-mute" - - ":Delivery/Packaging" - - "buildkite-opt-in" axes: - axis: type: label-expression diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix.yml index af1d3f493eeb0..901f7bcac3caa 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix.yml @@ -15,7 +15,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/packaging-tests-unix.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/packaging-tests-unix.*' github-hooks: true status-context: elasticsearch-ci/packaging-tests-unix cancel-builds-on-update: true @@ -24,11 +25,6 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - white-list-labels: - - ":Delivery/Packaging" - black-list-labels: - - ">test-mute" - - "buildkite-opt-in" axes: - axis: type: label-expression diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-nojdk.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-nojdk.yml index ea4097b1a0b93..c39326380fdaf 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-nojdk.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-nojdk.yml @@ -17,7 +17,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/packaging-tests-windows.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/packaging-tests-windows.*' github-hooks: true status-context: elasticsearch-ci/packaging-tests-windows cancel-builds-on-update: true @@ -28,11 +29,6 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - white-list-labels: - - ':Delivery/Packaging' - black-list-labels: - - '>test-mute' - - 'buildkite-opt-in' axes: - axis: type: label-expression @@ -46,11 +42,11 @@ type: user-defined name: PACKAGING_TASK values: - - 'default-windows-archive' - - 'default-windows-archive-no-jdk' + - "default-windows-archive" + - "default-windows-archive-no-jdk" builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$USERPROFILE\\.java\\$ES_BUILD_JAVA - batch: | diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample-nojdk.yml 
b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample-nojdk.yml index ec644445ef8de..35705f7e759b1 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample-nojdk.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample-nojdk.yml @@ -17,7 +17,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/packaging-tests-windows-sample.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/packaging-tests-windows-sample.*' github-hooks: true status-context: elasticsearch-ci/packaging-tests-windows-sample cancel-builds-on-update: true @@ -28,10 +29,6 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - black-list-labels: - - '>test-mute' - - ':Delivery/Packaging' - - 'buildkite-opt-in' axes: - axis: type: label-expression @@ -42,11 +39,11 @@ type: user-defined name: PACKAGING_TASK values: - - 'default-windows-archive' - - 'default-windows-archive-no-jdk' + - "default-windows-archive" + - "default-windows-archive-no-jdk" builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$USERPROFILE\\.java\\$ES_BUILD_JAVA - batch: | diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample.yml index 242e137cb1d83..8a4eff2d30822 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample.yml @@ -17,7 +17,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/packaging-tests-windows-sample.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/packaging-tests-windows-sample.*' github-hooks: true status-context: elasticsearch-ci/packaging-tests-windows-sample cancel-builds-on-update: true @@ -27,10 +28,6 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - black-list-labels: - - '>test-mute' - - ':Delivery/Packaging' - - 'buildkite-opt-in' axes: - axis: type: label-expression @@ -41,10 +38,10 @@ type: user-defined name: PACKAGING_TASK values: - - 'default-windows-archive' + - "default-windows-archive" builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$USERPROFILE\\.java\\$ES_BUILD_JAVA - batch: | diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows.yml index a2ffc7b4050ec..d109477620386 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows.yml @@ -17,7 +17,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/packaging-tests-windows.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/packaging-tests-windows.*' github-hooks: true status-context: elasticsearch-ci/packaging-tests-windows cancel-builds-on-update: true @@ -28,11 +29,6 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - white-list-labels: - - ':Delivery/Packaging' - black-list-labels: - - '>test-mute' - - 'buildkite-opt-in' axes: - axis: type: label-expression @@ -46,10 +42,10 @@ type: 
user-defined name: PACKAGING_TASK values: - - 'default-windows-archive' + - "default-windows-archive" builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$USERPROFILE\\.java\\$ES_BUILD_JAVA - batch: | diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-upgrade-tests.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-upgrade-tests.yml index 19ed5398e3e1d..0cc14224375fb 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-upgrade-tests.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-upgrade-tests.yml @@ -16,7 +16,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/packaging-upgrade-tests.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/packaging-upgrade-tests.*' github-hooks: true status-context: elasticsearch-ci/packaging-upgrade-tests cancel-builds-on-update: true @@ -25,11 +26,6 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - white-list-labels: - - ":Delivery/Packaging" - black-list-labels: - - ">test-mute" - - "buildkite-opt-in" axes: - axis: type: label-expression diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-fips.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-fips.yml index a661230d3b93b..aaeeed2f0d52b 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-fips.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-fips.yml @@ -14,7 +14,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/part-1-fips.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/part-1-fips.*' github-hooks: true status-context: elasticsearch-ci/part-1-fips cancel-builds-on-update: true @@ -23,15 +24,10 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - white-list-labels: - - 'Team:Security' - black-list-labels: - - '>test-mute' - - 'buildkite-opt-in' builders: - inject: # Use FIPS-specific Java versions - properties-file: '.ci/java-versions-fips.properties' + properties-file: ".ci/java-versions-fips.properties" properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA JAVA16_HOME=$HOME/.java/openjdk16 diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-windows.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-windows.yml index d7afdd0ac3277..8b348f94026e0 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-windows.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-windows.yml @@ -15,7 +15,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/part-1-windows.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/part-1-windows.*' github-hooks: true status-context: elasticsearch-ci/part-1-windows cancel-builds-on-update: true @@ -24,14 +25,9 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - white-list-labels: - - 'test-windows' - black-list-labels: - - '>test-mute' - - 'buildkite-opt-in' builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$USERPROFILE\\.java\\$ES_BUILD_JAVA JAVA11_HOME=$USERPROFILE\\.java\\java11 diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-fips.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-fips.yml index 913820709dabc..11d168d7567d9 100644 --- 
a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-fips.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-fips.yml @@ -14,7 +14,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/part-2-fips.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/part-2-fips.*' github-hooks: true status-context: elasticsearch-ci/part-2-fips cancel-builds-on-update: true @@ -23,15 +24,10 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - white-list-labels: - - 'Team:Security' - black-list-labels: - - '>test-mute' - - 'buildkite-opt-in' builders: - inject: # Use FIPS-specific Java versions - properties-file: '.ci/java-versions-fips.properties' + properties-file: ".ci/java-versions-fips.properties" properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA JAVA16_HOME=$HOME/.java/openjdk16 diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-windows.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-windows.yml index ae590872be16e..927117cc3bced 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-windows.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-windows.yml @@ -15,7 +15,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/part-2-windows.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/part-2-windows.*' github-hooks: true status-context: elasticsearch-ci/part-2-windows cancel-builds-on-update: true @@ -24,14 +25,9 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - white-list-labels: - - 'test-windows' - black-list-labels: - - '>test-mute' - - 'buildkite-opt-in' builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$USERPROFILE\\.java\\$ES_BUILD_JAVA JAVA11_HOME=$USERPROFILE\\.java\\java11 diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3-fips.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3-fips.yml index 6bf6544d40310..3b7984ecbdc43 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3-fips.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3-fips.yml @@ -14,7 +14,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/part-3-fips.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/part-3-fips.*' github-hooks: true status-context: elasticsearch-ci/part-3-fips cancel-builds-on-update: true @@ -24,15 +25,10 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - white-list-labels: - - 'Team:Security' - black-list-labels: - - '>test-mute' - - 'buildkite-opt-in' builders: - inject: # Use FIPS-specific Java versions - properties-file: '.ci/java-versions-fips.properties' + properties-file: ".ci/java-versions-fips.properties" properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA JAVA16_HOME=$HOME/.java/openjdk16 diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3-windows.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3-windows.yml index 58bad17954b24..7e835b85015ba 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3-windows.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3-windows.yml @@ -15,7 +15,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/part-3-windows.*' + only-trigger-phrase: true + trigger-phrase: 
'.*run\W+jenkins\W+elasticsearch-ci/part-3-windows.*' github-hooks: true status-context: elasticsearch-ci/part-3-windows cancel-builds-on-update: true @@ -25,14 +26,9 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - white-list-labels: - - 'test-windows' - black-list-labels: - - '>test-mute' - - 'buildkite-opt-in' builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$USERPROFILE\\.java\\$ES_BUILD_JAVA JAVA11_HOME=$USERPROFILE\\.java\\java11 diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3.yml index 0158b909903b4..e306657693f5f 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3.yml @@ -14,22 +14,20 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/part-3.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/part-3.*' github-hooks: true status-context: elasticsearch-ci/part-3 cancel-builds-on-update: true excluded-regions: - ^docs/.* - ^x-pack/docs/.* - black-list-labels: - - '>test-mute' - - 'buildkite-opt-in' black-list-target-branches: - 6.8 - 7.17 builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA JAVA8_HOME=$HOME/.java/java8 diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+precommit.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+precommit.yml index 1267b6a21778e..3994164fba0f3 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+precommit.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+precommit.yml @@ -14,17 +14,14 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/precommit.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/precommit.*' github-hooks: true status-context: elasticsearch-ci/precommit cancel-builds-on-update: true - white-list-labels: - - '>test-mute' - black-list-labels: - - 'buildkite-opt-in' builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA JAVA8_HOME=$HOME/.java/java8 diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+release-tests.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+release-tests.yml index 1ab6bd1ce0e5d..a86496d7199f5 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+release-tests.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+release-tests.yml @@ -16,23 +16,20 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/release-tests.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/release-tests.*' github-hooks: true status-context: elasticsearch-ci/release-tests cancel-builds-on-update: true excluded-regions: - ^docs/.* - ^x-pack/docs/.* - white-list-labels: - - 'test-release' - black-list-labels: - - 'buildkite-opt-in' black-list-target-branches: - 7.15 - 6.8 builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA JAVA8_HOME=$HOME/.java/java8 diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+rest-compatibility.yml 
b/.ci/jobs.t/elastic+elasticsearch+pull-request+rest-compatibility.yml index 216f8ceae2078..0ed86851c7f33 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+rest-compatibility.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+rest-compatibility.yml @@ -14,7 +14,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/rest-compatibility.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/rest-compatibility.*' github-hooks: true status-context: elasticsearch-ci/rest-compatibility cancel-builds-on-update: true @@ -26,12 +27,9 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - black-list-labels: - - '>test-mute' - - 'buildkite-opt-in' builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA JAVA8_HOME=$HOME/.java/java8 diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index 4246f34222b21..7970d655f4014 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,5 +1,4 @@ BWC_VERSION: - - "7.17.15" - - "8.10.5" - - "8.11.0" + - "7.17.16" + - "8.11.2" - "8.12.0" diff --git a/.ci/templates.t/pull-request-gradle-unix.yml b/.ci/templates.t/pull-request-gradle-unix.yml index c09e64c56f32d..7c0711a4e3a97 100644 --- a/.ci/templates.t/pull-request-gradle-unix.yml +++ b/.ci/templates.t/pull-request-gradle-unix.yml @@ -14,19 +14,17 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/{pr-job}.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/{pr-job}.*' github-hooks: true status-context: elasticsearch-ci/{pr-job} cancel-builds-on-update: true excluded-regions: - ^docs/.* - ^x-pack/docs/.* - black-list-labels: - - '>test-mute' - - 'buildkite-opt-in' builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA JAVA8_HOME=$HOME/.java/java8 diff --git a/.idea/inspectionProfiles/Project_Default.xml b/.idea/inspectionProfiles/Project_Default.xml index ae2c8d2be1deb..3efd2cce181d4 100644 --- a/.idea/inspectionProfiles/Project_Default.xml +++ b/.idea/inspectionProfiles/Project_Default.xml @@ -2,12 +2,13 @@ diff --git a/.idea/scopes/Production_minus_fixtures.xml b/.idea/scopes/Production_minus_fixtures.xml new file mode 100644 index 0000000000000..07510326481b4 --- /dev/null +++ b/.idea/scopes/Production_minus_fixtures.xml @@ -0,0 +1,3 @@ + + + diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4f9f432bca467..cb674221913de 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -675,12 +675,28 @@ number, there are a few rules that need to be followed: once merged into `main`. 2. To create a new component version, add a new constant to the respective class with a descriptive name of the change being made. Increment the integer - number according to the partciular `*Version` class. + number according to the particular `*Version` class. If your pull request has a conflict around your new version constant, you need to update your PR from `main` and change your PR to use the next available version number. +### Checking for cluster features + +As part of developing a new feature or change, you might need to determine +if all nodes in a cluster have been upgraded to support your new feature. +This can be done using `FeatureService`. 
To define and check for a new +feature in a cluster: + +1. Define a new `NodeFeature` constant with a unique id for the feature + in a class related to the change you're doing. +2. Return that constant from an instance of `FeatureSpecification.getFeatures`, + either an existing implementation or a new implementation. Make sure + the implementation is added as a SPI implementation in `module-info.java` + and `META-INF/services`. +3. To check if all nodes in the cluster support the new feature, call +`FeatureService.clusterHasFeature(ClusterState, NodeFeature)` + ### Creating A Distribution Run all build commands from within the root directory: diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle index 7cfa23e69ff96..3519434e07d42 100644 --- a/benchmarks/build.gradle +++ b/benchmarks/build.gradle @@ -8,7 +8,8 @@ import org.elasticsearch.gradle.internal.info.BuildParams * Side Public License, v 1. */ -apply plugin: 'elasticsearch.java' +apply plugin: org.elasticsearch.gradle.internal.ElasticsearchJavaBasePlugin +apply plugin: 'java-library' apply plugin: 'application' application { diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/ShardsAvailabilityHealthIndicatorBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/ShardsAvailabilityHealthIndicatorBenchmark.java index 30b37c018af01..ef834fad424e3 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/ShardsAvailabilityHealthIndicatorBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/ShardsAvailabilityHealthIndicatorBenchmark.java @@ -21,7 +21,7 @@ import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.DataTier; -import org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService; +import org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/AggConstructionContentionBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/AggConstructionContentionBenchmark.java index 1dafdbb9be2b9..5b139f800cb39 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/AggConstructionContentionBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/AggConstructionContentionBenchmark.java @@ -224,11 +224,6 @@ public Set getMatchingFieldNames(String pattern) { throw new UnsupportedOperationException(); } - @Override - public boolean isFieldMapped(String field) { - return field.startsWith("int"); - } - @Override public FactoryType compile(Script script, ScriptContext context) { throw new UnsupportedOperationException(); diff --git a/branches.json b/branches.json index c63328fb3ee22..c76417a198c57 100644 --- a/branches.json +++ b/branches.json @@ -7,9 +7,6 @@ { "branch": "8.11" }, - { - "branch": "8.10" - }, { "branch": "7.17" } diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/BuildPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/BuildPluginFuncTest.groovy index e31594ad2e4a6..96e342e995a36 
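The cluster-features workflow added to CONTRIBUTING.md above can be sketched end to end. This is a minimal illustration, not code from this diff: the feature id `my_plugin.fancy_query` and the class names `MyPluginFeatures` and `FancyQueryAction` are hypothetical, while `NodeFeature`, `FeatureSpecification.getFeatures`, and `FeatureService.clusterHasFeature(ClusterState, NodeFeature)` are the APIs the section itself names.

```java
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.features.FeatureService;
import org.elasticsearch.features.FeatureSpecification;
import org.elasticsearch.features.NodeFeature;

import java.util.Set;

// Step 1: define a NodeFeature constant with a unique id, near the change it guards.
// The id used here is a made-up example.
public class MyPluginFeatures implements FeatureSpecification {
    public static final NodeFeature FANCY_QUERY = new NodeFeature("my_plugin.fancy_query");

    // Step 2: return the constant from a FeatureSpecification implementation.
    // This class must also be registered as an SPI implementation in
    // module-info.java and META-INF/services (registration not shown here).
    @Override
    public Set<NodeFeature> getFeatures() {
        return Set.of(FANCY_QUERY);
    }
}

class FancyQueryAction {
    // Step 3: gate the new behaviour on every node in the cluster supporting it.
    static boolean canUseFancyQuery(FeatureService featureService, ClusterState state) {
        return featureService.clusterHasFeature(state, MyPluginFeatures.FANCY_QUERY);
    }
}
```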
100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/BuildPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/BuildPluginFuncTest.groovy @@ -31,7 +31,7 @@ class BuildPluginFuncTest extends AbstractGradleFuncTest { Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - + 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright @@ -39,7 +39,7 @@ class BuildPluginFuncTest extends AbstractGradleFuncTest { documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. - + THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. @@ -58,11 +58,11 @@ class BuildPluginFuncTest extends AbstractGradleFuncTest { id 'java' id 'elasticsearch.global-build-info' } - + apply plugin:'elasticsearch.build' group = 'org.acme' description = "some example project" - + repositories { maven { name = "local-test" @@ -73,7 +73,7 @@ class BuildPluginFuncTest extends AbstractGradleFuncTest { } mavenCentral() } - + dependencies { jarHell 'org.elasticsearch:elasticsearch-core:current' } @@ -89,7 +89,7 @@ class BuildPluginFuncTest extends AbstractGradleFuncTest { * Side Public License, v 1. */ package org.elasticsearch; - + public class SampleClass { } """.stripIndent() @@ -117,7 +117,7 @@ class BuildPluginFuncTest extends AbstractGradleFuncTest { noticeFile.set(file("NOTICE")) """ when: - def result = gradleRunner("assemble").build() + def result = gradleRunner("assemble", "-x", "generateHistoricalFeaturesMetadata").build() then: result.task(":assemble").outcome == TaskOutcome.SUCCESS file("build/distributions/hello-world.jar").exists() @@ -146,7 +146,7 @@ class BuildPluginFuncTest extends AbstractGradleFuncTest { } licenseFile.set(file("LICENSE")) noticeFile.set(file("NOTICE")) - + tasks.named("forbiddenApisMain").configure {enabled = false } tasks.named('checkstyleMain').configure { enabled = false } tasks.named('loggerUsageCheck').configure { enabled = false } diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/PublishPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/PublishPluginFuncTest.groovy index e17f9c7537777..9d32eaadf7aec 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/PublishPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/PublishPluginFuncTest.groovy @@ -29,7 +29,7 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { id 'elasticsearch.java' id 'elasticsearch.publish' } - + version = "1.0" group = 'org.acme' description = "custom project description" @@ -92,11 +92,11 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { id 'elasticsearch.publish' id 'com.github.johnrengelman.shadow' } - + repositories { mavenCentral() } - + dependencies { implementation 'org.slf4j:log4j-over-slf4j:1.7.30' shadow 'org.slf4j:slf4j-api:1.7.30' @@ -110,8 +110,8 @@ class PublishPluginFuncTest extends 
AbstractGradleFuncTest { } } version = "1.0" - group = 'org.acme' - description = 'some description' + group = 'org.acme' + description = 'some description' """ when: @@ -179,7 +179,7 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { } dependencies { - shadow project(":someLib") + shadow project(":someLib") } publishing { repositories { @@ -192,10 +192,10 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { allprojects { apply plugin: 'elasticsearch.java' version = "1.0" - group = 'org.acme' + group = 'org.acme' } - description = 'some description' + description = 'some description' """ when: @@ -263,13 +263,13 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { id 'elasticsearch.publish' id 'com.github.johnrengelman.shadow' } - + esplugin { name = 'hello-world-plugin' classname 'org.acme.HelloWorldPlugin' description = "custom project description" } - + publishing { repositories { maven { @@ -277,17 +277,17 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { } } } - + // requires elasticsearch artifact available tasks.named('bundlePlugin').configure { enabled = false } licenseFile.set(file('license.txt')) noticeFile.set(file('notice.txt')) version = "1.0" - group = 'org.acme' + group = 'org.acme' """ when: - def result = gradleRunner('assemble', '--stacktrace').build() + def result = gradleRunner('assemble', '--stacktrace', '-x', 'generateHistoricalFeaturesMetadata').build() then: result.task(":generatePom").outcome == TaskOutcome.SUCCESS @@ -348,19 +348,19 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { id 'elasticsearch.internal-es-plugin' id 'elasticsearch.publish' } - + esplugin { name = 'hello-world-plugin' classname 'org.acme.HelloWorldPlugin' description = "custom project description" } - + // requires elasticsearch artifact available tasks.named('bundlePlugin').configure { enabled = false } licenseFile.set(file('license.txt')) noticeFile.set(file('notice.txt')) version = "2.0" - group = 'org.acme' + group = 'org.acme' """ when: @@ -420,9 +420,9 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { apply plugin:'elasticsearch.publish' version = "1.0" - group = 'org.acme' + group = 'org.acme' description = "just a test project" - + ext.projectLicenses.set(['The Apache Software License, Version 2.0': 'http://www.apache.org/licenses/LICENSE-2.0']) """ diff --git a/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle b/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle index 0f56dd2ef8992..2bb00faae38be 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle @@ -142,18 +142,17 @@ buildScan { // Add a build annotation // See: https://buildkite.com/docs/agent/v3/cli-annotate def body = """
${System.getenv('BUILDKITE_LABEL')} :gradle: ${result.failure ? 'failed' : 'successful'} build: gradle ${gradle.startParameter.taskNames.join(' ')}
""" - new ProcessBuilder( + def process = [ 'buildkite-agent', 'annotate', '--context', result.failure ? 'gradle-build-scans-failed' : 'gradle-build-scans', '--append', '--style', - result.failure ? 'error' : 'info', - body - ) - .start() - .waitFor() + result.failure ? 'error' : 'info' + ].execute() + process.withWriter { it.write(body) } // passing the body in as an argument has issues on Windows, so let's use stdin of the process instead + process.waitFor() } } } else { diff --git a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/DocsTestPlugin.groovy b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/DocsTestPlugin.groovy index 874141f2135ad..38b4cb499eeb9 100644 --- a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/DocsTestPlugin.groovy +++ b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/DocsTestPlugin.groovy @@ -12,6 +12,7 @@ import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.internal.test.rest.CopyRestApiTask import org.elasticsearch.gradle.internal.test.rest.CopyRestTestsTask +import org.gradle.api.Action import org.gradle.api.Plugin import org.gradle.api.Project import org.gradle.api.file.Directory @@ -61,16 +62,24 @@ class DocsTestPlugin implements Plugin { group 'Docs' description 'List each snippet' defaultSubstitutions = commonDefaultSubstitutions - perSnippet { println(it.toString()) } + perSnippet = new Action() { + @Override + void execute(SnippetsTask.Snippet snippet) { + println(snippet.toString()) + } + } } project.tasks.register('listConsoleCandidates', SnippetsTask) { group 'Docs' description 'List snippets that probably should be marked // CONSOLE' defaultSubstitutions = commonDefaultSubstitutions - perSnippet { - if (RestTestsFromSnippetsTask.isConsoleCandidate(it)) { - println(it.toString()) + perSnippet = new Action() { + @Override + void execute(SnippetsTask.Snippet snippet) { + if (RestTestsFromSnippetsTask.isConsoleCandidate(it)) { + println(it.toString()) + } } } } @@ -80,7 +89,7 @@ class DocsTestPlugin implements Plugin { defaultSubstitutions = commonDefaultSubstitutions testRoot.convention(restRootDir) doFirst { - fileOperations.delete(restRootDir) + getFileOperations().delete(testRoot.get()) } } diff --git a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/RestTestsFromSnippetsTask.groovy b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/RestTestsFromSnippetsTask.groovy index eda86355ee306..81207181dc9a7 100644 --- a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/RestTestsFromSnippetsTask.groovy +++ b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/RestTestsFromSnippetsTask.groovy @@ -10,8 +10,10 @@ package org.elasticsearch.gradle.internal.doc import groovy.transform.PackageScope import org.elasticsearch.gradle.internal.doc.SnippetsTask.Snippet +import org.gradle.api.Action import org.gradle.api.InvalidUserDataException import org.gradle.api.file.DirectoryProperty +import org.gradle.api.internal.file.FileOperations import org.gradle.api.tasks.Input import org.gradle.api.tasks.Internal import org.gradle.api.tasks.OutputDirectory @@ -24,7 +26,7 @@ import java.nio.file.Path /** * Generates REST tests for each snippet marked // TEST. 
*/ -class RestTestsFromSnippetsTask extends SnippetsTask { +abstract class RestTestsFromSnippetsTask extends SnippetsTask { /** * These languages aren't supported by the syntax highlighter so we * shouldn't use them. @@ -64,13 +66,23 @@ class RestTestsFromSnippetsTask extends SnippetsTask { @Internal Set names = new HashSet<>() + @Inject + abstract FileOperations getFileOperations(); + @Inject RestTestsFromSnippetsTask(ObjectFactory objectFactory) { testRoot = objectFactory.directoryProperty() TestBuilder builder = new TestBuilder() - perSnippet builder.&handleSnippet - doLast builder.&checkUnconverted - doLast builder.&finishLastTest + perSnippet = new Action() { + @Override + void execute(Snippet snippet) { + builder.handleSnippet(snippet) + } + } + doLast { + builder.checkUnconverted() + builder.finishLastTest() + } } /** @@ -190,6 +202,7 @@ class RestTestsFromSnippetsTask extends SnippetsTask { * Called each time a snippet is encountered. Tracks the snippets and * calls buildTest to actually build the test. */ + void handleSnippet(Snippet snippet) { if (RestTestsFromSnippetsTask.isConsoleCandidate(snippet)) { unconvertedCandidates.add(snippet.path.toString() diff --git a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/SnippetsTask.groovy b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/SnippetsTask.groovy index 1580ec891ed2b..3e4ad91024082 100644 --- a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/SnippetsTask.groovy +++ b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/SnippetsTask.groovy @@ -11,8 +11,9 @@ package org.elasticsearch.gradle.internal.doc import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.JsonParseException; -import com.fasterxml.jackson.core.JsonToken; +import com.fasterxml.jackson.core.JsonToken +import org.gradle.api.Action; import org.gradle.api.DefaultTask import org.gradle.api.InvalidUserDataException import org.gradle.api.file.ConfigurableFileTree @@ -44,7 +45,7 @@ class SnippetsTask extends DefaultTask { * instance of Snippet. */ @Internal - Closure perSnippet + Action perSnippet /** * The docs to scan. 
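
The `Closure`-to-`Action` migration in these doc tasks is a Gradle configuration-cache measure: a Groovy closure captures its owner (often the `Project`), which the configuration cache cannot serialize, while an `org.gradle.api.Action` is a plain object holding only what it references. A minimal sketch of the pattern, using a hypothetical task and item type rather than the real snippet classes:

    // Expose a per-item hook as an Action<T> property instead of a Closure so a
    // configured task instance can be stored by Gradle's configuration cache.
    import org.gradle.api.Action;
    import org.gradle.api.DefaultTask;
    import org.gradle.api.tasks.Internal;
    import org.gradle.api.tasks.TaskAction;

    public abstract class ScanSnippetsTask extends DefaultTask {
        private Action<String> perItem; // hypothetical stand-in for perSnippet

        @Internal
        public Action<String> getPerItem() {
            return perItem;
        }

        public void setPerItem(Action<String> perItem) {
            this.perItem = perItem;
        }

        @TaskAction
        public void scan() {
            for (String item : new String[] { "snippet-1", "snippet-2" }) {
                perItem.execute(item); // Action#execute replaces Closure#call
            }
        }
    }
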
Defaults to every file in the directory except the @@ -134,7 +135,7 @@ + "After substitutions and munging, the json looks like:\n" + quoted, e); } } - perSnippet(snippet) + perSnippet.execute(snippet) snippet = null } file.eachLine('UTF-8') { String line, int lineNumber -> diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BaseInternalPluginBuildPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BaseInternalPluginBuildPlugin.java index f709600fc7979..70d130605c15e 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BaseInternalPluginBuildPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BaseInternalPluginBuildPlugin.java @@ -13,6 +13,7 @@ import org.elasticsearch.gradle.internal.conventions.util.Util; import org.elasticsearch.gradle.internal.info.BuildParams; import org.elasticsearch.gradle.internal.precommit.JarHellPrecommitPlugin; +import org.elasticsearch.gradle.internal.test.HistoricalFeaturesMetadataPlugin; import org.elasticsearch.gradle.plugin.PluginBuildPlugin; import org.elasticsearch.gradle.plugin.PluginPropertiesExtension; import org.elasticsearch.gradle.testclusters.ElasticsearchCluster; @@ -36,6 +37,7 @@ public void apply(Project project) { project.getPluginManager().apply(PluginBuildPlugin.class); project.getPluginManager().apply(JarHellPrecommitPlugin.class); project.getPluginManager().apply(ElasticsearchJavaPlugin.class); + project.getPluginManager().apply(HistoricalFeaturesMetadataPlugin.class); // Clear default dependencies added by public PluginBuildPlugin as we add our // own project dependencies for internal builds // TODO remove once we have removed default dependencies from PluginBuildPlugin diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BuildPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BuildPlugin.java index 6849796579ad9..6c7bc6753531c 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BuildPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BuildPlugin.java @@ -11,6 +11,7 @@ import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.elasticsearch.gradle.internal.precommit.InternalPrecommitTasks; import org.elasticsearch.gradle.internal.snyk.SnykDependencyMonitoringGradlePlugin; +import org.elasticsearch.gradle.internal.test.HistoricalFeaturesMetadataPlugin; import org.gradle.api.InvalidUserDataException; import org.gradle.api.Plugin; import org.gradle.api.Project; @@ -61,6 +62,7 @@ public void apply(final Project project) { project.getPluginManager().apply(ElasticsearchJavadocPlugin.class); project.getPluginManager().apply(DependenciesInfoPlugin.class); project.getPluginManager().apply(SnykDependencyMonitoringGradlePlugin.class); + project.getPluginManager().apply(HistoricalFeaturesMetadataPlugin.class); InternalPrecommitTasks.create(project, true); configureLicenseAndNotice(project); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalBwcGitPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalBwcGitPlugin.java index d51770ffd30ed..71c76b2045007 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalBwcGitPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalBwcGitPlugin.java @@ -72,20
+72,19 @@ public void apply(Project project) { createClone.commandLine("git", "clone", buildLayout.getRootDirectory(), gitExtension.getCheckoutDir().get()); }); - ExtraPropertiesExtension extraProperties = project.getExtensions().getExtraProperties(); TaskProvider findRemoteTaskProvider = tasks.register("findRemote", LoggedExec.class, findRemote -> { findRemote.dependsOn(createCloneTaskProvider); findRemote.getWorkingDir().set(gitExtension.getCheckoutDir()); findRemote.commandLine("git", "remote", "-v"); findRemote.getCaptureOutput().set(true); - findRemote.doLast(t -> { extraProperties.set("remoteExists", isRemoteAvailable(remote, findRemote.getOutput())); }); + findRemote.doLast(t -> System.setProperty("remoteExists", String.valueOf(isRemoteAvailable(remote, findRemote.getOutput())))); }); TaskProvider addRemoteTaskProvider = tasks.register("addRemote", addRemote -> { String rootProjectName = project.getRootProject().getName(); addRemote.dependsOn(findRemoteTaskProvider); - addRemote.onlyIf("remote exists", task -> ((boolean) extraProperties.get("remoteExists")) == false); + addRemote.onlyIf("remote exists", task -> (Boolean.valueOf(providerFactory.systemProperty("remoteExists").get()) == false)); addRemote.doLast(new Action() { @Override public void execute(Task task) { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java index 2468711561ae4..f727dc165a8a9 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java @@ -286,11 +286,12 @@ static void createBuildBwcTask( if (project.getGradle().getStartParameter().isBuildCacheEnabled()) { c.getArgs().add("--build-cache"); } + File rootDir = project.getRootDir(); c.doLast(new Action() { @Override public void execute(Task task) { if (expectedOutputFile.exists() == false) { - Path relativeOutputPath = project.getRootDir().toPath().relativize(expectedOutputFile.toPath()); + Path relativeOutputPath = rootDir.toPath().relativize(expectedOutputFile.toPath()); final String message = "Building %s didn't generate expected artifact [%s]. 
The working branch may be " + "out-of-date - try merging in the latest upstream changes to the branch."; throw new InvalidUserDataException(message.formatted(bwcVersion.get(), relativeOutputPath)); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java index 16c7bf6d32862..f92789f701049 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java @@ -23,16 +23,17 @@ import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.elasticsearch.gradle.util.GradleUtils; import org.gradle.api.GradleException; -import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Plugin; import org.gradle.api.Project; import org.gradle.api.artifacts.Dependency; +import org.gradle.api.artifacts.dsl.DependencyHandler; import org.gradle.api.provider.Provider; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import java.util.function.Function; -import static org.elasticsearch.gradle.util.GradleUtils.projectDependency; - /** * An internal elasticsearch build plugin that registers additional * distribution resolution strategies to the 'elasticsearch.download-distribution' plugin @@ -64,18 +65,18 @@ public void apply(Project project) { *
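
The `rootDir` change just above follows the same configuration-cache rule as the rest of this PR: `Task.getProject()` may not be called at execution time, so any value derived from it is captured into a local during configuration and the `doLast` action closes over that value instead. A short sketch of the idiom; the task name and message are illustrative:

    // Capture Project-derived state at configuration time; the task action must
    // not call getProject() once the configuration cache is in play.
    import java.io.File;
    import org.gradle.api.Plugin;
    import org.gradle.api.Project;

    public class RootDirCapturePlugin implements Plugin<Project> {
        @Override
        public void apply(Project project) {
            File rootDir = project.getRootDir(); // resolved while configuring
            project.getTasks().register("reportRootDir", task ->
                task.doLast(t -> System.out.println("root: " + rootDir.getAbsolutePath()))
            );
        }
    }
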

* BWC versions are resolved as project dependencies on projects under `:distribution:bwc`. */ - private void registerInternalDistributionResolutions(NamedDomainObjectContainer<DistributionResolution> resolutions) { - resolutions.register("localBuild", distributionResolution -> distributionResolution.setResolver((project, distribution) -> { + private void registerInternalDistributionResolutions(List<DistributionResolution> resolutions) { + resolutions.add(new DistributionResolution("local-build", (project, distribution) -> { if (isCurrentVersion(distribution)) { // non-external project, so depend on local build return new ProjectBasedDistributionDependency( - config -> projectDependency(project, distributionProjectPath(distribution), config) + config -> projectDependency(project.getDependencies(), distributionProjectPath(distribution), config) ); } return null; })); - resolutions.register("bwc", distributionResolution -> distributionResolution.setResolver((project, distribution) -> { + resolutions.add(new DistributionResolution("bwc", (project, distribution) -> { BwcVersions.UnreleasedVersionInfo unreleasedInfo = BuildParams.getBwcVersions() .unreleasedInfo(Version.fromString(distribution.getVersion())); if (unreleasedInfo != null) { @@ -89,7 +90,7 @@ private void registerInternalDistributionResolutions(NamedDomainObjectContainer< } String projectConfig = getProjectConfig(distribution, unreleasedInfo); return new ProjectBasedDistributionDependency( - (config) -> projectDependency(project, unreleasedInfo.gradleProjectPath(), projectConfig) + (config) -> projectDependency(project.getDependencies(), unreleasedInfo.gradleProjectPath(), projectConfig) ); } return null; @@ -116,6 +117,13 @@ private static String getProjectConfig(ElasticsearchDistribution distribution, B } } + private static Dependency projectDependency(DependencyHandler dependencyHandler, String projectPath, String projectConfig) { + Map<String, String> depConfig = new HashMap<>(); + depConfig.put("path", projectPath); + depConfig.put("configuration", projectConfig); + return dependencyHandler.project(depConfig); + } + private static String distributionProjectPath(ElasticsearchDistribution distribution) { String projectPath = ":distribution"; if (distribution.getType() == ElasticsearchDistributionTypes.INTEG_TEST_ZIP) { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java index b32c566363e88..93753f7c7ac56 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java @@ -143,6 +143,7 @@ private static ListMultimap<Class<?>, String> createLegacyRestTestBasePluginUsage( map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:server:multi-node"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:server:single-node"); + map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:server:mixed-cluster"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:fleet:qa:rest"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:graph:qa:with-security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:identity-provider:qa:idp-rest-tests"); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckForbiddenApisTask.java
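
Worth noting on the new private `projectDependency` helper above: unlike the `GradleUtils` version it replaces (removed later in this diff), it no longer checks that the target project exists, and it builds the dependency from a `DependencyHandler` rather than a `Project`. The map notation it uses is the programmatic equivalent of the `project(path: ..., configuration: ...)` DSL; a compact sketch with illustrative arguments:

    // Create a project dependency on a specific configuration via map notation.
    // The path and configuration passed in by callers are up to the build.
    import java.util.Map;
    import org.gradle.api.artifacts.Dependency;
    import org.gradle.api.artifacts.dsl.DependencyHandler;

    public final class ProjectDependencies {
        private ProjectDependencies() {}

        public static Dependency onConfiguration(DependencyHandler dependencies, String path, String configuration) {
            // equivalent to project(path: ":distribution", configuration: "default") in the DSL
            return dependencies.project(Map.of("path", path, "configuration", configuration));
        }
    }
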
b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckForbiddenApisTask.java index 194d0361980ec..bb0b8dcf04437 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckForbiddenApisTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckForbiddenApisTask.java @@ -377,6 +377,7 @@ public void checkForbidden() { parameters.getTargetCompatibility().set(getTargetCompatibility()); parameters.getIgnoreFailures().set(getIgnoreFailures()); parameters.getSuccessMarker().set(getSuccessMarker()); + parameters.getSignaturesFiles().from(getSignaturesFiles()); }); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java index bcbe1740630ce..42d3a770dbbcc 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java @@ -101,7 +101,7 @@ public void apply(Project project) { addDistributionSysprop(t, DISTRIBUTION_SYSPROP, distribution::getFilepath); addDistributionSysprop(t, EXAMPLE_PLUGIN_SYSPROP, () -> examplePlugin.getSingleFile().toString()); t.exclude("**/PackageUpgradeTests.class"); - }, distribution.getArchiveDependencies(), examplePlugin.getDependencies()); + }, distribution, examplePlugin.getDependencies()); if (distribution.getPlatform() == Platform.WINDOWS) { windowsTestTasks.add(destructiveTask); @@ -235,6 +235,7 @@ private static ElasticsearchDistribution createDistro( d.setBundledJdk(bundledJdk); } d.setVersion(version); + d.setPreferArchive(true); }); // Allow us to gracefully omit building Docker distributions if Docker is not available on the system. diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/HistoricalFeaturesMetadataPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/HistoricalFeaturesMetadataPlugin.java new file mode 100644 index 0000000000000..bd9df6d3903ca --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/HistoricalFeaturesMetadataPlugin.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal.test; + +import org.elasticsearch.gradle.dependencies.CompileOnlyResolvePlugin; +import org.gradle.api.Plugin; +import org.gradle.api.Project; +import org.gradle.api.artifacts.Configuration; +import org.gradle.api.artifacts.type.ArtifactTypeDefinition; +import org.gradle.api.tasks.SourceSet; +import org.gradle.api.tasks.SourceSetContainer; +import org.gradle.api.tasks.TaskProvider; + +import java.util.Map; + +/** + * Extracts historical feature metadata into a machine-readable format for use in backward compatibility testing. 
+ */ +public class HistoricalFeaturesMetadataPlugin implements Plugin { + public static final String HISTORICAL_FEATURES_JSON = "historical-features.json"; + public static final String FEATURES_METADATA_TYPE = "features-metadata-json"; + public static final String FEATURES_METADATA_CONFIGURATION = "featuresMetadata"; + + @Override + public void apply(Project project) { + Configuration featureMetadataExtractorConfig = project.getConfigurations().create("featuresMetadataExtractor", c -> { + // Don't bother adding this dependency if the project doesn't exist which simplifies testing + if (project.findProject(":test:metadata-extractor") != null) { + c.defaultDependencies(d -> d.add(project.getDependencies().project(Map.of("path", ":test:metadata-extractor")))); + } + }); + + SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); + SourceSet mainSourceSet = sourceSets.getByName(SourceSet.MAIN_SOURCE_SET_NAME); + + TaskProvider generateTask = project.getTasks() + .register("generateHistoricalFeaturesMetadata", HistoricalFeaturesMetadataTask.class, task -> { + task.setClasspath( + featureMetadataExtractorConfig.plus(mainSourceSet.getRuntimeClasspath()) + .plus(project.getConfigurations().getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME)) + ); + task.getOutputFile().convention(project.getLayout().getBuildDirectory().file(HISTORICAL_FEATURES_JSON)); + }); + + Configuration featuresMetadataArtifactConfig = project.getConfigurations().create(FEATURES_METADATA_CONFIGURATION, c -> { + c.setCanBeResolved(false); + c.setCanBeConsumed(true); + c.attributes(a -> { a.attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, FEATURES_METADATA_TYPE); }); + }); + + project.getArtifacts().add(featuresMetadataArtifactConfig.getName(), generateTask); + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/HistoricalFeaturesMetadataTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/HistoricalFeaturesMetadataTask.java new file mode 100644 index 0000000000000..0891225d1e1ef --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/HistoricalFeaturesMetadataTask.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
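
`HistoricalFeaturesMetadataPlugin` above publishes the generated JSON through an outgoing `featuresMetadata` configuration whose artifact carries the `features-metadata-json` artifact-type attribute, so consumers match on the attribute rather than on file names. A consumer-side sketch (the configuration name and printing task are illustrative; `RestTestBasePlugin` later in this diff does the real aggregation):

    // Resolve features-metadata artifacts from other projects by matching the
    // artifact-type attribute the producing plugin sets on its outgoing artifact.
    import org.gradle.api.Plugin;
    import org.gradle.api.Project;
    import org.gradle.api.artifacts.Configuration;
    import org.gradle.api.artifacts.type.ArtifactTypeDefinition;

    public class FeaturesMetadataConsumerPlugin implements Plugin<Project> {
        @Override
        public void apply(Project project) {
            Configuration featuresMetadata = project.getConfigurations().create("featuresMetadataSample", c -> {
                c.setCanBeConsumed(false);
                c.setCanBeResolved(true);
                c.attributes(a -> a.attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, "features-metadata-json"));
            });
            project.getTasks().register("printFeaturesMetadata", task -> {
                task.getInputs().files(featuresMetadata);
                task.doLast(t -> featuresMetadata.getFiles().forEach(f -> System.out.println(f)));
            });
        }
    }
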
+ */ + +package org.elasticsearch.gradle.internal.test; + +import org.elasticsearch.gradle.LoggedExec; +import org.gradle.api.DefaultTask; +import org.gradle.api.file.ConfigurableFileCollection; +import org.gradle.api.file.FileCollection; +import org.gradle.api.file.RegularFileProperty; +import org.gradle.api.tasks.CacheableTask; +import org.gradle.api.tasks.Classpath; +import org.gradle.api.tasks.OutputFile; +import org.gradle.api.tasks.TaskAction; +import org.gradle.process.ExecOperations; +import org.gradle.workers.WorkAction; +import org.gradle.workers.WorkParameters; +import org.gradle.workers.WorkerExecutor; + +import javax.inject.Inject; + +@CacheableTask +public abstract class HistoricalFeaturesMetadataTask extends DefaultTask { + private FileCollection classpath; + + @OutputFile + public abstract RegularFileProperty getOutputFile(); + + @Classpath + public FileCollection getClasspath() { + return classpath; + } + + public void setClasspath(FileCollection classpath) { + this.classpath = classpath; + } + + @Inject + public abstract WorkerExecutor getWorkerExecutor(); + + @TaskAction + public void execute() { + getWorkerExecutor().noIsolation().submit(HistoricalFeaturesMetadataWorkAction.class, params -> { + params.getClasspath().setFrom(getClasspath()); + params.getOutputFile().set(getOutputFile()); + }); + } + + public interface HistoricalFeaturesWorkParameters extends WorkParameters { + ConfigurableFileCollection getClasspath(); + + RegularFileProperty getOutputFile(); + } + + public abstract static class HistoricalFeaturesMetadataWorkAction implements WorkAction { + private final ExecOperations execOperations; + + @Inject + public HistoricalFeaturesMetadataWorkAction(ExecOperations execOperations) { + this.execOperations = execOperations; + } + + @Override + public void execute() { + LoggedExec.javaexec(execOperations, spec -> { + spec.getMainClass().set("org.elasticsearch.extractor.features.HistoricalFeaturesMetadataExtractor"); + spec.classpath(getParameters().getClasspath()); + spec.args(getParameters().getOutputFile().get().getAsFile().getAbsolutePath()); + }); + } + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/LegacyRestTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/LegacyRestTestBasePlugin.java index eacc5da6220ab..be6e3eb377aa1 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/LegacyRestTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/LegacyRestTestBasePlugin.java @@ -22,12 +22,18 @@ import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Plugin; import org.gradle.api.Project; +import org.gradle.api.Task; import org.gradle.api.plugins.JavaBasePlugin; import org.gradle.api.provider.Provider; import org.gradle.api.provider.ProviderFactory; +import org.gradle.api.specs.NotSpec; +import org.gradle.api.specs.Spec; import org.gradle.api.tasks.Sync; +import org.gradle.api.tasks.TaskContainer; import org.gradle.api.tasks.bundling.Zip; +import java.util.Collections; + import javax.inject.Inject; import static org.elasticsearch.gradle.internal.RestrictedBuildApiService.BUILD_API_RESTRICTIONS_SYS_PROPERTY; @@ -47,6 +53,7 @@ public class LegacyRestTestBasePlugin implements Plugin { private static final String TESTS_CLUSTER_REMOTE_ACCESS = "tests.cluster.remote_access"; private ProviderFactory providerFactory; + private Project project; @Inject public 
LegacyRestTestBasePlugin(ProviderFactory providerFactory) { @@ -55,6 +62,7 @@ public LegacyRestTestBasePlugin(ProviderFactory providerFactory) { @Override public void apply(Project project) { + this.project = project; Provider serviceProvider = project.getGradle() .getSharedServices() .registerIfAbsent("restrictedBuildAPI", RestrictedBuildApiService.class, spec -> { @@ -118,9 +126,30 @@ public void apply(Project project) { t.getClusters().forEach(c -> c.plugin(bundle)); } }); + configureCacheability(t); }); } + private void configureCacheability(StandaloneRestIntegTestTask testTask) { + TaskContainer tasks = project.getTasks(); + Spec taskSpec = t -> tasks.withType(StandaloneRestIntegTestTask.class) + .stream() + .filter(task -> task != testTask) + .anyMatch(task -> Collections.disjoint(task.getClusters(), testTask.getClusters()) == false); + testTask.getOutputs() + .doNotCacheIf( + "Caching disabled for this task since it uses a cluster shared by other tasks", + /* + * Look for any other tasks which use the same cluster as this task. Since tests often have side effects for the cluster + * they execute against, this state can cause issues when trying to cache tests results of tasks that share a cluster. To + * avoid any undesired behavior we simply disable the cache if we detect that this task uses a cluster shared between + * multiple tasks. + */ + taskSpec + ); + testTask.getOutputs().upToDateWhen(new NotSpec(taskSpec)); + } + private String systemProperty(String propName) { return providerFactory.systemProperty(propName).getOrNull(); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/CopyRestTestsTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/CopyRestTestsTask.java index 9359272b29610..94345ed80eec7 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/CopyRestTestsTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/CopyRestTestsTask.java @@ -15,6 +15,7 @@ import org.gradle.api.file.FileSystemOperations; import org.gradle.api.file.FileTree; import org.gradle.api.file.ProjectLayout; +import org.gradle.api.internal.file.FileOperations; import org.gradle.api.model.ObjectFactory; import org.gradle.api.provider.ListProperty; import org.gradle.api.tasks.IgnoreEmptyDirectories; @@ -43,7 +44,7 @@ * * @see RestResourcesPlugin */ -public class CopyRestTestsTask extends DefaultTask { +public abstract class CopyRestTestsTask extends DefaultTask { private static final String REST_TEST_PREFIX = "rest-api-spec/test"; private final ListProperty includeCore; private final ListProperty includeXpack; @@ -62,6 +63,9 @@ public class CopyRestTestsTask extends DefaultTask { private final ProjectLayout projectLayout; private final FileSystemOperations fileSystemOperations; + @Inject + public abstract FileOperations getFileOperations(); + @Inject public CopyRestTestsTask( ProjectLayout projectLayout, diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java index c602a50c2adb8..566e93d8a3f53 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java @@ -21,6 +21,7 @@ import 
org.elasticsearch.gradle.internal.ElasticsearchTestBasePlugin; import org.elasticsearch.gradle.internal.InternalDistributionDownloadPlugin; import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.internal.test.HistoricalFeaturesMetadataPlugin; import org.elasticsearch.gradle.plugin.BasePluginBuildPlugin; import org.elasticsearch.gradle.plugin.PluginBuildPlugin; import org.elasticsearch.gradle.plugin.PluginPropertiesExtension; @@ -35,9 +36,12 @@ import org.gradle.api.Task; import org.gradle.api.artifacts.Configuration; import org.gradle.api.artifacts.Dependency; +import org.gradle.api.artifacts.DependencySet; import org.gradle.api.artifacts.ProjectDependency; import org.gradle.api.artifacts.type.ArtifactTypeDefinition; import org.gradle.api.attributes.Attribute; +import org.gradle.api.file.ConfigurableFileCollection; +import org.gradle.api.file.FileCollection; import org.gradle.api.file.FileTree; import org.gradle.api.provider.ProviderFactory; import org.gradle.api.tasks.ClasspathNormalizer; @@ -72,6 +76,9 @@ public class RestTestBasePlugin implements Plugin { private static final String PLUGINS_CONFIGURATION = "clusterPlugins"; private static final String EXTRACTED_PLUGINS_CONFIGURATION = "extractedPlugins"; private static final Attribute CONFIGURATION_ATTRIBUTE = Attribute.of("test-cluster-artifacts", String.class); + private static final String FEATURES_METADATA_CONFIGURATION = "featuresMetadataDeps"; + private static final String DEFAULT_DISTRO_FEATURES_METADATA_CONFIGURATION = "defaultDistrofeaturesMetadataDeps"; + private static final String TESTS_FEATURES_METADATA_PATH = "tests.features.metadata.path"; private final ProviderFactory providerFactory; @@ -105,6 +112,36 @@ public void apply(Project project) { extractedPluginsConfiguration.extendsFrom(pluginsConfiguration); configureArtifactTransforms(project); + // Create configuration for aggregating historical feature metadata + Configuration featureMetadataConfig = project.getConfigurations().create(FEATURES_METADATA_CONFIGURATION, c -> { + c.setCanBeConsumed(false); + c.setCanBeResolved(true); + c.attributes( + a -> a.attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, HistoricalFeaturesMetadataPlugin.FEATURES_METADATA_TYPE) + ); + c.defaultDependencies(d -> d.add(project.getDependencies().project(Map.of("path", ":server")))); + c.withDependencies(dependencies -> { + // We can't just use Configuration#extendsFrom() here as we'd inherit the wrong project configuration + copyDependencies(project, dependencies, modulesConfiguration); + copyDependencies(project, dependencies, pluginsConfiguration); + }); + }); + + Configuration defaultDistroFeatureMetadataConfig = project.getConfigurations() + .create(DEFAULT_DISTRO_FEATURES_METADATA_CONFIGURATION, c -> { + c.setCanBeConsumed(false); + c.setCanBeResolved(true); + c.attributes( + a -> a.attribute( + ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, + HistoricalFeaturesMetadataPlugin.FEATURES_METADATA_TYPE + ) + ); + c.defaultDependencies( + d -> d.add(project.getDependencies().project(Map.of("path", ":distribution", "configuration", "featuresMetadata"))) + ); + }); + // For plugin and module projects, register the current project plugin bundle as a dependency project.getPluginManager().withPlugin("elasticsearch.esplugin", plugin -> { if (GradleUtils.isModuleProject(project.getPath())) { @@ -122,6 +159,10 @@ public void apply(Project project) { task.dependsOn(integTestDistro, modulesConfiguration); registerDistributionInputs(task, integTestDistro); + 
// Pass feature metadata on to tests + task.getInputs().files(featureMetadataConfig).withPathSensitivity(PathSensitivity.NONE); + nonInputSystemProperties.systemProperty(TESTS_FEATURES_METADATA_PATH, () -> featureMetadataConfig.getAsPath()); + // Enable parallel execution for these tests since each test gets its own cluster task.setMaxParallelForks(task.getProject().getGradle().getStartParameter().getMaxWorkerCount() / 2); nonInputSystemProperties.systemProperty(TESTS_MAX_PARALLEL_FORKS_SYSPROP, () -> String.valueOf(task.getMaxParallelForks())); @@ -134,16 +175,20 @@ public void apply(Project project) { task.systemProperty("tests.system_call_filter", "false"); // Register plugins and modules as task inputs and pass paths as system properties to tests - nonInputSystemProperties.systemProperty(TESTS_CLUSTER_MODULES_PATH_SYSPROP, modulesConfiguration::getAsPath); - registerConfigurationInputs(task, modulesConfiguration); - nonInputSystemProperties.systemProperty(TESTS_CLUSTER_PLUGINS_PATH_SYSPROP, pluginsConfiguration::getAsPath); - registerConfigurationInputs(task, extractedPluginsConfiguration); + var modulePath = project.getObjects().fileCollection().from(modulesConfiguration); + nonInputSystemProperties.systemProperty(TESTS_CLUSTER_MODULES_PATH_SYSPROP, modulePath::getAsPath); + registerConfigurationInputs(task, modulesConfiguration.getName(), modulePath); + var pluginPath = project.getObjects().fileCollection().from(pluginsConfiguration); + nonInputSystemProperties.systemProperty(TESTS_CLUSTER_PLUGINS_PATH_SYSPROP, pluginPath::getAsPath); + registerConfigurationInputs( + task, + extractedPluginsConfiguration.getName(), + project.getObjects().fileCollection().from(extractedPluginsConfiguration) + ); // Wire up integ-test distribution by default for all test tasks - nonInputSystemProperties.systemProperty( - INTEG_TEST_DISTRIBUTION_SYSPROP, - () -> integTestDistro.getExtracted().getSingleFile().getPath() - ); + FileCollection extracted = integTestDistro.getExtracted(); + nonInputSystemProperties.systemProperty(INTEG_TEST_DISTRIBUTION_SYSPROP, () -> extracted.getSingleFile().getPath()); nonInputSystemProperties.systemProperty(TESTS_RUNTIME_JAVA_SYSPROP, BuildParams.getRuntimeJavaHome()); // Add `usesDefaultDistribution()` extension method to test tasks to indicate they require the default distro @@ -157,6 +202,11 @@ public Void call(Object... args) { DEFAULT_DISTRIBUTION_SYSPROP, providerFactory.provider(() -> defaultDistro.getExtracted().getSingleFile().getPath()) ); + + // If we are using the default distribution we need to register all module feature metadata + task.getInputs().files(defaultDistroFeatureMetadataConfig).withPathSensitivity(PathSensitivity.NONE); + nonInputSystemProperties.systemProperty(TESTS_FEATURES_METADATA_PATH, defaultDistroFeatureMetadataConfig::getAsPath); + return null; } }); @@ -192,6 +242,14 @@ public Void call(Object... 
args) { }); } + private void copyDependencies(Project project, DependencySet dependencies, Configuration configuration) { + configuration.getDependencies() + .stream() + .filter(d -> d instanceof ProjectDependency) + .map(d -> project.getDependencies().project(Map.of("path", ((ProjectDependency) d).getDependencyProject().getPath()))) + .forEach(dependencies::add); + } + private ElasticsearchDistribution createDistribution(Project project, String name, String version) { return createDistribution(project, name, version, null); } @@ -216,15 +274,15 @@ private FileTree getDistributionFiles(ElasticsearchDistribution distribution, Ac return distribution.getExtracted().getAsFileTree().matching(patternFilter); } - private void registerConfigurationInputs(Task task, Configuration configuration) { + private void registerConfigurationInputs(Task task, String configurationName, ConfigurableFileCollection configuration) { task.getInputs() .files(providerFactory.provider(() -> configuration.getAsFileTree().filter(f -> f.getName().endsWith(".jar") == false))) - .withPropertyName(configuration.getName() + "-files") + .withPropertyName(configurationName + "-files") .withPathSensitivity(PathSensitivity.RELATIVE); task.getInputs() .files(providerFactory.provider(() -> configuration.getAsFileTree().filter(f -> f.getName().endsWith(".jar")))) - .withPropertyName(configuration.getName() + "-classpath") + .withPropertyName(configurationName + "-classpath") .withNormalizer(ClasspathNormalizer.class); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/RestCompatTestTransformTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/RestCompatTestTransformTask.java index 76004e3e5f6db..9b1e8a67deec8 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/RestCompatTestTransformTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/RestCompatTestTransformTask.java @@ -457,15 +457,17 @@ public void transform() throws IOException { Collections.singletonList(new Skip(skippedFilesWithReason.get(file))) ); } else { + List<RestTestTransform<?>> transformations = new ArrayList<>(getTransformations().get()); + if (skippedFilesWithTestAndReason.containsKey(file)) { // skip the named tests for this file skippedFilesWithTestAndReason.get(file).forEach(fullTestNameAndReasonPair -> { String prefix = file.getName().replace(".yml", "/"); String singleTestName = fullTestNameAndReasonPair.getLeft().replaceAll(".*" + prefix, ""); - getTransformations().add(new Skip(singleTestName, fullTestNameAndReasonPair.getRight())); + transformations.add(new Skip(singleTestName, fullTestNameAndReasonPair.getRight())); }); } - transformRestTests = transformer.transformRestTests(new LinkedList<>(tests), getTransformations().get()); + transformRestTests = transformer.transformRestTests(new LinkedList<>(tests), transformations); } // convert to url to ensure forward slashes diff --git a/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt b/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt index 34f39bbc4ca54..48c888acd35e2 100644 --- a/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt +++ b/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt @@ -158,6 +158,8 @@ org.elasticsearch.cluster.ClusterState#compatibilityVersions() @defaultMessage ClusterFeatures#nodeFeatures is for
internal use only. Use FeatureService#clusterHasFeature to determine if a feature is present on the cluster. org.elasticsearch.cluster.ClusterFeatures#nodeFeatures() +@defaultMessage ClusterFeatures#allNodeFeatures is for internal use only. Use FeatureService#clusterHasFeature to determine if a feature is present on the cluster. +org.elasticsearch.cluster.ClusterFeatures#allNodeFeatures() @defaultMessage ClusterFeatures#clusterHasFeature is for internal use only. Use FeatureService#clusterHasFeature to determine if a feature is present on the cluster. org.elasticsearch.cluster.ClusterFeatures#clusterHasFeature(org.elasticsearch.features.NodeFeature) diff --git a/build-tools/src/integTest/groovy/org/elasticsearch/gradle/TestClustersPluginFuncTest.groovy b/build-tools/src/integTest/groovy/org/elasticsearch/gradle/TestClustersPluginFuncTest.groovy index 6b662b8165034..719fae2b463c0 100644 --- a/build-tools/src/integTest/groovy/org/elasticsearch/gradle/TestClustersPluginFuncTest.groovy +++ b/build-tools/src/integTest/groovy/org/elasticsearch/gradle/TestClustersPluginFuncTest.groovy @@ -34,7 +34,7 @@ class TestClustersPluginFuncTest extends AbstractGradleFuncTest { id 'elasticsearch.testclusters' } - class SomeClusterAwareTask extends DefaultTask implements TestClustersAware { + abstract class SomeClusterAwareTask extends DefaultTask implements TestClustersAware { private Collection clusters = new HashSet<>(); diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java b/build-tools/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java index d08dc469e5ba5..e12523870b15b 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java @@ -11,6 +11,7 @@ import org.elasticsearch.gradle.distribution.ElasticsearchDistributionTypes; import org.elasticsearch.gradle.transform.SymbolicLinkPreservingUntarTransform; import org.elasticsearch.gradle.transform.UnzipTransform; +import org.gradle.api.Action; import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Plugin; import org.gradle.api.Project; @@ -22,7 +23,8 @@ import org.gradle.api.provider.Property; import org.gradle.api.provider.Provider; -import java.util.Comparator; +import java.util.ArrayList; +import java.util.List; import javax.inject.Inject; @@ -42,9 +44,10 @@ public class DistributionDownloadPlugin implements Plugin { private static final String DOWNLOAD_REPO_NAME = "elasticsearch-downloads"; private static final String SNAPSHOT_REPO_NAME = "elasticsearch-snapshots"; public static final String DISTRO_EXTRACTED_CONFIG_PREFIX = "es_distro_extracted_"; + public static final String DISTRO_CONFIG_PREFIX = "es_distro_file_"; private NamedDomainObjectContainer distributionsContainer; - private NamedDomainObjectContainer distributionsResolutionStrategiesContainer; + private List distributionsResolutionStrategies; private Property dockerAvailability; @@ -77,7 +80,7 @@ public void apply(Project project) { private void setupDistributionContainer(Project project, Property dockerAvailable) { distributionsContainer = project.container(ElasticsearchDistribution.class, name -> { - Configuration fileConfiguration = project.getConfigurations().create("es_distro_file_" + name); + Configuration fileConfiguration = project.getConfigurations().create(DISTRO_CONFIG_PREFIX + name); Configuration extractedConfiguration = 
project.getConfigurations().create(DISTRO_EXTRACTED_CONFIG_PREFIX + name); extractedConfiguration.getAttributes() .attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.DIRECTORY_TYPE); @@ -85,21 +88,17 @@ private void setupDistributionContainer(Project project, Property docke name, project.getObjects(), dockerAvailability, - fileConfiguration, - extractedConfiguration, - (dist) -> finalizeDistributionDependencies(project, dist) + project.getObjects().fileCollection().from(fileConfiguration), + project.getObjects().fileCollection().from(extractedConfiguration), + new FinalizeDistributionAction(distributionsResolutionStrategies, project) ); }); project.getExtensions().add(CONTAINER_NAME, distributionsContainer); } private void setupResolutionsContainer(Project project) { - distributionsResolutionStrategiesContainer = project.container(DistributionResolution.class); - // We want this ordered in the same resolution strategies are added - distributionsResolutionStrategiesContainer.whenObjectAdded( - resolveDependencyNotation -> resolveDependencyNotation.setPriority(distributionsResolutionStrategiesContainer.size()) - ); - project.getExtensions().add(RESOLUTION_CONTAINER_NAME, distributionsResolutionStrategiesContainer); + distributionsResolutionStrategies = new ArrayList<>(); + project.getExtensions().add(RESOLUTION_CONTAINER_NAME, distributionsResolutionStrategies); } @SuppressWarnings("unchecked") @@ -108,30 +107,8 @@ public static NamedDomainObjectContainer getContainer } @SuppressWarnings("unchecked") - public static NamedDomainObjectContainer getRegistrationsContainer(Project project) { - return (NamedDomainObjectContainer) project.getExtensions().getByName(RESOLUTION_CONTAINER_NAME); - } - - private void finalizeDistributionDependencies(Project project, ElasticsearchDistribution distribution) { - DependencyHandler dependencies = project.getDependencies(); - // for the distribution as a file, just depend on the artifact directly - DistributionDependency distributionDependency = resolveDependencyNotation(project, distribution); - dependencies.add(distribution.configuration.getName(), distributionDependency.getDefaultNotation()); - // no extraction needed for rpm, deb or docker - if (distribution.getType().shouldExtract()) { - // The extracted configuration depends on the artifact directly but has - // an artifact transform registered to resolve it as an unpacked folder. - dependencies.add(distribution.getExtracted().getName(), distributionDependency.getExtractedNotation()); - } - } - - private DistributionDependency resolveDependencyNotation(Project p, ElasticsearchDistribution distribution) { - return distributionsResolutionStrategiesContainer.stream() - .sorted(Comparator.comparingInt(DistributionResolution::getPriority)) - .map(r -> r.getResolver().resolve(p, distribution)) - .filter(d -> d != null) - .findFirst() - .orElseGet(() -> DistributionDependency.of(dependencyNotation(distribution))); + public static List getRegistrationsContainer(Project project) { + return (List) project.getExtensions().getByName(RESOLUTION_CONTAINER_NAME); } private static void addIvyRepo(Project project, String name, String url, String group) { @@ -155,22 +132,53 @@ private static void setupDownloadServiceRepo(Project project) { addIvyRepo(project, SNAPSHOT_REPO_NAME, "https://snapshots-no-kpi.elastic.co", FAKE_SNAPSHOT_IVY_GROUP); } - /** - * Returns a dependency object representing the given distribution. - *

- * The returned object is suitable to be passed to {@link DependencyHandler}. - * The concrete type of the object will be a set of maven coordinates as a {@link String}. - * Maven coordinates point to either the integ-test-zip coordinates on maven central, or a set of artificial - * coordinates that resolve to the Elastic download service through an ivy repository. - */ - private String dependencyNotation(ElasticsearchDistribution distribution) { - if (distribution.getType() == ElasticsearchDistributionTypes.INTEG_TEST_ZIP) { - return "org.elasticsearch.distribution.integ-test-zip:elasticsearch:" + distribution.getVersion() + "@zip"; + private record FinalizeDistributionAction(List resolutionList, Project project) + implements + Action { + @Override + + public void execute(ElasticsearchDistribution distro) { + finalizeDistributionDependencies(project, distro); + } + + private void finalizeDistributionDependencies(Project project, ElasticsearchDistribution distribution) { + // for the distribution as a file, just depend on the artifact directly + DistributionDependency distributionDependency = resolveDependencyNotation(project, distribution); + project.getDependencies().add(DISTRO_CONFIG_PREFIX + distribution.getName(), distributionDependency.getDefaultNotation()); + // no extraction needed for rpm, deb or docker + if (distribution.getType().shouldExtract()) { + // The extracted configuration depends on the artifact directly but has + // an artifact transform registered to resolve it as an unpacked folder. + project.getDependencies() + .add(DISTRO_EXTRACTED_CONFIG_PREFIX + distribution.getName(), distributionDependency.getExtractedNotation()); + } + } + + private DistributionDependency resolveDependencyNotation(Project project, ElasticsearchDistribution distro) { + return resolutionList.stream() + .map(r -> r.getResolver().resolve(project, distro)) + .filter(d -> d != null) + .findFirst() + .orElseGet(() -> DistributionDependency.of(dependencyNotation(distro))); + } + + /** + * Returns a dependency object representing the given distribution. + *

+ * The returned object is suitable to be passed to {@link DependencyHandler}. + * The concrete type of the object will be a set of maven coordinates as a {@link String}. + * Maven coordinates point to either the integ-test-zip coordinates on maven central, or a set of artificial + * coordinates that resolve to the Elastic download service through an ivy repository. + */ + private String dependencyNotation(ElasticsearchDistribution distribution) { + if (distribution.getType() == ElasticsearchDistributionTypes.INTEG_TEST_ZIP) { + return "org.elasticsearch.distribution.integ-test-zip:elasticsearch:" + distribution.getVersion() + "@zip"; + } + Version distroVersion = Version.fromString(distribution.getVersion()); + String extension = distribution.getType().getExtension(distribution.getPlatform()); + String classifier = distribution.getType().getClassifier(distribution.getPlatform(), distroVersion); + String group = distribution.getVersion().endsWith("-SNAPSHOT") ? FAKE_SNAPSHOT_IVY_GROUP : FAKE_IVY_GROUP; + return group + ":elasticsearch" + ":" + distribution.getVersion() + classifier + "@" + extension; } - Version distroVersion = Version.fromString(distribution.getVersion()); - String extension = distribution.getType().getExtension(distribution.getPlatform()); - String classifier = distribution.getType().getClassifier(distribution.getPlatform(), distroVersion); - String group = distribution.getVersion().endsWith("-SNAPSHOT") ? FAKE_SNAPSHOT_IVY_GROUP : FAKE_IVY_GROUP; - return group + ":elasticsearch" + ":" + distribution.getVersion() + classifier + "@" + extension; } } diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/DistributionResolution.java b/build-tools/src/main/java/org/elasticsearch/gradle/DistributionResolution.java index 3b82c9f6975a0..0d8177dea5cb6 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/DistributionResolution.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/DistributionResolution.java @@ -12,9 +12,14 @@ public class DistributionResolution { private Resolver resolver; - private String name; + private final String name; private int priority; + public DistributionResolution(String name, Resolver resolver) { + this(name); + this.resolver = resolver; + } + public DistributionResolution(String name) { this.name = name; } diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/ElasticsearchDistribution.java b/build-tools/src/main/java/org/elasticsearch/gradle/ElasticsearchDistribution.java index 5350b6698cb30..eca0fb319cea4 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/ElasticsearchDistribution.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/ElasticsearchDistribution.java @@ -11,7 +11,8 @@ import org.elasticsearch.gradle.distribution.ElasticsearchDistributionTypes; import org.gradle.api.Action; import org.gradle.api.Buildable; -import org.gradle.api.artifacts.Configuration; +import org.gradle.api.file.ConfigurableFileCollection; +import org.gradle.api.file.FileCollection; import org.gradle.api.model.ObjectFactory; import org.gradle.api.provider.Property; import org.gradle.api.tasks.TaskDependency; @@ -44,7 +45,7 @@ public String toString() { private final String name; private final Property dockerAvailability; // pkg private so plugin can configure - final Configuration configuration; + final FileCollection configuration; private final Property architecture; private final Property version; @@ -52,7 +53,8 @@ public String toString() { private final Property platform; private final Property 
bundledJdk; private final Property failIfUnavailable; - private final Configuration extracted; + private final Property preferArchive; + private final ConfigurableFileCollection extracted; private Action distributionFinalizer; private boolean frozen = false; @@ -60,8 +62,8 @@ public String toString() { String name, ObjectFactory objectFactory, Property dockerAvailability, - Configuration fileConfiguration, - Configuration extractedConfiguration, + ConfigurableFileCollection fileConfiguration, + ConfigurableFileCollection extractedConfiguration, Action distributionFinalizer ) { this.name = name; @@ -74,6 +76,7 @@ public String toString() { this.platform = objectFactory.property(Platform.class); this.bundledJdk = objectFactory.property(Boolean.class); this.failIfUnavailable = objectFactory.property(Boolean.class).convention(true); + this.preferArchive = objectFactory.property(Boolean.class).convention(false); this.extracted = extractedConfiguration; this.distributionFinalizer = distributionFinalizer; } @@ -140,6 +143,14 @@ public void setFailIfUnavailable(boolean failIfUnavailable) { this.failIfUnavailable.set(failIfUnavailable); } + public boolean getPreferArchive() { + return preferArchive.get(); + } + + public void setPreferArchive(boolean preferArchive) { + this.preferArchive.set(preferArchive); + } + public void setArchitecture(Architecture architecture) { this.architecture.set(architecture); } @@ -172,7 +183,7 @@ public String getFilepath() { return configuration.getSingleFile().toString(); } - public Configuration getExtracted() { + public ConfigurableFileCollection getExtracted() { if (getType().shouldExtract() == false) { throw new UnsupportedOperationException( "distribution type [" + getType().getName() + "] for " + "elasticsearch distribution [" + name + "] cannot be extracted" @@ -187,7 +198,9 @@ public TaskDependency getBuildDependencies() { return task -> Collections.emptySet(); } else { maybeFreeze(); - return getType().shouldExtract() ? extracted.getBuildDependencies() : configuration.getBuildDependencies(); + return getType().shouldExtract() && (preferArchive.get() == false) + ? 
extracted.getBuildDependencies() + : configuration.getBuildDependencies(); } } @@ -252,13 +265,4 @@ void finalizeValues() { type.finalizeValue(); bundledJdk.finalizeValue(); } - - public TaskDependency getArchiveDependencies() { - if (skippingDockerDistributionBuild()) { - return task -> Collections.emptySet(); - } else { - maybeFreeze(); - return configuration.getBuildDependencies(); - } - } } diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/DefaultTestClustersTask.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/DefaultTestClustersTask.java index 5c98ab3bf4364..e80d2ed64cabd 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/DefaultTestClustersTask.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/DefaultTestClustersTask.java @@ -12,7 +12,7 @@ import java.util.Collection; import java.util.HashSet; -public class DefaultTestClustersTask extends DefaultTask implements TestClustersAware { +public abstract class DefaultTestClustersTask extends DefaultTask implements TestClustersAware { private Collection clusters = new HashSet<>(); diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java index 953c0447ec71b..b7d4f91ac6240 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java @@ -194,7 +194,9 @@ public void beforeStart() { } catch (IOException e) { logger.warn("Unable to start APM server", e); } - + } else { + // metrics are enabled by default; if --with-apm-server was not used, we should disable them + node.setting("telemetry.metrics.enabled", "false"); } } diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java index 2bd8219dc48e5..ba2a5a20c4fbb 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java @@ -8,11 +8,9 @@ package org.elasticsearch.gradle.testclusters; import org.elasticsearch.gradle.FileSystemOperationsAware; -import org.gradle.api.Task; +import org.gradle.api.provider.ProviderFactory; import org.gradle.api.services.internal.BuildServiceProvider; import org.gradle.api.services.internal.BuildServiceRegistryInternal; -import org.gradle.api.specs.NotSpec; -import org.gradle.api.specs.Spec; import org.gradle.api.tasks.CacheableTask; import org.gradle.api.tasks.Internal; import org.gradle.api.tasks.Nested; @@ -28,6 +26,8 @@ import java.util.HashSet; import java.util.List; +import javax.inject.Inject; + import static org.elasticsearch.gradle.testclusters.TestClustersPlugin.THROTTLE_SERVICE_NAME; /** @@ -42,23 +42,6 @@ public abstract class StandaloneRestIntegTestTask extends Test implements TestCl private boolean debugServer = false; public StandaloneRestIntegTestTask() { - Spec taskSpec = t -> getProject().getTasks() .withType(StandaloneRestIntegTestTask.class) .stream() .filter(task -> task != this) .anyMatch(task -> Collections.disjoint(task.getClusters(), getClusters()) == false); - this.getOutputs() - .doNotCacheIf( - "Caching disabled for this task since it uses a cluster shared by other tasks", - /* - * Look for any other tasks which use
the same cluster as this task. Since tests often have side effects for the cluster - * they execute against, this state can cause issues when trying to cache tests results of tasks that share a cluster. To - * avoid any undesired behavior we simply disable the cache if we detect that this task uses a cluster shared between - * multiple tasks. - */ - taskSpec - ); - this.getOutputs().upToDateWhen(new NotSpec(taskSpec)); this.getOutputs() .doNotCacheIf( "Caching disabled for this task since it is configured to preserve data directory", @@ -79,6 +62,10 @@ public Collection getClusters() { return clusters; } + @Override + @Inject + public abstract ProviderFactory getProviderFactory(); + @Override @Internal public List getSharedResources() { diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java index 9537162b5d109..09066d4b26e88 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java @@ -9,17 +9,24 @@ import org.gradle.api.Task; import org.gradle.api.artifacts.Configuration; +import org.gradle.api.provider.Property; import org.gradle.api.provider.Provider; +import org.gradle.api.services.ServiceReference; import org.gradle.api.tasks.Nested; import java.util.Collection; import java.util.concurrent.Callable; +import static org.elasticsearch.gradle.testclusters.TestClustersPlugin.REGISTRY_SERVICE_NAME; + public interface TestClustersAware extends Task { @Nested Collection getClusters(); + @ServiceReference(REGISTRY_SERVICE_NAME) + Property getRegistery(); + default void useCluster(ElasticsearchCluster cluster) { if (cluster.getPath().equals(getProject().getPath()) == false) { throw new TestClustersException("Task " + getPath() + " can't use test cluster from" + " another project " + cluster); diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java index 72a462c3cd8c9..d2ccda1c1f8c7 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java @@ -37,6 +37,7 @@ import java.io.File; import java.util.HashMap; import java.util.Map; +import java.util.Set; import java.util.function.Function; import javax.inject.Inject; @@ -49,7 +50,7 @@ public class TestClustersPlugin implements Plugin { public static final String THROTTLE_SERVICE_NAME = "testClustersThrottle"; private static final String LIST_TASK_NAME = "listTestClusters"; - private static final String REGISTRY_SERVICE_NAME = "testClustersRegistry"; + public static final String REGISTRY_SERVICE_NAME = "testClustersRegistry"; private static final Logger logger = Logging.getLogger(TestClustersPlugin.class); private final ProviderFactory providerFactory; private Provider runtimeJavaProvider; @@ -222,13 +223,21 @@ private void configureStartClustersHook( testClusterTasksService.get().register(awareTask.getPath(), awareTask); awareTask.doFirst(task -> { awareTask.beforeStart(); - awareTask.getClusters().forEach(registry::maybeStartCluster); + awareTask.getClusters().forEach(awareTask.getRegistery().get()::maybeStartCluster); }); }); }); } } + public static void maybeStartCluster(ElasticsearchCluster cluster, Set 
runningClusters) { + if (runningClusters.contains(cluster)) { + return; + } + runningClusters.add(cluster); + cluster.start(); + } + static public abstract class TaskEventsService implements BuildService, OperationCompletionListener { Map tasksMap = new HashMap<>(); diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/util/GradleUtils.java b/build-tools/src/main/java/org/elasticsearch/gradle/util/GradleUtils.java index ce69c4ec476f9..00e5834b0f826 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/util/GradleUtils.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/util/GradleUtils.java @@ -13,7 +13,6 @@ import org.gradle.api.Task; import org.gradle.api.UnknownTaskException; import org.gradle.api.artifacts.Configuration; -import org.gradle.api.artifacts.Dependency; import org.gradle.api.artifacts.ModuleDependency; import org.gradle.api.artifacts.ProjectDependency; import org.gradle.api.plugins.JavaBasePlugin; @@ -34,7 +33,6 @@ import java.util.ArrayList; import java.util.Arrays; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.function.Function; @@ -183,16 +181,6 @@ public static void extendSourceSet(Project project, String parentSourceSetName, } } - public static Dependency projectDependency(Project project, String projectPath, String projectConfig) { - if (project.findProject(projectPath) == null) { - throw new GradleException("no project [" + projectPath + "], project names: " + project.getRootProject().getAllprojects()); - } - Map depConfig = new HashMap<>(); - depConfig.put("path", projectPath); - depConfig.put("configuration", projectConfig); - return project.getDependencies().project(depConfig); - } - /** * To calculate the project path from a task path without relying on Task#getProject() which is discouraged during * task execution time. diff --git a/build.gradle b/build.gradle index d05c2bf53f660..acd8d6788318f 100644 --- a/build.gradle +++ b/build.gradle @@ -161,8 +161,10 @@ tasks.register("verifyVersions") { String versionMapping = backportConfig.get("branchLabelMapping").fields().find { it.value.textValue() == 'main' }.key String expectedMapping = "^v${versions.elasticsearch.replaceAll('-SNAPSHOT', '')}\$" if (versionMapping != expectedMapping) { - throw new GradleException("Backport label mapping for branch 'main' is '${versionMapping}' but should be " + - "'${expectedMapping}'. Update .backportrc.json.") + throw new GradleException( + "Backport label mapping for branch 'main' is '${versionMapping}' but should be " + + "'${expectedMapping}'. Update .backportrc.json." + ) } } } @@ -211,9 +213,9 @@ allprojects { project.ext { // for ide hacks... 
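
The test-clusters hunks above replace direct registry references with a Gradle shared build service that tasks obtain by name. The following is a minimal, self-contained sketch of that pattern; the service and task names here are invented, and only the `@ServiceReference` wiring and the start-once guard mirror what the diff shows.

```java
import org.gradle.api.DefaultTask;
import org.gradle.api.provider.Property;
import org.gradle.api.services.BuildService;
import org.gradle.api.services.BuildServiceParameters;
import org.gradle.api.services.ServiceReference;
import org.gradle.api.tasks.TaskAction;

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical registry service: starts each named cluster at most once,
// analogous to the maybeStartCluster(...) guard in the hunk above.
abstract class ClusterRegistryService implements BuildService<BuildServiceParameters.None> {
    private final Set<String> runningClusters = ConcurrentHashMap.newKeySet();

    void maybeStartCluster(String clusterName) {
        if (runningClusters.add(clusterName)) {
            // First caller wins; the real cluster.start() would run here.
        }
    }
}

// A task receives the service via @ServiceReference instead of capturing project
// state at execution time (the shape of TestClustersAware.getRegistry() above).
abstract class UsesClustersTask extends DefaultTask {
    @ServiceReference("testClustersRegistry") // service name taken from the diff
    public abstract Property<ClusterRegistryService> getRegistry();

    @TaskAction
    public void run() {
        getRegistry().get().maybeStartCluster("integTest-cluster");
    }
}
```

One benefit of this design, suggested by the removed `Spec`/`NotSpec` logic above, is that tasks no longer need `getProject()` during execution, which plays better with Gradle's configuration cache.
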
isEclipse = providers.systemProperty("eclipse.launcher").isPresent() || // Detects gradle launched from Eclipse's IDE - providers.systemProperty("eclipse.application").isPresent() || // Detects gradle launched from the Eclipse compiler server - gradle.startParameter.taskNames.contains('eclipse') || // Detects gradle launched from the command line to do eclipse stuff - gradle.startParameter.taskNames.contains('cleanEclipse') + providers.systemProperty("eclipse.application").isPresent() || // Detects gradle launched from the Eclipse compiler server + gradle.startParameter.taskNames.contains('eclipse') || // Detects gradle launched from the command line to do eclipse stuff + gradle.startParameter.taskNames.contains('cleanEclipse') } ext.bwc_tests_enabled = bwc_tests_enabled @@ -229,10 +231,10 @@ allprojects { eclipse.classpath.file.whenMerged { classpath -> if (false == forbiddenApisTest.bundledSignatures.contains('jdk-non-portable')) { classpath.entries - .findAll { it.kind == "con" && it.toString().contains("org.eclipse.jdt.launching.JRE_CONTAINER") } - .each { - it.accessRules.add(new AccessRule("accessible", "com/sun/net/httpserver/*")) - } + .findAll { it.kind == "con" && it.toString().contains("org.eclipse.jdt.launching.JRE_CONTAINER") } + .each { + it.accessRules.add(new AccessRule("accessible", "com/sun/net/httpserver/*")) + } } } } @@ -248,6 +250,8 @@ allprojects { plugins.withId('lifecycle-base') { if (project.path.startsWith(":x-pack:")) { if (project.path.contains("security") || project.path.contains(":ml")) { + tasks.register('checkPart4') { dependsOn 'check' } + } else if (project.path == ":x-pack:plugin" || project.path.contains("ql") || project.path.contains("smoke-test")) { tasks.register('checkPart3') { dependsOn 'check' } } else { tasks.register('checkPart2') { dependsOn 'check' } @@ -256,7 +260,7 @@ allprojects { tasks.register('checkPart1') { dependsOn 'check' } } - tasks.register('functionalTests') { dependsOn 'check'} + tasks.register('functionalTests') { dependsOn 'check' } } /* @@ -281,7 +285,7 @@ allprojects { // :test:framework:test cannot run before and after :server:test return } - tasks.matching { it.name.equals('integTest')}.configureEach {integTestTask -> + tasks.matching { it.name.equals('integTest') }.configureEach { integTestTask -> integTestTask.mustRunAfter tasks.matching { it.name.equals("test") } } @@ -290,7 +294,7 @@ allprojects { Project upstreamProject = dep.dependencyProject if (project.path != upstreamProject?.path) { for (String taskName : ['test', 'integTest']) { - project.tasks.matching { it.name == taskName }.configureEach {task -> + project.tasks.matching { it.name == taskName }.configureEach { task -> task.shouldRunAfter(upstreamProject.tasks.matching { upStreamTask -> upStreamTask.name == taskName }) } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index fca1e5d29efaf..fdbb5d0c86d6f 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -18,7 +18,6 @@ import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.action.support.ActiveShardCount; import 
org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; @@ -241,7 +240,7 @@ static Request search(SearchRequest searchRequest, String searchEndpoint) throws return request; } - static void addSearchRequestParams(Params params, SearchRequest searchRequest) { + private static void addSearchRequestParams(Params params, SearchRequest searchRequest) { params.putParam(RestSearchAction.TYPED_KEYS_PARAM, "true"); params.withRouting(searchRequest.routing()); params.withPreference(searchRequest.preference()); @@ -268,53 +267,28 @@ static void addSearchRequestParams(Params params, SearchRequest searchRequest) { } } - static Request searchScroll(SearchScrollRequest searchScrollRequest) throws IOException { - Request request = new Request(HttpPost.METHOD_NAME, "/_search/scroll"); - request.setEntity(createEntity(searchScrollRequest, REQUEST_BODY_CONTENT_TYPE)); - return request; - } - - static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException { + private static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException { return createEntity(toXContent, xContentType, ToXContent.EMPTY_PARAMS); } - static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType, ToXContent.Params toXContentParams) + private static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType, ToXContent.Params toXContentParams) throws IOException { BytesRef source = XContentHelper.toXContent(toXContent, xContentType, toXContentParams, false).toBytesRef(); return new NByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType)); } - @Deprecated - static String endpoint(String index, String type, String id) { + private static String endpoint(String index, String type, String id) { return new EndpointBuilder().addPathPart(index, type, id).build(); } - static String endpoint(String index, String id) { + private static String endpoint(String index, String id) { return new EndpointBuilder().addPathPart(index, "_doc", id).build(); } - @Deprecated - static String endpoint(String index, String type, String id, String endpoint) { - return new EndpointBuilder().addPathPart(index, type, id).addPathPartAsIs(endpoint).build(); - } - - static String endpoint(String[] indices, String endpoint) { + private static String endpoint(String[] indices, String endpoint) { return new EndpointBuilder().addCommaSeparatedPathParts(indices).addPathPartAsIs(endpoint).build(); } - @Deprecated - static String endpoint(String[] indices, String[] types, String endpoint) { - return new EndpointBuilder().addCommaSeparatedPathParts(indices) - .addCommaSeparatedPathParts(types) - .addPathPartAsIs(endpoint) - .build(); - } - - @Deprecated - static String endpoint(String[] indices, String endpoint, String type) { - return new EndpointBuilder().addCommaSeparatedPathParts(indices).addPathPartAsIs(endpoint).addPathPart(type).build(); - } - /** * Returns a {@link ContentType} from a given {@link XContentType}. 
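
For orientation, the `endpoint(...)` helpers in this hunk all funnel into `EndpointBuilder`, which (as its source a little further below shows) wraps a `StringJoiner("/", "/", "")`. A standalone approximation of what that produces:

```java
import java.util.StringJoiner;

public class EndpointExample {
    public static void main(String[] args) {
        // Mirrors EndpointBuilder's StringJoiner("/", "/", ""): parts are joined
        // with "/" and the result gets a leading "/" prefix.
        StringJoiner joiner = new StringJoiner("/", "/", "");
        joiner.add("my-index").add("_doc").add("42");
        System.out.println(joiner); // prints /my-index/_doc/42
    }
}
```
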
* @@ -322,7 +296,7 @@ static String endpoint(String[] indices, String endpoint, String type) { * @return the {@link ContentType} */ @SuppressForbidden(reason = "Only allowed place to convert a XContentType to a ContentType") - public static ContentType createContentType(final XContentType xContentType) { + private static ContentType createContentType(final XContentType xContentType) { return ContentType.create(xContentType.mediaTypeWithoutParameters(), (Charset) null); } @@ -330,7 +304,7 @@ public static ContentType createContentType(final XContentType xContentType) { * Utility class to help with common parameter names and patterns. Wraps * a {@link Request} and adds the parameters to it directly. */ - static class Params { + private static class Params { private final Map parameters = new HashMap<>(); Params() {} @@ -478,7 +452,7 @@ Params withIgnoreUnavailable(boolean ignoreUnavailable) { * * @return the {@link IndexRequest}'s content type */ - static XContentType enforceSameContentType(IndexRequest indexRequest, @Nullable XContentType xContentType) { + private static XContentType enforceSameContentType(IndexRequest indexRequest, @Nullable XContentType xContentType) { XContentType requestContentType = indexRequest.getContentType(); if (requestContentType.canonical() != XContentType.JSON && requestContentType.canonical() != XContentType.SMILE) { throw new IllegalArgumentException( @@ -505,7 +479,7 @@ static XContentType enforceSameContentType(IndexRequest indexRequest, @Nullable /** * Utility class to build request's endpoint given its parts as strings */ - static class EndpointBuilder { + private static class EndpointBuilder { private final StringJoiner joiner = new StringJoiner("/", "/", ""); @@ -532,7 +506,7 @@ EndpointBuilder addPathPartAsIs(String... parts) { return this; } - String build() { + private String build() { return joiner.toString(); } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index b0998957910a2..5d779ea17f534 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -23,7 +23,6 @@ import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.aggregations.bucket.adjacency.AdjacencyMatrixAggregationBuilder; import org.elasticsearch.aggregations.bucket.adjacency.ParsedAdjacencyMatrix; @@ -159,7 +158,6 @@ import java.io.Closeable; import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -180,29 +178,6 @@ * High level REST client that wraps an instance of the low level {@link RestClient} and allows to build requests and read responses. The * {@link RestClient} instance is internally built based on the provided {@link RestClientBuilder} and it gets closed automatically when * closing the {@link RestHighLevelClient} instance that wraps it. - *

- * - * In case an already existing instance of a low-level REST client needs to be provided, this class can be subclassed and the - * {@link #RestHighLevelClient(RestClient, CheckedConsumer, List)} constructor can be used. - * <p> - * - * This class can also be sub-classed to expose additional client methods that make use of endpoints added to Elasticsearch through plugins, - * or to add support for custom response sections, again added to Elasticsearch through plugins. - * <p> - * - * The majority of the methods in this class come in two flavors, a blocking and an asynchronous version (e.g. - * {@link #search(SearchRequest, RequestOptions)} and {@link #searchAsync(SearchRequest, RequestOptions, ActionListener)}), where the latter - * takes an implementation of an {@link ActionListener} as an argument that needs to implement methods that handle successful responses and - * failure scenarios. Most of the blocking calls can throw an {@link IOException} or an unchecked {@link ElasticsearchException} in the - * following cases: - * - * <ul> - * <li>an {@link IOException} is usually thrown in case of failing to parse the REST response in the high-level REST client, the request - * times out or similar cases where there is no response coming back from the Elasticsearch server</li> - * <li>an {@link ElasticsearchException} is usually thrown in case where the server returns a 4xx or 5xx error code. The high-level client - * then tries to parse the response body error details into a generic ElasticsearchException and suppresses the original - * {@link ResponseException}</li> - * </ul>
* * @deprecated The High Level Rest Client is deprecated in favor of the * @@ -216,7 +191,7 @@ public class RestHighLevelClient implements Closeable { /** * Environment variable determining whether to send the 7.x compatibility header */ - public static final String API_VERSIONING_ENV_VARIABLE = "ELASTIC_CLIENT_APIVERSIONING"; + private static final String API_VERSIONING_ENV_VARIABLE = "ELASTIC_CLIENT_APIVERSIONING"; // To be called using performClientRequest and performClientRequestAsync to ensure version compatibility check private final RestClient client; @@ -227,14 +202,6 @@ public class RestHighLevelClient implements Closeable { /** Do not access directly but through getVersionValidationFuture() */ private volatile ListenableFuture> versionValidationFuture; - /** - * Creates a {@link RestHighLevelClient} given the low level {@link RestClientBuilder} that allows to build the - * {@link RestClient} to be used to perform requests. - */ - public RestHighLevelClient(RestClientBuilder restClientBuilder) { - this(restClientBuilder.build(), RestClient::close, Collections.emptyList()); - } - /** * Creates a {@link RestHighLevelClient} given the low level {@link RestClient} that it should use to perform requests and * a list of entries that allow to parse custom response sections added to Elasticsearch through plugins. @@ -331,23 +298,6 @@ public final IndexResponse index(IndexRequest indexRequest, RequestOptions optio return performRequestAndParseEntity(indexRequest, RequestConverters::index, options, IndexResponse::fromXContent, emptySet()); } - /** - * Executes a search request using the Search API. - * See Search API on elastic.co - * @param searchRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @return the response - */ - public final SearchResponse search(SearchRequest searchRequest, RequestOptions options) throws IOException { - return performRequestAndParseEntity( - searchRequest, - r -> RequestConverters.search(r, "_search"), - options, - SearchResponse::fromXContent, - emptySet() - ); - } - /** * Asynchronously executes a search using the Search API. * See Search API on elastic.co @@ -368,27 +318,7 @@ public final Cancellable searchAsync(SearchRequest searchRequest, RequestOptions } /** - * Executes a search using the Search Scroll API. - * See Search - * Scroll API on elastic.co - * @param searchScrollRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @return the response - */ - public final SearchResponse scroll(SearchScrollRequest searchScrollRequest, RequestOptions options) throws IOException { - return performRequestAndParseEntity( - searchScrollRequest, - RequestConverters::searchScroll, - options, - SearchResponse::fromXContent, - emptySet() - ); - } - - /** - * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation - * layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}. + * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. */ @Deprecated private Resp performRequestAndParseEntity( @@ -402,8 +332,7 @@ private Resp performRequestAndParseEntity( } /** - * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. 
The Validation - * layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}. + * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. */ @Deprecated private Resp performRequest( @@ -458,8 +387,7 @@ private Resp internalPerformRequest( } /** - * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation - * layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}. + * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. * @return Cancellable instance that may be used to cancel the request */ @Deprecated @@ -482,8 +410,7 @@ private Cancellable performRequestAsyncAndPars } /** - * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation - * layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}. + * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. * @return Cancellable instance that may be used to cancel the request */ @Deprecated diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/Validatable.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/Validatable.java deleted file mode 100644 index b7635f7054299..0000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/Validatable.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -package org.elasticsearch.client; - -import java.util.Optional; - -/** - * Defines a validation layer for Requests. - */ -public interface Validatable { - - Validatable EMPTY = new Validatable() { - }; - - /** - * Perform validation. This method does not have to be overridden in the event that no validation needs to be done, - * or the validation was done during object construction time. A {@link ValidationException} that is not null is - * assumed to contain validation errors and will be thrown. - * - * @return An {@link Optional} {@link ValidationException} that contains a list of validation errors. - */ - default Optional validate() { - return Optional.empty(); - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ValidationException.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ValidationException.java deleted file mode 100644 index d5701c5723096..0000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ValidationException.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ -package org.elasticsearch.client; - -import org.elasticsearch.core.Nullable; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -/** - * Encapsulates an accumulation of validation errors - */ -public class ValidationException extends IllegalArgumentException { - - /** - * Creates {@link ValidationException} instance initialized with given error messages. - * @param error the errors to add - * @return {@link ValidationException} instance - */ - public static ValidationException withError(String... error) { - return withErrors(Arrays.asList(error)); - } - - /** - * Creates {@link ValidationException} instance initialized with given error messages. - * @param errors the list of errors to add - * @return {@link ValidationException} instance - */ - public static ValidationException withErrors(List errors) { - ValidationException e = new ValidationException(); - for (String error : errors) { - e.addValidationError(error); - } - return e; - } - - private final List validationErrors = new ArrayList<>(); - - /** - * Add a new validation error to the accumulating validation errors - * @param error the error to add - */ - public void addValidationError(final String error) { - validationErrors.add(error); - } - - /** - * Adds validation errors from an existing {@link ValidationException} to - * the accumulating validation errors - * @param exception the {@link ValidationException} to add errors from - */ - public final void addValidationErrors(final @Nullable ValidationException exception) { - if (exception != null) { - for (String error : exception.validationErrors()) { - addValidationError(error); - } - } - } - - /** - * Returns the validation errors accumulated - */ - public final List validationErrors() { - return validationErrors; - } - - @Override - public final String getMessage() { - StringBuilder sb = new StringBuilder(); - sb.append("Validation Failed: "); - int index = 0; - for (String error : validationErrors) { - sb.append(++index).append(": ").append(error).append(";"); - } - return sb.toString(); - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/StringStatsAggregationBuilder.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/StringStatsAggregationBuilder.java index 13a72ee64c03f..f28aabe41f4a9 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/StringStatsAggregationBuilder.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/StringStatsAggregationBuilder.java @@ -22,7 +22,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; -import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry; import org.elasticsearch.search.aggregations.support.ValuesSourceType; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.xcontent.ParseField; @@ -71,12 +70,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - // This would be called from the same thing that calls innerBuild, which also throws. So it's "safe" to throw here. 
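
The deleted `ValidationException` above accumulates error strings and renders them in `getMessage()`. A tiny standalone reproduction of that message format, reconstructed from the removed source (this is not an Elasticsearch API, just an illustration):

```java
import java.util.ArrayList;
import java.util.List;

public class ValidationMessageExample {
    public static void main(String[] args) {
        List<String> validationErrors = new ArrayList<>();
        validationErrors.add("index is missing");
        validationErrors.add("id is missing");

        // Same numbering and ";" separator as the removed getMessage() above.
        StringBuilder sb = new StringBuilder("Validation Failed: ");
        int index = 0;
        for (String error : validationErrors) {
            sb.append(++index).append(": ").append(error).append(";");
        }
        System.out.println(sb); // Validation Failed: 1: index is missing;2: id is missing;
    }
}
```
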
- throw new UnsupportedOperationException(); - } - @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { return builder.field(StringStatsAggregationBuilder.SHOW_DISTRIBUTION_FIELD.getPreferredName(), showDistribution); diff --git a/distribution/build.gradle b/distribution/build.gradle index 90af1472deb2e..e45f1d09625d6 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -14,6 +14,7 @@ import org.elasticsearch.gradle.internal.ConcatFilesTask import org.elasticsearch.gradle.internal.DependenciesInfoPlugin import org.elasticsearch.gradle.internal.NoticeTask import org.elasticsearch.gradle.internal.info.BuildParams +import org.elasticsearch.gradle.internal.test.HistoricalFeaturesMetadataPlugin import java.nio.file.Files import java.nio.file.Path @@ -30,6 +31,15 @@ configurations { attribute(Category.CATEGORY_ATTRIBUTE, project.getObjects().named(Category.class, Category.DOCUMENTATION)) } } + featuresMetadata { + attributes { + attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, HistoricalFeaturesMetadataPlugin.FEATURES_METADATA_TYPE) + } + } +} + +dependencies { + featuresMetadata project(':server') } def thisProj = project @@ -196,6 +206,7 @@ project.rootProject.subprojects.findAll { it.parent.path == ':modules' }.each { } distro.copyModule(processDefaultOutputsTaskProvider, module) + dependencies.add('featuresMetadata', module) if (module.name.startsWith('transport-') || (BuildParams.snapshotBuild == false && module.name == 'apm')) { distro.copyModule(processIntegTestOutputsTaskProvider, module) } @@ -214,6 +225,7 @@ xpack.subprojects.findAll { it.parent == xpack }.each { Project xpackModule -> } } distro.copyModule(processDefaultOutputsTaskProvider, xpackModule) + dependencies.add('featuresMetadata', xpackModule) if (xpackModule.name.equals('core') || xpackModule.name.equals('security')) { distro.copyModule(processIntegTestOutputsTaskProvider, xpackModule) } diff --git a/distribution/tools/java-version-checker/build.gradle b/distribution/tools/java-version-checker/build.gradle index 39f9bbf536dda..0a47d0652e465 100644 --- a/distribution/tools/java-version-checker/build.gradle +++ b/distribution/tools/java-version-checker/build.gradle @@ -8,15 +8,17 @@ tasks.named(sourceSets.unsupportedJdkVersionEntrypoint.compileJavaTaskName).conf targetCompatibility = JavaVersion.VERSION_1_8 } + tasks.named("jar") { manifest { attributes("Multi-Release": "true") } + FileCollection mainOutput = sourceSets.main.output; from(sourceSets.unsupportedJdkVersionEntrypoint.output) eachFile { details -> if (details.path.equals("org/elasticsearch/tools/java_version_checker/JavaVersionChecker.class") && - sourceSets.main.output.asFileTree.contains(details.file)) { + mainOutput.asFileTree.contains(details.file)) { details.relativePath = details.relativePath.prepend("META-INF/versions/17") } } diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java index b6cd680cb5816..9dcd630f52631 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java @@ -145,7 +145,7 @@ static List apmJvmOptions(Settings settings, @Nullable SecureSettings se // Configures a log file to write to. 
Don't disable writing to a log file, // as the agent will then require extra Security Manager permissions when // it tries to do something else, and it's just painful. - propertiesMap.put("log_file", logsDir.resolve("apm-agent.log").toString()); + propertiesMap.put("log_file", logsDir.resolve("apm-agent.json").toString()); // No point doing anything if we don't have a destination for the trace data, and it can't be configured dynamically if (propertiesMap.containsKey("server_url") == false && propertiesMap.containsKey("server_urls") == false) { diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOptionsParser.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOptionsParser.java index 5999f618bc0ab..29650e4b74114 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOptionsParser.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOptionsParser.java @@ -137,7 +137,7 @@ private List jvmOptions( ); substitutedJvmOptions.addAll(machineDependentHeap.determineHeapSettings(config, substitutedJvmOptions)); final List ergonomicJvmOptions = JvmErgonomics.choose(substitutedJvmOptions); - final List systemJvmOptions = SystemJvmOptions.systemJvmOptions(); + final List systemJvmOptions = SystemJvmOptions.systemJvmOptions(args.nodeSettings()); final List apmOptions = APMJvmOptions.apmJvmOptions(args.nodeSettings(), args.secrets(), args.logsDir(), tmpDir); diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java index a55a303517d6f..6e250075f7747 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java @@ -8,13 +8,16 @@ package org.elasticsearch.server.cli; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.EsExecutors; + import java.util.List; import java.util.stream.Collectors; import java.util.stream.Stream; final class SystemJvmOptions { - static List systemJvmOptions() { + static List systemJvmOptions(Settings nodeSettings) { return Stream.of( /* * Cache ttl in seconds for positive DNS lookups noting that this overrides the JDK security property networkaddress.cache.ttl; @@ -61,7 +64,8 @@ static List systemJvmOptions() { * explore alternatives. See org.elasticsearch.xpack.searchablesnapshots.preallocate.Preallocate. */ "--add-opens=java.base/java.io=org.elasticsearch.preallocate", - maybeOverrideDockerCgroup() + maybeOverrideDockerCgroup(), + maybeSetActiveProcessorCount(nodeSettings) ).filter(e -> e.isEmpty() == false).collect(Collectors.toList()); } @@ -85,4 +89,16 @@ private static String maybeOverrideDockerCgroup() { } return ""; } + + /* + * node.processors determines thread pool sizes for Elasticsearch. 
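
As a rough sketch of what the new `maybeSetActiveProcessorCount` helper (continued just below) emits: the tests further down show that a fractional `node.processors` of 0.2 yields `-XX:ActiveProcessorCount=1`, so this sketch assumes rounding up to a whole CPU; the real derivation lives in `EsExecutors.allocatedProcessors`, which is not shown here.

```java
public class ActiveProcessorCountExample {
    // Assumption: fractional node.processors values round up to a whole CPU,
    // matching the "check rounding" test case below (0.2 -> 1).
    static String jvmOption(double nodeProcessors) {
        int allocated = (int) Math.ceil(nodeProcessors);
        return "-XX:ActiveProcessorCount=" + allocated;
    }

    public static void main(String[] args) {
        System.out.println(jvmOption(0.2)); // -XX:ActiveProcessorCount=1
        System.out.println(jvmOption(4));   // -XX:ActiveProcessorCount=4
    }
}
```
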
When it + * is set, we need to also tell the JVM to respect a different value + */ + private static String maybeSetActiveProcessorCount(Settings nodeSettings) { + if (EsExecutors.NODE_PROCESSORS_SETTING.exists(nodeSettings)) { + int allocated = EsExecutors.allocatedProcessors(nodeSettings); + return "-XX:ActiveProcessorCount=" + allocated; + } + return ""; + } } diff --git a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmOptionsParserTests.java b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmOptionsParserTests.java index 5d63f29ac584e..03856b1024992 100644 --- a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmOptionsParserTests.java +++ b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmOptionsParserTests.java @@ -8,6 +8,8 @@ package org.elasticsearch.server.cli; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Strings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.ESTestCase.WithoutSecurityManager; @@ -28,10 +30,13 @@ import java.util.concurrent.atomic.AtomicBoolean; import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.not; @WithoutSecurityManager public class JvmOptionsParserTests extends ESTestCase { @@ -344,4 +349,27 @@ public void accept(final int lineNumber, final String line) { assertThat(seenInvalidLines, equalTo(invalidLines)); } + public void testNodeProcessorsActiveCount() { + { + final List jvmOptions = SystemJvmOptions.systemJvmOptions(Settings.EMPTY); + assertThat(jvmOptions, not(hasItem(containsString("-XX:ActiveProcessorCount=")))); + } + { + Settings nodeSettings = Settings.builder().put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), 1).build(); + final List jvmOptions = SystemJvmOptions.systemJvmOptions(nodeSettings); + assertThat(jvmOptions, hasItem("-XX:ActiveProcessorCount=1")); + } + { + // check rounding + Settings nodeSettings = Settings.builder().put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), 0.2).build(); + final List jvmOptions = SystemJvmOptions.systemJvmOptions(nodeSettings); + assertThat(jvmOptions, hasItem("-XX:ActiveProcessorCount=1")); + } + { + // check validation + Settings nodeSettings = Settings.builder().put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), 10000).build(); + var e = expectThrows(IllegalArgumentException.class, () -> SystemJvmOptions.systemJvmOptions(nodeSettings)); + assertThat(e.getMessage(), containsString("setting [node.processors] must be <=")); + } + } } diff --git a/docs/changelog/100018.yaml b/docs/changelog/100018.yaml deleted file mode 100644 index b39089db568c0..0000000000000 --- a/docs/changelog/100018.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100018 -summary: Improve time-series error and documentation -area: "TSDB" -type: enhancement -issues: [] diff --git a/docs/changelog/100020.yaml b/docs/changelog/100020.yaml deleted file mode 100644 index 9f97778860eef..0000000000000 --- a/docs/changelog/100020.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100020 -summary: "[CI] `SearchResponseTests#testSerialization` failing resolved" -area: Search -type: bug -issues: - - 100005 diff --git 
a/docs/changelog/100064.yaml b/docs/changelog/100064.yaml deleted file mode 100644 index f595b7e8e0705..0000000000000 --- a/docs/changelog/100064.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100064 -summary: Update the elastic-apm-agent version -area: Infra/Core -type: enhancement -issues: [] diff --git a/docs/changelog/100092.yaml b/docs/changelog/100092.yaml deleted file mode 100644 index e86b856caf3ad..0000000000000 --- a/docs/changelog/100092.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100092 -summary: Compute SLM retention from `RepositoryData` -area: ILM+SLM -type: bug -issues: - - 99953 diff --git a/docs/changelog/100129.yaml b/docs/changelog/100129.yaml deleted file mode 100644 index aa2c6961b6681..0000000000000 --- a/docs/changelog/100129.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100129 -summary: Refactor `SearchResponseClusters` to use CHM -area: Search -type: enhancement -issues: - - 99101 diff --git a/docs/changelog/100138.yaml b/docs/changelog/100138.yaml deleted file mode 100644 index 0df2004f8539d..0000000000000 --- a/docs/changelog/100138.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100138 -summary: Upgrade main to Lucene 9.8.0 -area: Search -type: upgrade -issues: [] diff --git a/docs/changelog/100143.yaml b/docs/changelog/100143.yaml deleted file mode 100644 index c61a2a8bc7a13..0000000000000 --- a/docs/changelog/100143.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100143 -summary: Preserve order of inference results when calling the _infer API with multiple inputs on a model deployment with more than one allocation the output results order was not guaranteed to match the input order. The fix ensures the output order matches the input order. -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/100154.yaml b/docs/changelog/100154.yaml deleted file mode 100644 index 5e75102390c61..0000000000000 --- a/docs/changelog/100154.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100154 -summary: Log warnings for jobs unassigned for a long time -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/100187.yaml b/docs/changelog/100187.yaml deleted file mode 100644 index f0ab9257e7127..0000000000000 --- a/docs/changelog/100187.yaml +++ /dev/null @@ -1,17 +0,0 @@ -pr: 100187 -summary: GA the data stream lifecycle -area: Data streams -type: "feature" -issues: [] -highlight: - title: The data stream lifecycle is now in Technical Preview - body: "This marks the data stream lifecycle as available in Technical Preview. - Data streams will be able to take advantage of a built-in simplified and - resilient lifecycle implementation. Data streams with a configured lifecycle will - be automatically rolled over and tail merged (a forcemerge implementation that's - lightweight and only merges the long tail of small segments instead of the - whole shard). With the shard and index maintenance tasks being handled automatically - to ensure optimum performance, and trade-off between indexing and searching, - you'll be able to focus on the business related lifecycle aspects like data - retention." 
- notable: true diff --git a/docs/changelog/100199.yaml b/docs/changelog/100199.yaml deleted file mode 100644 index 0f609194813c5..0000000000000 --- a/docs/changelog/100199.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100199 -summary: "ESQL: Simple check if all blocks get released" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/100205.yaml b/docs/changelog/100205.yaml deleted file mode 100644 index 41b16465ef4c5..0000000000000 --- a/docs/changelog/100205.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100205 -summary: Simplify the Inference Ingest Processor configuration -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/100232.yaml b/docs/changelog/100232.yaml deleted file mode 100644 index 3f8336b6c241c..0000000000000 --- a/docs/changelog/100232.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100232 -summary: "Tracing: Use `doPriv` when working with spans, use `SpanId`" -area: Infra/Core -type: bug -issues: [] diff --git a/docs/changelog/100238.yaml b/docs/changelog/100238.yaml deleted file mode 100644 index 70e3f5340e223..0000000000000 --- a/docs/changelog/100238.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100238 -summary: "ESQL: Remove aliasing inside Eval" -area: ES|QL -type: bug -issues: - - 100174 diff --git a/docs/changelog/100253.yaml b/docs/changelog/100253.yaml deleted file mode 100644 index 7a9d3f3fb13d7..0000000000000 --- a/docs/changelog/100253.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100253 -summary: Propagate cancellation in `DataTiersUsageTransportAction` -area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/100273.yaml b/docs/changelog/100273.yaml deleted file mode 100644 index 4ccd52d033aa7..0000000000000 --- a/docs/changelog/100273.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100273 -summary: Propagate cancellation in `GetHealthAction` -area: Health -type: bug -issues: [] diff --git a/docs/changelog/100323.yaml b/docs/changelog/100323.yaml deleted file mode 100644 index de50da6ec8cf9..0000000000000 --- a/docs/changelog/100323.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100323 -summary: "CCR: Use local cluster state request" -area: CCR -type: bug -issues: [] diff --git a/docs/changelog/100351.yaml b/docs/changelog/100351.yaml deleted file mode 100644 index d8ba19b70cbed..0000000000000 --- a/docs/changelog/100351.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100351 -summary: "ESQL: support metric tsdb fields while querying index patterns" -area: ES|QL -type: bug -issues: - - 100144 diff --git a/docs/changelog/100360.yaml b/docs/changelog/100360.yaml deleted file mode 100644 index 6d0dcafe16a8f..0000000000000 --- a/docs/changelog/100360.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100360 -summary: "ESQL: Limit how many bytes `concat()` can process" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/100370.yaml b/docs/changelog/100370.yaml deleted file mode 100644 index 3e2e1b762c654..0000000000000 --- a/docs/changelog/100370.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 100370 -summary: "ESQL: Page shouldn't close a block twice" -area: ES|QL -type: bug -issues: - - 100356 - - 100365 diff --git a/docs/changelog/100377.yaml b/docs/changelog/100377.yaml deleted file mode 100644 index a4cbb0ba46a61..0000000000000 --- a/docs/changelog/100377.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100377 -summary: "ESQL: Add identity check in Block equality" -area: ES|QL -type: bug -issues: - - 100374 diff --git a/docs/changelog/100388.yaml b/docs/changelog/100388.yaml deleted file mode 100644 index 4b596b6ea23b6..0000000000000 --- a/docs/changelog/100388.yaml 
+++ /dev/null @@ -1,6 +0,0 @@ -pr: 100388 -summary: Fix for inference requests being sent to every node with a model allocation. If there are more nodes than items in the original request then empty requests were sent. -area: Machine Learning -type: bug -issues: - - 100180 diff --git a/docs/changelog/100447.yaml b/docs/changelog/100447.yaml deleted file mode 100644 index c20eb1599cf41..0000000000000 --- a/docs/changelog/100447.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100447 -summary: Reinstate `RepositoryData` BwC -area: Snapshot/Restore -type: bug -issues: [] diff --git a/docs/changelog/100470.yaml b/docs/changelog/100470.yaml deleted file mode 100644 index 3408ae06f7fe9..0000000000000 --- a/docs/changelog/100470.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100470 -summary: DSL waits for the tsdb time boundaries to lapse -area: Data streams -type: bug -issues: - - 99696 diff --git a/docs/changelog/100594.yaml b/docs/changelog/100594.yaml deleted file mode 100644 index 62d2a8933b9ad..0000000000000 --- a/docs/changelog/100594.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100594 -summary: Grant editor and viewer access to profiling -area: Authorization -type: bug -issues: [] diff --git a/docs/changelog/100610.yaml b/docs/changelog/100610.yaml deleted file mode 100644 index 7423ce9225868..0000000000000 --- a/docs/changelog/100610.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 100610 -summary: Fix interruption of `markAllocationIdAsInSync` -area: Recovery -type: bug -issues: - - 96578 - - 100589 diff --git a/docs/changelog/100624.yaml b/docs/changelog/100624.yaml deleted file mode 100644 index 247343bf03ed8..0000000000000 --- a/docs/changelog/100624.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100624 -summary: Make Transform Feature Reset really wait for all the tasks -area: Transform -type: bug -issues: [] diff --git a/docs/changelog/100645.yaml b/docs/changelog/100645.yaml deleted file mode 100644 index e6bb6ab0fd653..0000000000000 --- a/docs/changelog/100645.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 100645 -summary: "ESQL: Graceful handling of non-bool condition in the filter" -area: ES|QL -type: bug -issues: - - 100049 - - 100409 diff --git a/docs/changelog/100647.yaml b/docs/changelog/100647.yaml deleted file mode 100644 index 399407146af68..0000000000000 --- a/docs/changelog/100647.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100647 -summary: "ESQL: Handle queries with non-existing enrich policies and no field" -area: ES|QL -type: bug -issues: - - 100593 diff --git a/docs/changelog/100650.yaml b/docs/changelog/100650.yaml deleted file mode 100644 index 96d7bc0571403..0000000000000 --- a/docs/changelog/100650.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100650 -summary: "ESQL: Improve verifier error for incorrect agg declaration" -area: ES|QL -type: bug -issues: - - 100641 diff --git a/docs/changelog/100656.yaml b/docs/changelog/100656.yaml deleted file mode 100644 index 1ee9a2ad0e47a..0000000000000 --- a/docs/changelog/100656.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100656 -summary: "ESQL: fix non-null value being returned for unsupported data types in `ValueSources`" -area: ES|QL -type: bug -issues: - - 100048 diff --git a/docs/changelog/100707.yaml b/docs/changelog/100707.yaml deleted file mode 100644 index 6808b781b603a..0000000000000 --- a/docs/changelog/100707.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100707 -summary: Allow `enrich_user` to read/view enrich indices -area: Authorization -type: bug -issues: [] diff --git a/docs/changelog/100760.yaml b/docs/changelog/100760.yaml deleted file mode 100644 index 
b8d149fff5758..0000000000000 --- a/docs/changelog/100760.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100760 -summary: Remove noisy 'Could not find trained model' message -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/100766.yaml b/docs/changelog/100766.yaml deleted file mode 100644 index c7a3d0479afd6..0000000000000 --- a/docs/changelog/100766.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100766 -summary: "ESQL: Properly handle multi-values in fold() and date math" -area: ES|QL -type: bug -issues: - - 100497 diff --git a/docs/changelog/100779.yaml b/docs/changelog/100779.yaml deleted file mode 100644 index 2d7f40f5b34da..0000000000000 --- a/docs/changelog/100779.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100779 -summary: Fix NullPointerException in RotableSecret -area: Security -type: bug -issues: - - 99759 diff --git a/docs/changelog/100782.yaml b/docs/changelog/100782.yaml deleted file mode 100644 index c6007bfb4d9ba..0000000000000 --- a/docs/changelog/100782.yaml +++ /dev/null @@ -1,8 +0,0 @@ -pr: 100782 -summary: "ESQL: `mv_expand` pushes down limit and project and keep the limit after\ - \ it untouched" -area: ES|QL -type: bug -issues: - - 99971 - - 100774 diff --git a/docs/changelog/100808.yaml b/docs/changelog/100808.yaml deleted file mode 100644 index 1abbfdcebf74e..0000000000000 --- a/docs/changelog/100808.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100808 -summary: Make tasks that calculate checkpoints cancellable -area: Transform -type: bug -issues: [] diff --git a/docs/changelog/100846.yaml b/docs/changelog/100846.yaml deleted file mode 100644 index d13fb78b697a2..0000000000000 --- a/docs/changelog/100846.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100846 -summary: Consistent scores for multi-term `SourceConfirmedTestQuery` -area: Search -type: bug -issues: - - 98712 diff --git a/docs/changelog/100866.yaml b/docs/changelog/100866.yaml deleted file mode 100644 index 67a22cc1e0996..0000000000000 --- a/docs/changelog/100866.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100866 -summary: "ESQL: Preserve intermediate aggregation output in local relation" -area: ES|QL -type: bug -issues: - - 100807 diff --git a/docs/changelog/100872.yaml b/docs/changelog/100872.yaml deleted file mode 100644 index 9877afa28982e..0000000000000 --- a/docs/changelog/100872.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100872 -summary: Improve painless error wrapping -area: Infra/Scripting -type: bug -issues: [] diff --git a/docs/changelog/100875.yaml b/docs/changelog/100875.yaml deleted file mode 100644 index bd0ca59e8b8f0..0000000000000 --- a/docs/changelog/100875.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100875 -summary: Preserve subfields for unsupported types -area: "Query Languages" -type: bug -issues: - - 100869 diff --git a/docs/changelog/100886.yaml b/docs/changelog/100886.yaml deleted file mode 100644 index b926f924c7a7c..0000000000000 --- a/docs/changelog/100886.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100886 -summary: Use the correct writable name for model assignment metadata in mixed version clusters. 
Prevents a node failure due to IllegalArgumentException Unknown NamedWriteable [trained_model_assignment] -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/100911.yaml b/docs/changelog/100911.yaml deleted file mode 100644 index baab6f2482a76..0000000000000 --- a/docs/changelog/100911.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100911 -summary: '`WaitForSnapshotStep` verifies if the index belongs to the latest snapshot - of that SLM policy' -area: ILM+SLM -type: bug -issues: [] diff --git a/docs/changelog/101001.yaml b/docs/changelog/101001.yaml deleted file mode 100644 index 3ebcefc2c8045..0000000000000 --- a/docs/changelog/101001.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101001 -summary: "ESQL: Support date and time intervals as input params" -area: ES|QL -type: bug -issues: - - 99570 diff --git a/docs/changelog/101012.yaml b/docs/changelog/101012.yaml deleted file mode 100644 index 1d5f62bdddba7..0000000000000 --- a/docs/changelog/101012.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101012 -summary: Adjust `DateHistogram's` bucket accounting to be iteratively -area: Aggregations -type: bug -issues: [] diff --git a/docs/changelog/101051.yaml b/docs/changelog/101051.yaml deleted file mode 100644 index 05e7443dac8b3..0000000000000 --- a/docs/changelog/101051.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101051 -summary: Percolator to support parsing script score query with params -area: Mapping -type: bug -issues: - - 97377 diff --git a/docs/changelog/101120.yaml b/docs/changelog/101120.yaml deleted file mode 100644 index bf359eb21be9f..0000000000000 --- a/docs/changelog/101120.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101120 -summary: "ESQL: Fix escaping of backslash in LIKE operator" -area: ES|QL -type: bug -issues: - - 101106 diff --git a/docs/changelog/101133.yaml b/docs/changelog/101133.yaml deleted file mode 100644 index 546a5392c309a..0000000000000 --- a/docs/changelog/101133.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101133 -summary: Update bundled JDK to 21.0.1 -area: Packaging -type: upgrade -issues: [] diff --git a/docs/changelog/101184.yaml b/docs/changelog/101184.yaml deleted file mode 100644 index ac2f5f3ee8af1..0000000000000 --- a/docs/changelog/101184.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101184 -summary: More robust timeout for repo analysis -area: Snapshot/Restore -type: bug -issues: - - 101182 diff --git a/docs/changelog/101205.yaml b/docs/changelog/101205.yaml deleted file mode 100644 index 528f6fb35846e..0000000000000 --- a/docs/changelog/101205.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101205 -summary: Increase K/V look-back time interval -area: Application -type: bug -issues: [] diff --git a/docs/changelog/101212.yaml b/docs/changelog/101212.yaml deleted file mode 100644 index ed2b433209e8d..0000000000000 --- a/docs/changelog/101212.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101212 -summary: Fix painless execute api and tsdb issue -area: TSDB -type: bug -issues: - - 101072 diff --git a/docs/changelog/101245.yaml b/docs/changelog/101245.yaml deleted file mode 100644 index 2f9fef318f31a..0000000000000 --- a/docs/changelog/101245.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101245 -summary: Make S3 anti-contention delay configurable -area: Snapshot/Restore -type: bug -issues: [] diff --git a/docs/changelog/101255.yaml b/docs/changelog/101255.yaml deleted file mode 100644 index 37d8f7e3c14fe..0000000000000 --- a/docs/changelog/101255.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101255 -summary: Provide stable resampling -area: Application -type: bug -issues: [] diff --git 
a/docs/changelog/101264.yaml b/docs/changelog/101264.yaml deleted file mode 100644 index 7160240b2f3a0..0000000000000 --- a/docs/changelog/101264.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101264 -summary: Align look-back with client-side cache -area: Application -type: bug -issues: [] diff --git a/docs/changelog/101265.yaml b/docs/changelog/101265.yaml deleted file mode 100644 index f39b57fa9a75e..0000000000000 --- a/docs/changelog/101265.yaml +++ /dev/null @@ -1,13 +0,0 @@ -pr: 101265 -summary: Rollup functionality is now deprecated -area: Rollup -type: deprecation -issues: [] -deprecation: - title: >- - Rollup functionality is now deprecated - area: Rollup - details: |- - {ref}/xpack-rollup[Rollup functionality] has been deprecated and will be removed in a future release. Previously, rollups were available in technical preview. - impact: |- - Use {ref}/downsampling.html[downsampling] to reduce storage costs for time series data by by storing it at reduced granularity. diff --git a/docs/changelog/101333.yaml b/docs/changelog/101333.yaml new file mode 100644 index 0000000000000..4452687b995d3 --- /dev/null +++ b/docs/changelog/101333.yaml @@ -0,0 +1,29 @@ +pr: 101333 +summary: Fixed JWT principal from claims +area: Authorization +type: breaking +issues: [] +breaking: + title: Fixed JWT principal from claims + area: Authorization + details: "This changes the format of a JWT's principal before the JWT is actually\ + \ validated by any JWT realm. The JWT's principal is a convenient way to refer\ + \ to a JWT that has not yet been verified by a JWT realm. The JWT's principal\ + \ is printed in the audit and regular logs (notably for auditing authn failures)\ + \ as well as the smart realm chain reordering optimization. The JWT principal\ + \ is NOT required to be identical to the JWT-authenticated user's principal, but\ + \ in general, they should be similar. Previously, the JWT's principal was built\ + \ by individual realms in the same way the realms built the authenticated user's\ + \ principal. This had the advantage that, in simpler JWT realms configurations\ + \ (e.g. a single JWT realm in the chain), the JWT principal and the authenticated\ + \ user's principal are very similar. However the drawback is that, in general,\ + \ the JWT principal and the user principal can be very different (i.e. in the\ + \ case where one JWT realm builds the JWT principal and a different one builds\ + \ the user principal). Another downside is that the (unauthenticated) JWT principal\ + \ depended on realm ordering, which makes identifying the JWT from its principal\ + \ dependent on the ES authn realm configuration. This PR implements a consistent\ + \ fixed logic to build the JWT principal, which only depends on the JWT's claims\ + \ and no ES configuration." + impact: "Users will observe changed format and values for the `user.name` attribute\ + \ of `authentication_failed` audit log events, in the JWT (failed) authn case." 
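
The 101333 entry above describes deriving the JWT principal from the token's claims alone, independent of realm configuration and ordering. A purely illustrative sketch of claim-only principal building follows; the fallback logic here is invented and is not the actual Elasticsearch implementation.

```java
import java.util.Map;
import java.util.TreeMap;

public class JwtPrincipalExample {
    static String principal(Map<String, Object> claims) {
        Object iss = claims.get("iss");
        Object sub = claims.get("sub");
        if (iss != null && sub != null) {
            // Stable label that depends only on the token itself (assumption).
            return iss + "/" + sub;
        }
        // Deterministic fallback over all claims, so the label never depends
        // on realm order (invented for illustration).
        return new TreeMap<>(claims).toString();
    }

    public static void main(String[] args) {
        System.out.println(principal(Map.of("iss", "https://idp.example.com", "sub", "user-1")));
    }
}
```
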
+ notable: false diff --git a/docs/changelog/101344.yaml b/docs/changelog/101344.yaml deleted file mode 100644 index b546e743301f6..0000000000000 --- a/docs/changelog/101344.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101344 -summary: Register `repository_s3` settings -area: Snapshot/Restore -type: bug -issues: [] diff --git a/docs/changelog/101358.yaml b/docs/changelog/101358.yaml deleted file mode 100644 index 3ae2a44e15e5e..0000000000000 --- a/docs/changelog/101358.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101358 -summary: Make DISSECT parameter `append_separator` case insensitive -area: ES|QL -type: bug -issues: - - 101138 diff --git a/docs/changelog/101362.yaml b/docs/changelog/101362.yaml deleted file mode 100644 index e1d763cd416fa..0000000000000 --- a/docs/changelog/101362.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101362 -summary: "ESQL: Remove the swapped-args check for date_xxx()" -area: ES|QL -type: enhancement -issues: - - 99562 diff --git a/docs/changelog/101438.yaml b/docs/changelog/101438.yaml deleted file mode 100644 index 8189ee96b6576..0000000000000 --- a/docs/changelog/101438.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101438 -summary: "ESQL: Fix eval of functions on foldable literals" -area: ES|QL -type: bug -issues: - - 101425 diff --git a/docs/changelog/101456.yaml b/docs/changelog/101456.yaml deleted file mode 100644 index db55dfbde1c64..0000000000000 --- a/docs/changelog/101456.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101456 -summary: "ESQL: adds Enrich implicit `match_fields` to `field_caps` call" -area: ES|QL -type: bug -issues: - - 101328 diff --git a/docs/changelog/101486.yaml b/docs/changelog/101486.yaml deleted file mode 100644 index 99795feda328f..0000000000000 --- a/docs/changelog/101486.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101486 -summary: Improving tika handling -area: Ingest Node -type: bug -issues: [] diff --git a/docs/changelog/101492.yaml b/docs/changelog/101492.yaml deleted file mode 100644 index 2c3cdeee21bbb..0000000000000 --- a/docs/changelog/101492.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101492 -summary: "ESQL: check type before casting" -area: ES|QL -type: bug -issues: - - 101489 diff --git a/docs/changelog/101495.yaml b/docs/changelog/101495.yaml deleted file mode 100644 index f61c9b824b77c..0000000000000 --- a/docs/changelog/101495.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101495 -summary: "[DSL] skip deleting indices that have in-progress downsampling operations" -area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/101497.yaml b/docs/changelog/101497.yaml deleted file mode 100644 index 7909cb1ecdc0d..0000000000000 --- a/docs/changelog/101497.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101497 -summary: Fix snapshot double finalization -area: Snapshot/Restore -type: bug -issues: [] diff --git a/docs/changelog/101516.yaml b/docs/changelog/101516.yaml deleted file mode 100644 index a5445102c33c6..0000000000000 --- a/docs/changelog/101516.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101516 -summary: "Make settings dynamic" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/101577.yaml b/docs/changelog/101577.yaml new file mode 100644 index 0000000000000..e485fd3811cb6 --- /dev/null +++ b/docs/changelog/101577.yaml @@ -0,0 +1,5 @@ +pr: 101577 +summary: Add metrics to the shared blob cache +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/101609.yaml b/docs/changelog/101609.yaml new file mode 100644 index 0000000000000..27993574743d2 --- /dev/null +++ b/docs/changelog/101609.yaml @@ -0,0 +1,9 @@ +pr: 101609 
+summary: > + Add a node feature join barrier. This prevents nodes from joining clusters that do not have + all the features already present in the cluster. This ensures that once a feature is supported + by all the nodes in a cluster, that feature will never become unsupported in the future. + This is the counterpart of the version join barrier, but for features. +area: "Cluster Coordination" +type: feature +issues: [] diff --git a/docs/changelog/101627.yaml b/docs/changelog/101627.yaml deleted file mode 100644 index 07992efd8bb3c..0000000000000 --- a/docs/changelog/101627.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101627 -summary: Ignore `IndexNotFound` error when refreshing destination index -area: Transform -type: bug -issues: [] diff --git a/docs/changelog/101629.yaml b/docs/changelog/101629.yaml deleted file mode 100644 index 1b8691c9798ff..0000000000000 --- a/docs/changelog/101629.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101629 -summary: Health report infrastructure doesn't trip the circuit breakers -area: Health -type: bug -issues: [] diff --git a/docs/changelog/101648.yaml b/docs/changelog/101648.yaml deleted file mode 100644 index 48e01739aabc0..0000000000000 --- a/docs/changelog/101648.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101648 -summary: "ESQL: Fix unreleased block in topn" -area: ES|QL -type: bug -issues: - - 101588 diff --git a/docs/changelog/101652.yaml b/docs/changelog/101652.yaml deleted file mode 100644 index 79e3167696aee..0000000000000 --- a/docs/changelog/101652.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101652 -summary: Fix race condition in `SnapshotsService` -area: Snapshot/Restore -type: bug -issues: [] diff --git a/docs/changelog/101660.yaml b/docs/changelog/101660.yaml new file mode 100644 index 0000000000000..cb3d3118d15a6 --- /dev/null +++ b/docs/changelog/101660.yaml @@ -0,0 +1,6 @@ +pr: 101660 +summary: Fall through malformed JWTs to subsequent realms in the chain +area: Authentication +type: bug +issues: + - 101367 diff --git a/docs/changelog/101682.yaml b/docs/changelog/101682.yaml new file mode 100644 index 0000000000000..e512006057581 --- /dev/null +++ b/docs/changelog/101682.yaml @@ -0,0 +1,5 @@ +pr: 101682 +summary: "Add manage_enrich cluster privilege to kibana_system role" +area: Authentication +type: enhancement +issues: [] diff --git a/docs/changelog/101700.yaml b/docs/changelog/101700.yaml new file mode 100644 index 0000000000000..08671360688a7 --- /dev/null +++ b/docs/changelog/101700.yaml @@ -0,0 +1,5 @@ +pr: 101700 +summary: Fix `lastUnsafeSegmentGenerationForGets` for realtime get +area: Engine +type: bug +issues: [] diff --git a/docs/changelog/101713.yaml b/docs/changelog/101713.yaml deleted file mode 100644 index c3addf9296584..0000000000000 --- a/docs/changelog/101713.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101713 -summary: Disable `weight_matches` when kNN query is present -area: Highlighting -type: bug -issues: [] diff --git a/docs/changelog/101723.yaml b/docs/changelog/101723.yaml new file mode 100644 index 0000000000000..146d164805f00 --- /dev/null +++ b/docs/changelog/101723.yaml @@ -0,0 +1,6 @@ +pr: 101723 +summary: Allowing non-dynamic index settings to be updated by automatically unassigning + shards +area: Indices APIs +type: enhancement +issues: [] diff --git a/docs/changelog/101753.yaml b/docs/changelog/101753.yaml new file mode 100644 index 0000000000000..7b64075998430 --- /dev/null +++ b/docs/changelog/101753.yaml @@ -0,0 +1,5 @@ +pr: 101753 +summary: Expose roles by default in cat allocation API +area: CAT APIs
+type: enhancement +issues: [] diff --git a/docs/changelog/101788.yaml b/docs/changelog/101788.yaml new file mode 100644 index 0000000000000..b7cc1e20663e8 --- /dev/null +++ b/docs/changelog/101788.yaml @@ -0,0 +1,6 @@ +pr: 101788 +summary: "ESQL: Narrow catch in convert functions" +area: ES|QL +type: bug +issues: + - 100820 diff --git a/docs/changelog/101802.yaml b/docs/changelog/101802.yaml new file mode 100644 index 0000000000000..20e857c32f664 --- /dev/null +++ b/docs/changelog/101802.yaml @@ -0,0 +1,5 @@ +pr: 101802 +summary: Correctly logging watcher history write failures +area: Watcher +type: bug +issues: [] diff --git a/docs/changelog/101815.yaml b/docs/changelog/101815.yaml new file mode 100644 index 0000000000000..511e23beb68ef --- /dev/null +++ b/docs/changelog/101815.yaml @@ -0,0 +1,5 @@ +pr: 101815 +summary: Run `TransportGetAliasesAction` on local node +area: Indices APIs +type: enhancement +issues: [] diff --git a/docs/changelog/101826.yaml b/docs/changelog/101826.yaml new file mode 100644 index 0000000000000..87f3f8df1b0c2 --- /dev/null +++ b/docs/changelog/101826.yaml @@ -0,0 +1,6 @@ +pr: 101826 +summary: Support keyed histograms +area: Aggregations +type: enhancement +issues: + - 100242 diff --git a/docs/changelog/101846.yaml b/docs/changelog/101846.yaml new file mode 100644 index 0000000000000..52dfff8801c62 --- /dev/null +++ b/docs/changelog/101846.yaml @@ -0,0 +1,5 @@ +pr: 101846 +summary: Set `ActiveProcessorCount` when `node.processors` is set +area: Infra/CLI +type: enhancement +issues: [] diff --git a/docs/changelog/101847.yaml b/docs/changelog/101847.yaml new file mode 100644 index 0000000000000..91922b9e23ed0 --- /dev/null +++ b/docs/changelog/101847.yaml @@ -0,0 +1,6 @@ +pr: 101847 +summary: Add an additional tiebreaker to RRF +area: Ranking +type: bug +issues: + - 101232 diff --git a/docs/changelog/101859.yaml b/docs/changelog/101859.yaml new file mode 100644 index 0000000000000..54f3fb12810ca --- /dev/null +++ b/docs/changelog/101859.yaml @@ -0,0 +1,6 @@ +pr: 101859 +summary: Cover edge cases and data types for head/tail commands +area: EQL +type: bug +issues: + - 101724 diff --git a/docs/changelog/101868.yaml b/docs/changelog/101868.yaml new file mode 100644 index 0000000000000..d7cf650d25ed2 --- /dev/null +++ b/docs/changelog/101868.yaml @@ -0,0 +1,5 @@ +pr: 101868 +summary: Read scores from downloaded vocabulary for XLM Roberta tokenizers +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/101915.yaml b/docs/changelog/101915.yaml new file mode 100644 index 0000000000000..aed7ca62021a5 --- /dev/null +++ b/docs/changelog/101915.yaml @@ -0,0 +1,5 @@ +pr: 101915 +summary: Add inference counts by model to the machine learning usage stats +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/101989.yaml b/docs/changelog/101989.yaml new file mode 100644 index 0000000000000..d294d194bd4e8 --- /dev/null +++ b/docs/changelog/101989.yaml @@ -0,0 +1,5 @@ +pr: 101989 +summary: Add message field to `HealthPeriodicLogger` and `S3RequestRetryStats` +area: Health +type: enhancement +issues: [] diff --git a/docs/changelog/102020.yaml b/docs/changelog/102020.yaml new file mode 100644 index 0000000000000..7c74e9676d342 --- /dev/null +++ b/docs/changelog/102020.yaml @@ -0,0 +1,5 @@ +pr: 102020 +summary: Retrieve stacktrace events from a custom index +area: Application +type: enhancement +issues: [] diff --git a/docs/changelog/102048.yaml b/docs/changelog/102048.yaml new file mode 100644 index
0000000000000..54bc1d9eae52e --- /dev/null +++ b/docs/changelog/102048.yaml @@ -0,0 +1,5 @@ +pr: 102048 +summary: "Repo analysis: verify empty register" +area: Snapshot/Restore +type: enhancement +issues: [] diff --git a/docs/changelog/102051.yaml b/docs/changelog/102051.yaml new file mode 100644 index 0000000000000..c3ca4a546928f --- /dev/null +++ b/docs/changelog/102051.yaml @@ -0,0 +1,5 @@ +pr: 102051 +summary: "Repo analysis: allow configuration of register ops" +area: Snapshot/Restore +type: enhancement +issues: [] diff --git a/docs/changelog/102056.yaml b/docs/changelog/102056.yaml new file mode 100644 index 0000000000000..455f66ba90b03 --- /dev/null +++ b/docs/changelog/102056.yaml @@ -0,0 +1,5 @@ +pr: 102056 +summary: Use `BulkRequest` to store Application Privileges +area: Authorization +type: enhancement +issues: [] diff --git a/docs/changelog/102057.yaml b/docs/changelog/102057.yaml new file mode 100644 index 0000000000000..d5b664ba14c29 --- /dev/null +++ b/docs/changelog/102057.yaml @@ -0,0 +1,6 @@ +pr: 102057 +summary: Simplify `BlobStoreRepository` idle check +area: Snapshot/Restore +type: bug +issues: + - 101948 diff --git a/docs/changelog/102065.yaml b/docs/changelog/102065.yaml new file mode 100644 index 0000000000000..1a9a219df4502 --- /dev/null +++ b/docs/changelog/102065.yaml @@ -0,0 +1,5 @@ +pr: 102065 +summary: Add more desired balance stats +area: Allocation +type: enhancement +issues: [] diff --git a/docs/changelog/102075.yaml b/docs/changelog/102075.yaml new file mode 100644 index 0000000000000..54daae04169db --- /dev/null +++ b/docs/changelog/102075.yaml @@ -0,0 +1,5 @@ +pr: 102075 +summary: Accept a single or multiple inputs to `_inference` +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/102089.yaml b/docs/changelog/102089.yaml new file mode 100644 index 0000000000000..9f33c0648d09f --- /dev/null +++ b/docs/changelog/102089.yaml @@ -0,0 +1,5 @@ +pr: 102089 +summary: Add prefix strings option to trained models +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/102114.yaml b/docs/changelog/102114.yaml new file mode 100644 index 0000000000000..a08389da0351b --- /dev/null +++ b/docs/changelog/102114.yaml @@ -0,0 +1,6 @@ +pr: 102114 +summary: Fix double-completion in `SecurityUsageTransportAction` +area: Security +type: bug +issues: + - 102111 diff --git a/docs/changelog/102140.yaml b/docs/changelog/102140.yaml new file mode 100644 index 0000000000000..0f086649b9710 --- /dev/null +++ b/docs/changelog/102140.yaml @@ -0,0 +1,6 @@ +pr: 102140 +summary: Collect data tiers usage stats more efficiently +area: ILM+SLM +type: bug +issues: + - 100230 \ No newline at end of file diff --git a/docs/changelog/102151.yaml b/docs/changelog/102151.yaml new file mode 100644 index 0000000000000..652ae555af97d --- /dev/null +++ b/docs/changelog/102151.yaml @@ -0,0 +1,5 @@ +pr: 102151 +summary: Default `run_ml_inference` should be true +area: Application +type: bug +issues: [] diff --git a/docs/changelog/102172.yaml b/docs/changelog/102172.yaml new file mode 100644 index 0000000000000..485c2c4327e11 --- /dev/null +++ b/docs/changelog/102172.yaml @@ -0,0 +1,5 @@ +pr: 102172 +summary: Adjust Histogram's bucket accounting to be iterative +area: Aggregations +type: bug +issues: [] diff --git a/docs/changelog/102188.yaml b/docs/changelog/102188.yaml new file mode 100644 index 0000000000000..595a8395fab5c --- /dev/null +++ b/docs/changelog/102188.yaml @@ -0,0 +1,5 @@ +pr: 102188 +summary: Track blocks in `AsyncOperator`
+area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/102190.yaml b/docs/changelog/102190.yaml new file mode 100644 index 0000000000000..cd04e041fca5e --- /dev/null +++ b/docs/changelog/102190.yaml @@ -0,0 +1,5 @@ +pr: 102190 +summary: Track pages in ESQL enrich request/response +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/102208.yaml b/docs/changelog/102208.yaml new file mode 100644 index 0000000000000..b566a85753d82 --- /dev/null +++ b/docs/changelog/102208.yaml @@ -0,0 +1,5 @@ +pr: 102208 +summary: Add static node settings to set default values for max merged segment sizes +area: Engine +type: enhancement +issues: [] diff --git a/docs/changelog/94607.yaml b/docs/changelog/94607.yaml deleted file mode 100644 index eea9264ce90f9..0000000000000 --- a/docs/changelog/94607.yaml +++ /dev/null @@ -1,18 +0,0 @@ -pr: 94607 -summary: Use `IndexWriter.flushNextBuffer()` to reclaim memory from indexing buffers -area: Engine -type: enhancement -issues: [] -highlight: - title: Use `IndexWriter.flushNextBuffer()` to reclaim memory from indexing buffers - body: |- - Rather than forcing a refresh to reclaim memory from indexing buffers, which flushes all - segments no matter how large, Elasticsearch now takes advantage of - `IndexWriter#flushNextBuffer` which only flushes the largest pending segment. This should smooth - out indexing allowing for larger segment sizes, fewer merges and higher throughput. - - Furthermore, the selection algorithm to pick which shard to reclaim memory from next was - changed, from picking the shard that uses the most RAM to going over shards in a round-robin - fashion. This approach has proved to work significantly better in practice. - - notable: true diff --git a/docs/changelog/97317.yaml b/docs/changelog/97317.yaml deleted file mode 100644 index 64fcd55e67e28..0000000000000 --- a/docs/changelog/97317.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 97317 -summary: "Fix merges of mappings with `subobjects: false` for composable index templates" -area: Mapping -type: bug -issues: - - 96768 diff --git a/docs/changelog/97397.yaml b/docs/changelog/97397.yaml deleted file mode 100644 index 5c1867d55f9bd..0000000000000 --- a/docs/changelog/97397.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 97397 -summary: Return a 410 (Gone) status code for unavailable API endpoints -area: Infra/REST API -type: enhancement -issues: [] diff --git a/docs/changelog/97409.yaml b/docs/changelog/97409.yaml deleted file mode 100644 index 8c05d6254f7cc..0000000000000 --- a/docs/changelog/97409.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 97409 -summary: Trim stored fields for `_id` field in tsdb -area: TSDB -type: enhancement -issues: [] diff --git a/docs/changelog/97450.yaml b/docs/changelog/97450.yaml deleted file mode 100644 index a057e0beefaca..0000000000000 --- a/docs/changelog/97450.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 97450 -summary: Make `_index` optional for pinned query docs -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/97642.yaml b/docs/changelog/97642.yaml deleted file mode 100644 index cf519e04e2d38..0000000000000 --- a/docs/changelog/97642.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 97642 -summary: fix fuzzy query rewrite parameter not work -area: Search -type: bug -issues: [] diff --git a/docs/changelog/97729.yaml b/docs/changelog/97729.yaml deleted file mode 100644 index f80a04bc58f68..0000000000000 --- a/docs/changelog/97729.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 97729 -summary: Allow parsing on non-string routing fields -area: 
Aggregations -type: bug -issues: [] diff --git a/docs/changelog/97972.yaml b/docs/changelog/97972.yaml deleted file mode 100644 index d4d55e33b4bb2..0000000000000 --- a/docs/changelog/97972.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 97972 -summary: Automatically flatten objects when subobjects:false -area: Mapping -type: enhancement -issues: - - 88934 diff --git a/docs/changelog/98038.yaml b/docs/changelog/98038.yaml deleted file mode 100644 index d99db24664f30..0000000000000 --- a/docs/changelog/98038.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 98038 -summary: Update enrich execution to only set index false on fields that support it -area: Ingest Node -type: bug -issues: - - 98019 diff --git a/docs/changelog/98061.yaml b/docs/changelog/98061.yaml deleted file mode 100644 index 3955b262017f0..0000000000000 --- a/docs/changelog/98061.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 98061 -summary: Fix possible NPE when getting transform stats for failed transforms -area: Transform -type: bug -issues: - - 98052 diff --git a/docs/changelog/98268.yaml b/docs/changelog/98268.yaml deleted file mode 100644 index ef6f98b8d016c..0000000000000 --- a/docs/changelog/98268.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98268 -summary: Dense vector field types are indexed by default -area: Vector Search -type: enhancement -issues: [] diff --git a/docs/changelog/98309.yaml b/docs/changelog/98309.yaml deleted file mode 100644 index 550f50b3569a1..0000000000000 --- a/docs/changelog/98309.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98309 -summary: "Integrate Elasticsearch Query Language, ES|QL" -area: Query Languages -type: feature -issues: [] diff --git a/docs/changelog/98332.yaml b/docs/changelog/98332.yaml deleted file mode 100644 index 6446707515b3c..0000000000000 --- a/docs/changelog/98332.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 98332 -summary: Correct behaviour of `ContentPath::remove()` -area: Mapping -type: bug -issues: - - 98327 diff --git a/docs/changelog/98337.yaml b/docs/changelog/98337.yaml deleted file mode 100644 index 8664ae15eed00..0000000000000 --- a/docs/changelog/98337.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98337 -summary: TopN sorting with min and max for multi-value fields -area: ES|QL -type: feature -issues: [] diff --git a/docs/changelog/98360.yaml b/docs/changelog/98360.yaml deleted file mode 100644 index b6b8696259c98..0000000000000 --- a/docs/changelog/98360.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 98360 -summary: Use a competitive iterator in `FiltersAggregator` -area: Aggregations -type: enhancement -issues: - - 97544 diff --git a/docs/changelog/98406.yaml b/docs/changelog/98406.yaml deleted file mode 100644 index f62af64171944..0000000000000 --- a/docs/changelog/98406.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98406 -summary: Safely drain deployment request queues before allowing node to shutdown -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/98457.yaml b/docs/changelog/98457.yaml deleted file mode 100644 index 465c9ed30cc5b..0000000000000 --- a/docs/changelog/98457.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98457 -summary: Support cluster/details for CCS minimize_roundtrips=false -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/98470.yaml b/docs/changelog/98470.yaml deleted file mode 100644 index 498b1db244d22..0000000000000 --- a/docs/changelog/98470.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98470 -summary: Reduce verbosity of the bulk indexing audit log -area: Audit -type: enhancement -issues: [] diff --git a/docs/changelog/98512.yaml 
b/docs/changelog/98512.yaml deleted file mode 100644 index c2108a18c6b91..0000000000000 --- a/docs/changelog/98512.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 98512 -summary: Automatically map float arrays of lengths 128 - 2048 as dense_vector -area: Application -type: feature -issues: - - 97532 diff --git a/docs/changelog/98518.yaml b/docs/changelog/98518.yaml deleted file mode 100644 index 2f961fc11ce69..0000000000000 --- a/docs/changelog/98518.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 98518 -summary: Add `index.look_back_time` setting for tsdb data streams -area: TSDB -type: enhancement -issues: - - 98463 diff --git a/docs/changelog/98528.yaml b/docs/changelog/98528.yaml deleted file mode 100644 index 0004499e58f83..0000000000000 --- a/docs/changelog/98528.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 98528 -summary: "ESQL: Add support for TEXT fields in comparison operators and SORT" -area: ES|QL -type: enhancement -issues: - - 98642 diff --git a/docs/changelog/98550.yaml b/docs/changelog/98550.yaml deleted file mode 100644 index 30c9891b15182..0000000000000 --- a/docs/changelog/98550.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98550 -summary: Report a node's "roles" setting in the /_cluster/allocation/explain response -area: Allocation -type: enhancement -issues: [97859] diff --git a/docs/changelog/98574.yaml b/docs/changelog/98574.yaml deleted file mode 100644 index bf016b4c241c8..0000000000000 --- a/docs/changelog/98574.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 98574 -summary: Specify correct current `IndexVersion` after 8.10 release -area: Infra/Core -type: bug -issues: - - 98555 diff --git a/docs/changelog/98590.yaml b/docs/changelog/98590.yaml deleted file mode 100644 index f3ef3cdd56a12..0000000000000 --- a/docs/changelog/98590.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98590 -summary: "ESQL: LTRIM, RTRIM and fix unicode whitespace" -area: ES|QL -type: feature -issues: [] diff --git a/docs/changelog/98622.yaml b/docs/changelog/98622.yaml deleted file mode 100644 index 8c41444b6c725..0000000000000 --- a/docs/changelog/98622.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 98622 -summary: Add 'dataset' size to cat indices and cat shards -area: CAT APIs -type: enhancement -issues: - - 95092 diff --git a/docs/changelog/98628.yaml b/docs/changelog/98628.yaml deleted file mode 100644 index 2ecd9dd23e0ef..0000000000000 --- a/docs/changelog/98628.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98628 -summary: Add ESQL own flavor of arithmetic operators -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/98630.yaml b/docs/changelog/98630.yaml deleted file mode 100644 index 444c593f87d0b..0000000000000 --- a/docs/changelog/98630.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98630 -summary: "ESQL: LEAST and GREATEST functions" -area: ES|QL -type: feature -issues: [] diff --git a/docs/changelog/98635.yaml b/docs/changelog/98635.yaml deleted file mode 100644 index 274096951fcf6..0000000000000 --- a/docs/changelog/98635.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98635 -summary: Fix NPE in `StableMasterHealthIndicatorService` -area: Health -type: bug -issues: [] diff --git a/docs/changelog/98653.yaml b/docs/changelog/98653.yaml deleted file mode 100644 index 384a29c3cc4ab..0000000000000 --- a/docs/changelog/98653.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 98653 -summary: Reset `GatewayService` flags before reroute -area: Cluster Coordination -type: bug -issues: - - 98606 diff --git a/docs/changelog/98654.yaml b/docs/changelog/98654.yaml deleted file mode 100644 index ea63edb93eb58..0000000000000 --- a/docs/changelog/98654.yaml +++ 
/dev/null @@ -1,5 +0,0 @@ -pr: 98654 -summary: Allow native users/roles to be disabled via setting -area: Authentication -type: enhancement -issues: [] diff --git a/docs/changelog/98684.yaml b/docs/changelog/98684.yaml deleted file mode 100644 index 552e85a04151a..0000000000000 --- a/docs/changelog/98684.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 98684 -summary: Explicit parsing object capabilities of `FieldMappers` -area: Mapping -type: enhancement -issues: - - 98537 diff --git a/docs/changelog/98711.yaml b/docs/changelog/98711.yaml deleted file mode 100644 index 43e0c2a03e8fa..0000000000000 --- a/docs/changelog/98711.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98711 -summary: Support unsigned long in sqrt and log10 for ESQL -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/98759.yaml b/docs/changelog/98759.yaml deleted file mode 100644 index df6180bddc192..0000000000000 --- a/docs/changelog/98759.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 98759 -summary: "ESQL: Support queries that don't return underlying fields" -area: ES|QL -type: bug -issues: - - 98404 diff --git a/docs/changelog/98809.yaml b/docs/changelog/98809.yaml deleted file mode 100644 index f9f5be523e179..0000000000000 --- a/docs/changelog/98809.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 98809 -summary: Avoiding the use of nodes that are no longer in the cluster when computing - master stability -area: Health -type: enhancement -issues: - - 98636 diff --git a/docs/changelog/98811.yaml b/docs/changelog/98811.yaml deleted file mode 100644 index 338efbcf1d8c9..0000000000000 --- a/docs/changelog/98811.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98811 -summary: Allow explain data stream lifecycle to accept a data stream -area: Data streams -type: enhancement -issues: [] diff --git a/docs/changelog/98824.yaml b/docs/changelog/98824.yaml deleted file mode 100644 index 7e2c43d266232..0000000000000 --- a/docs/changelog/98824.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 98824 -summary: Consider node shutdown in `DataTierAllocationDecider` -area: "Allocation" -type: bug -issues: - - 97207 diff --git a/docs/changelog/98840.yaml b/docs/changelog/98840.yaml deleted file mode 100644 index bb358916354dc..0000000000000 --- a/docs/changelog/98840.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 98840 -summary: Don't ignore empty index template that have no template definition -area: TSDB -type: bug -issues: - - 98834 diff --git a/docs/changelog/98843.yaml b/docs/changelog/98843.yaml deleted file mode 100644 index 742ae25697718..0000000000000 --- a/docs/changelog/98843.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98843 -summary: Fix UnsignedLong field range query gt "0" can get the result equal to 0 -area: Search -type: bug -issues: [] diff --git a/docs/changelog/98844.yaml b/docs/changelog/98844.yaml deleted file mode 100644 index a5870e7344d15..0000000000000 --- a/docs/changelog/98844.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98844 -summary: Add accessors required to recreate `TransformStats` object from the fields -area: Transform -type: enhancement -issues: [] diff --git a/docs/changelog/98847.yaml b/docs/changelog/98847.yaml deleted file mode 100644 index ab7455bd783c3..0000000000000 --- a/docs/changelog/98847.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98847 -summary: "ESQL: Add `CEIL` function" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/98870.yaml b/docs/changelog/98870.yaml deleted file mode 100644 index b719fbb0caf22..0000000000000 --- a/docs/changelog/98870.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 98870 -summary: "ESQL: Add 
ability to perform date math" -area: ES|QL -type: enhancement -issues: - - 98402 diff --git a/docs/changelog/98874.yaml b/docs/changelog/98874.yaml new file mode 100644 index 0000000000000..e3eb7b5acc63f --- /dev/null +++ b/docs/changelog/98874.yaml @@ -0,0 +1,5 @@ +pr: 98874 +summary: Estimate the memory required to deploy trained models more accurately +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/98878.yaml b/docs/changelog/98878.yaml deleted file mode 100644 index 4fa8b23851bf9..0000000000000 --- a/docs/changelog/98878.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98878 -summary: Fix percolator query for stored queries that expand on wildcard field names -area: Percolator -type: bug -issues: [] diff --git a/docs/changelog/98888.yaml b/docs/changelog/98888.yaml deleted file mode 100644 index 1f2f7ea27ff19..0000000000000 --- a/docs/changelog/98888.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98888 -summary: Revert "Kibana system index does not allow user templates to affect it" -area: Infra/Core -type: bug -issues: [] diff --git a/docs/changelog/98915.yaml b/docs/changelog/98915.yaml deleted file mode 100644 index c23ddcc55d98e..0000000000000 --- a/docs/changelog/98915.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98915 -summary: Avoid risk of OOM in datafeeds when memory is constrained -area: Machine Learning -type: bug -issues: [89769] diff --git a/docs/changelog/98930.yaml b/docs/changelog/98930.yaml deleted file mode 100644 index e6a2c74192ebe..0000000000000 --- a/docs/changelog/98930.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98930 -summary: Frozen index input clone copy cache file -area: Snapshot/Restore -type: bug -issues: [] diff --git a/docs/changelog/98942.yaml b/docs/changelog/98942.yaml deleted file mode 100644 index 4d8eeee5192e5..0000000000000 --- a/docs/changelog/98942.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98942 -summary: "ESQL: LEFT function" -area: ES|QL -type: feature -issues: [] diff --git a/docs/changelog/98972.yaml b/docs/changelog/98972.yaml deleted file mode 100644 index acd336ff7d666..0000000000000 --- a/docs/changelog/98972.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 98972 -summary: "ES|QL: Implement serialization of `InvalidMappedField`" -area: ES|QL -type: bug -issues: - - 98851 diff --git a/docs/changelog/98974.yaml b/docs/changelog/98974.yaml deleted file mode 100644 index 90950986141ab..0000000000000 --- a/docs/changelog/98974.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98974 -summary: "ESQL: RIGHT function" -area: ES|QL -type: feature -issues: [] diff --git a/docs/changelog/98996.yaml b/docs/changelog/98996.yaml deleted file mode 100644 index 1f1bdd35ff643..0000000000000 --- a/docs/changelog/98996.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98996 -summary: Reintroduce `sparse_vector` mapping -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/99054.yaml b/docs/changelog/99054.yaml deleted file mode 100644 index a9e4128e7ae97..0000000000000 --- a/docs/changelog/99054.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99054 -summary: "ESQL: Mark counter fields as unsupported" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/99058.yaml b/docs/changelog/99058.yaml deleted file mode 100644 index a112834add071..0000000000000 --- a/docs/changelog/99058.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99058 -summary: "ESQL: log query and execution time" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/99091.yaml b/docs/changelog/99091.yaml deleted file mode 100644 index 2c7be19b161ba..0000000000000 --- 
a/docs/changelog/99091.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99091 -summary: Add flamegraph API -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/99106.yaml b/docs/changelog/99106.yaml deleted file mode 100644 index 21cb121595d2b..0000000000000 --- a/docs/changelog/99106.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99106 -summary: "Add support for Persian language stemmer" -area: Analysis -type: feature -issues: - - 98911 diff --git a/docs/changelog/99107.yaml b/docs/changelog/99107.yaml deleted file mode 100644 index a808fb57fcf80..0000000000000 --- a/docs/changelog/99107.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99107 -summary: Wait to gracefully stop deployments until alternative allocation exists -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/99117.yaml b/docs/changelog/99117.yaml deleted file mode 100644 index 491692f232081..0000000000000 --- a/docs/changelog/99117.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99117 -summary: Do not report failure after connections are made -area: Network -type: bug -issues: [] diff --git a/docs/changelog/99163.yaml b/docs/changelog/99163.yaml deleted file mode 100644 index f7a44c7f24869..0000000000000 --- a/docs/changelog/99163.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99163 -summary: Use `NamedWritable` to enable `GeoBoundingBox` serialisation -area: Geo -type: bug -issues: - - 99089 diff --git a/docs/changelog/99188.yaml b/docs/changelog/99188.yaml deleted file mode 100644 index c22e3ba4b36e5..0000000000000 --- a/docs/changelog/99188.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99188 -summary: "ESQL: skip synthetic attributes when planning the physical fragment" -area: ES|QL -type: bug -issues: - - 99170 diff --git a/docs/changelog/99193.yaml b/docs/changelog/99193.yaml deleted file mode 100644 index 9db646dc80435..0000000000000 --- a/docs/changelog/99193.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99193 -summary: Wait for cluster state in recovery -area: Recovery -type: enhancement -issues: [] diff --git a/docs/changelog/99215.yaml b/docs/changelog/99215.yaml deleted file mode 100644 index 99227839b491e..0000000000000 --- a/docs/changelog/99215.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99215 -summary: Skip `DisiPriorityQueue` on single filter agg -area: Aggregations -type: enhancement -issues: - - 99202 diff --git a/docs/changelog/99219.yaml b/docs/changelog/99219.yaml deleted file mode 100644 index 811e2df5f83d0..0000000000000 --- a/docs/changelog/99219.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99219 -summary: Reduce copying when creating scroll/PIT ids -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/99222.yaml b/docs/changelog/99222.yaml deleted file mode 100644 index 025c5e01d2a53..0000000000000 --- a/docs/changelog/99222.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99222 -summary: Fork response-sending in `OpenPointInTimeAction` -area: Search -type: bug -issues: [] diff --git a/docs/changelog/99223.yaml b/docs/changelog/99223.yaml deleted file mode 100644 index 914441931033b..0000000000000 --- a/docs/changelog/99223.yaml +++ /dev/null @@ -1,11 +0,0 @@ -pr: 99223 -summary: Remove `transport_versions` from cluster state API -area: Infra/Core -type: breaking -issues: [] -breaking: - title: Remove `transport_versions` from cluster state API - area: REST API - details: The `transport_versions` subobject of the response to `GET _cluster/state` has been replaced by the `nodes_versions` subobject. - impact: If needed, retrieve the per-node transport versions from the `nodes_versions` subobject. 
- notable: false diff --git a/docs/changelog/99224.yaml b/docs/changelog/99224.yaml deleted file mode 100644 index cde4084ab0e84..0000000000000 --- a/docs/changelog/99224.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99224 -summary: Add new _inference API -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/99278.yaml b/docs/changelog/99278.yaml deleted file mode 100644 index f2788a00e6369..0000000000000 --- a/docs/changelog/99278.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99278 -summary: Support rotatating the JWT shared secret -area: Security -type: enhancement -issues: [] diff --git a/docs/changelog/99286.yaml b/docs/changelog/99286.yaml deleted file mode 100644 index 1b37416d51ba6..0000000000000 --- a/docs/changelog/99286.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99286 -summary: "ESQL: Log execution time consistently" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/99300.yaml b/docs/changelog/99300.yaml deleted file mode 100644 index 508001b98f29e..0000000000000 --- a/docs/changelog/99300.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99300 -summary: Change `GetFromTranslog` to indices action -area: CRUD -type: bug -issues: [] diff --git a/docs/changelog/99303.yaml b/docs/changelog/99303.yaml deleted file mode 100644 index 479c3a3e280c7..0000000000000 --- a/docs/changelog/99303.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99303 -summary: Use DEBUG log level to report ESQL execution steps -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/99310.yaml b/docs/changelog/99310.yaml deleted file mode 100644 index 8b595fe93fd33..0000000000000 --- a/docs/changelog/99310.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99310 -summary: "ESQL: \"params\" correctly parses the values including an optional \"type\"" -area: ES|QL -type: bug -issues: - - 99294 diff --git a/docs/changelog/99316.yaml b/docs/changelog/99316.yaml deleted file mode 100644 index 78857b433b385..0000000000000 --- a/docs/changelog/99316.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99316 -summary: "ESQL: Compact topn" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/99346.yaml b/docs/changelog/99346.yaml deleted file mode 100644 index fc6fe02e6bf14..0000000000000 --- a/docs/changelog/99346.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99346 -summary: Automatically disable `ignore_malformed` on datastream `@timestamp` fields -area: Mapping -type: bug -issues: [] diff --git a/docs/changelog/99382.yaml b/docs/changelog/99382.yaml deleted file mode 100644 index 5f5eb932ed458..0000000000000 --- a/docs/changelog/99382.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99382 -summary: "ESQL: create a Vector when needed for IN" -area: ES|QL -type: bug -issues: - - 99347 diff --git a/docs/changelog/99417.yaml b/docs/changelog/99417.yaml deleted file mode 100644 index 8c88a5a548dff..0000000000000 --- a/docs/changelog/99417.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99417 -summary: Disable `FilterByFilterAggregator` through `ClusterSettings` -area: Aggregations -type: enhancement -issues: - - 99335 diff --git a/docs/changelog/99432.yaml b/docs/changelog/99432.yaml deleted file mode 100644 index df4c5a7f78199..0000000000000 --- a/docs/changelog/99432.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99432 -summary: "ESQL: Enable arithmetics for durations and periods" -area: ES|QL -type: enhancement -issues: [99293] diff --git a/docs/changelog/99470.yaml b/docs/changelog/99470.yaml deleted file mode 100644 index 3e784595cc6ac..0000000000000 --- a/docs/changelog/99470.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 
99470 -summary: "ESQL: Improve log messages" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/99474.yaml b/docs/changelog/99474.yaml deleted file mode 100644 index ea23481069833..0000000000000 --- a/docs/changelog/99474.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99474 -summary: Add `java.net.NetPermission` to APM module's permissions -area: Infra/Core -type: bug -issues: [] diff --git a/docs/changelog/99515.yaml b/docs/changelog/99515.yaml deleted file mode 100644 index 7de237531a506..0000000000000 --- a/docs/changelog/99515.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99515 -summary: Add `IndexVersion` to node info -area: Infra/REST API -type: enhancement -issues: [] diff --git a/docs/changelog/99527.yaml b/docs/changelog/99527.yaml deleted file mode 100644 index 19eef621fa500..0000000000000 --- a/docs/changelog/99527.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99445 -summary: Add new max_inner_product vector similarity function -area: Vector Search -type: enhancement -issues: [] diff --git a/docs/changelog/99532.yaml b/docs/changelog/99532.yaml deleted file mode 100644 index 859ba963600a8..0000000000000 --- a/docs/changelog/99532.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99532 -summary: Adds `nested` support for indexed `dense_vector` fields -area: Vector Search -type: enhancement -issues: [] diff --git a/docs/changelog/99555.yaml b/docs/changelog/99555.yaml deleted file mode 100644 index 5e53e8782e08c..0000000000000 --- a/docs/changelog/99555.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99555 -summary: Use mappings version to retrieve system index mappings at creation time -area: Infra/Core -type: enhancement -issues: [] diff --git a/docs/changelog/99566.yaml b/docs/changelog/99566.yaml deleted file mode 100644 index caad871bf58ed..0000000000000 --- a/docs/changelog/99566.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99566 -summary: Add additional counters to `_clusters` response for all Cluster search states -area: Search -type: enhancement -issues: - - 98927 diff --git a/docs/changelog/99567.yaml b/docs/changelog/99567.yaml deleted file mode 100644 index aea65e55b6ee2..0000000000000 --- a/docs/changelog/99567.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99567 -summary: Make tsdb settings public in Serverless -area: TSDB -type: bug -issues: - - 99563 diff --git a/docs/changelog/99584.yaml b/docs/changelog/99584.yaml deleted file mode 100644 index 229e3d8024506..0000000000000 --- a/docs/changelog/99584.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99584 -summary: Adding an option for trained models to be platform specific -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/99588.yaml b/docs/changelog/99588.yaml deleted file mode 100644 index 7cbb53376fdf0..0000000000000 --- a/docs/changelog/99588.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99588 -summary: Make ESQL more resilient to non-indexed fields -area: ES|QL -type: bug -issues: - - 99506 diff --git a/docs/changelog/99601.yaml b/docs/changelog/99601.yaml deleted file mode 100644 index 9deba859a5cef..0000000000000 --- a/docs/changelog/99601.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99601 -summary: "ESQL: continue resolving attributes for Eval" -area: ES|QL -type: bug -issues: - - 99576 diff --git a/docs/changelog/99627.yaml b/docs/changelog/99627.yaml deleted file mode 100644 index 84abdf6418dc2..0000000000000 --- a/docs/changelog/99627.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99627 -summary: Fix thread context in `getRepositoryData` -area: Snapshot/Restore -type: bug -issues: [] diff --git a/docs/changelog/99631.yaml 
b/docs/changelog/99631.yaml deleted file mode 100644 index d9174de76f1ea..0000000000000 --- a/docs/changelog/99631.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99631 -summary: Add component info versions to node info in a pluggable way -area: Infra/REST API -type: enhancement -issues: [] diff --git a/docs/changelog/99641.yaml b/docs/changelog/99641.yaml deleted file mode 100644 index c74f7380bd93a..0000000000000 --- a/docs/changelog/99641.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99641 -summary: Chunk the cluster allocation explain response -area: Network -type: enhancement -issues: [97803] diff --git a/docs/changelog/99644.yaml b/docs/changelog/99644.yaml deleted file mode 100644 index 10c10448c074c..0000000000000 --- a/docs/changelog/99644.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99644 -summary: Add links to docs from failing bootstrap checks -area: Infra/Node Lifecycle -type: enhancement -issues: [99614] - diff --git a/docs/changelog/99655.yaml b/docs/changelog/99655.yaml deleted file mode 100644 index 3d1e76ec47aa3..0000000000000 --- a/docs/changelog/99655.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99655 -summary: "[Profiling] Allow to wait until resources created" -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/99682.yaml b/docs/changelog/99682.yaml deleted file mode 100644 index 48e99a5145674..0000000000000 --- a/docs/changelog/99682.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99682 -summary: Increase the max vector dims to 4096 -area: Vector Search -type: enhancement -issues: [] diff --git a/docs/changelog/99685.yaml b/docs/changelog/99685.yaml deleted file mode 100644 index 43dac2abbb312..0000000000000 --- a/docs/changelog/99685.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99685 -summary: Fix `advanceExact` for doc values from sources -area: Search -type: bug -issues: [] diff --git a/docs/changelog/99694.yaml b/docs/changelog/99694.yaml deleted file mode 100644 index a449ecb2ae378..0000000000000 --- a/docs/changelog/99694.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99694 -summary: Remove shard data files when they fail to write for snapshot -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/99695.yaml b/docs/changelog/99695.yaml deleted file mode 100644 index 6dc4037a57763..0000000000000 --- a/docs/changelog/99695.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99695 -summary: "ESQL: Better management of not stored TEXT fiels with synthetic source" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/99711.yaml b/docs/changelog/99711.yaml deleted file mode 100644 index 34731a52818f0..0000000000000 --- a/docs/changelog/99711.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99711 -summary: "ESQL: Date math for negatives" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/99712.yaml b/docs/changelog/99712.yaml deleted file mode 100644 index c5fa1ac1e64ec..0000000000000 --- a/docs/changelog/99712.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99712 -summary: Make downsample target index replicas configurable -area: Downsampling -type: bug -issues: [] diff --git a/docs/changelog/99717.yaml b/docs/changelog/99717.yaml deleted file mode 100644 index db48c69ed68a2..0000000000000 --- a/docs/changelog/99717.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99717 -summary: Treating watcher webhook response header names as case-insensitive -area: Watcher -type: bug -issues: [] diff --git a/docs/changelog/99726.yaml b/docs/changelog/99726.yaml deleted file mode 100644 index 23350fdb85bd0..0000000000000 --- a/docs/changelog/99726.yaml +++ /dev/null @@ -1,6 
+0,0 @@ -pr: 99726 -summary: "ESQL: Account for an exception being thrown when building a `BytesRefArrayBlock`" -area: ES|QL -type: bug -issues: - - 99472 diff --git a/docs/changelog/99736.yaml b/docs/changelog/99736.yaml deleted file mode 100644 index fbf177ea152a8..0000000000000 --- a/docs/changelog/99736.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99736 -summary: "ESQL: enhance SHOW FUNCTIONS command" -area: ES|QL -type: enhancement -issues: - - 99507 diff --git a/docs/changelog/99746.yaml b/docs/changelog/99746.yaml deleted file mode 100644 index c4cdbc00f82c1..0000000000000 --- a/docs/changelog/99746.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99746 -summary: "ESQL: Log start and end of queries" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/99775.yaml b/docs/changelog/99775.yaml deleted file mode 100644 index 0c0dbdb1fce87..0000000000000 --- a/docs/changelog/99775.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99775 -summary: Adding support for exist queries to `sparse_vector` fields -area: Search -type: enhancement -issues: - - 99319 diff --git a/docs/changelog/99796.yaml b/docs/changelog/99796.yaml deleted file mode 100644 index cad10564ed294..0000000000000 --- a/docs/changelog/99796.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99796 -summary: Support runtime fields in synthetic source -area: Aggregations -type: bug -issues: - - 98287 diff --git a/docs/changelog/99797.yaml b/docs/changelog/99797.yaml deleted file mode 100644 index e46d4501291b5..0000000000000 --- a/docs/changelog/99797.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99797 -summary: Wait for cluster to recover before resolving index template -area: CRUD -type: bug -issues: [] diff --git a/docs/changelog/99798.yaml b/docs/changelog/99798.yaml deleted file mode 100644 index bd8b9da71541d..0000000000000 --- a/docs/changelog/99798.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 99798 -summary: Chunk `SingleNodeShutdownStatus` and `ShutdownShardMigrationStatus` (and - related action) response -area: Infra/Node Lifecycle -type: enhancement -issues: - - 99678 diff --git a/docs/changelog/99804.yaml b/docs/changelog/99804.yaml deleted file mode 100644 index b4c226217e352..0000000000000 --- a/docs/changelog/99804.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99804 -summary: Correctly handle `ScriptScoreQuery` in plain highlighter -area: Highlighting -type: bug -issues: - - 99700 diff --git a/docs/changelog/99816.yaml b/docs/changelog/99816.yaml deleted file mode 100644 index 4caf8a36f54b4..0000000000000 --- a/docs/changelog/99816.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99816 -summary: "ESQL: Lower the implicit limit, if none is user-provided" -area: ES|QL -type: enhancement -issues: - - 99458 diff --git a/docs/changelog/99827.yaml b/docs/changelog/99827.yaml deleted file mode 100644 index 3e6690a8e9e68..0000000000000 --- a/docs/changelog/99827.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99827 -summary: "ESQL: Fix NPE when aggregating literals" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/99832.yaml b/docs/changelog/99832.yaml deleted file mode 100644 index 9bd83591ba920..0000000000000 --- a/docs/changelog/99832.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99832 -summary: APM Metering API -area: Infra/Core -type: enhancement -issues: [] diff --git a/docs/changelog/99873.yaml b/docs/changelog/99873.yaml deleted file mode 100644 index d726ba00a1558..0000000000000 --- a/docs/changelog/99873.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99873 -summary: "[Profiling] Tighten resource creation check" -area: Application -type: bug -issues: [] diff 
--git a/docs/changelog/99874.yaml b/docs/changelog/99874.yaml deleted file mode 100644 index d23fc1ea6edde..0000000000000 --- a/docs/changelog/99874.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99874 -summary: "ESQL: Use exact attributes for data source extraction" -area: ES|QL -type: bug -issues: - - 99183 diff --git a/docs/changelog/99909.yaml b/docs/changelog/99909.yaml deleted file mode 100644 index 2051a30e4efa1..0000000000000 --- a/docs/changelog/99909.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99909 -summary: "[Profiling] Allow to customize the ILM policy" -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/99912.yaml b/docs/changelog/99912.yaml deleted file mode 100644 index 06f0f9baa9661..0000000000000 --- a/docs/changelog/99912.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99912 -summary: Represent histogram value count as long -area: Aggregations -type: enhancement -issues: - - 99820 diff --git a/docs/changelog/99938.yaml b/docs/changelog/99938.yaml deleted file mode 100644 index 4349b73516cae..0000000000000 --- a/docs/changelog/99938.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99938 -summary: "Prune unnecessary information from TransportNodesInfoAction.NodeInfoRequest" -area: Stats -type: enhancement -issues: [99744] diff --git a/docs/changelog/99947.yaml b/docs/changelog/99947.yaml deleted file mode 100644 index 61996c8fde92b..0000000000000 --- a/docs/changelog/99947.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99947 -summary: GET `_data_stream` displays both ILM and DSL information -area: Data streams -type: feature -issues: [] diff --git a/docs/changelog/99956.yaml b/docs/changelog/99956.yaml deleted file mode 100644 index 04646a98898a3..0000000000000 --- a/docs/changelog/99956.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99956 -summary: "ESQL: Serialize the source in expressions" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/99995.yaml b/docs/changelog/99995.yaml deleted file mode 100644 index d67cbdaec1f37..0000000000000 --- a/docs/changelog/99995.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99995 -summary: When a primary is inactive but this is considered expected, the same applies for the replica of this shard. -area: Health -type: enhancement -issues: - - 99951 diff --git a/docs/plugins/development/creating-stable-plugins.asciidoc b/docs/plugins/development/creating-stable-plugins.asciidoc index a8efc86c5beac..c9a8a1f6c7e2a 100644 --- a/docs/plugins/development/creating-stable-plugins.asciidoc +++ b/docs/plugins/development/creating-stable-plugins.asciidoc @@ -59,7 +59,7 @@ for the plugin. If you need other resources, package them into a resources JAR. [discrete] ==== Development process -Elastic provides a Grade plugin, `elasticsearch.stable-esplugin`, that makes it +Elastic provides a Gradle plugin, `elasticsearch.stable-esplugin`, that makes it easier to develop and package stable plugins. The steps in this section assume you use this plugin. However, you don't need Gradle to create plugins. @@ -128,4 +128,4 @@ extend `ESClientYamlSuiteTestCase`. 
[[plugin-descriptor-file-stable]] ==== The plugin descriptor file for stable plugins -include::plugin-descriptor-file.asciidoc[] \ No newline at end of file +include::plugin-descriptor-file.asciidoc[] diff --git a/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc b/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc index b5f1315531916..44a00b9f5b99e 100644 --- a/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc @@ -68,7 +68,7 @@ POST /_search -------------------------------------------------- // TEST[setup:sales] -<1> A `date_histogram` named "my_date_histo" is constructed on the "timestamp" field, with one-day intervals +<1> A `date_histogram` named "my_date_histo" is constructed on the "timestamp" field, with one-month intervals <2> A `sum` metric is used to calculate the sum of a field. This could be any numeric metric (sum, min, max, etc) <3> Finally, we specify a `moving_fn` aggregation which uses "the_sum" metric as its input. diff --git a/docs/reference/cat/allocation.asciidoc b/docs/reference/cat/allocation.asciidoc index 7153e99e503a8..f9574ed933398 100644 --- a/docs/reference/cat/allocation.asciidoc +++ b/docs/reference/cat/allocation.asciidoc @@ -6,7 +6,7 @@ [IMPORTANT] ==== -cat APIs are only intended for human consumption using the command line or {kib} +cat APIs are only intended for human consumption using the command line or {kib} console. They are _not_ intended for use by applications. ==== @@ -113,10 +113,10 @@ The API returns the following response: [source,txt] -------------------------------------------------- -shards disk.indices disk.used disk.avail disk.total disk.percent host ip node - 1 260b 47.3gb 43.4gb 100.7gb 46 127.0.0.1 127.0.0.1 CSUXak2 +shards disk.indices disk.used disk.avail disk.total disk.percent host ip node node.role + 1 260b 47.3gb 43.4gb 100.7gb 46 127.0.0.1 127.0.0.1 CSUXak2 himrst -------------------------------------------------- // TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/ s/46/\\d+/] -// TESTRESPONSE[s/CSUXak2/.+/ non_json] +// TESTRESPONSE[s/CSUXak2 himrst/.+/ non_json] This response shows a single shard is allocated to the one node available. diff --git a/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc b/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc index de11bbcfc2d4e..a6c13e5aae708 100644 --- a/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc +++ b/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc @@ -138,7 +138,7 @@ and that the next generation index will also be managed by {ilm-init}: <2> For each backing index we display the value of the <> configuration which will indicate if {ilm-init} takes precedence over data stream lifecycle in case both systems are configured for an index. -<3> The {ilm-ini} policy configured for this index. +<3> The {ilm-init} policy configured for this index. <4> The system that manages this index (possible values are "Index Lifecycle Management", "Data stream lifecycle", or "Unmanaged") <5> The system that will manage the next generation index (the new write index of this @@ -196,7 +196,7 @@ precedence over Data stream lifecycle. <2> We're configuring the data stream lifecycle so _new_ data streams will be managed by Data stream lifecycle. 
-We've now make sure that new data streams will be managed by Data stream lifecycle. +We've now made sure that new data streams will be managed by Data stream lifecycle. Let's update our existing `dsl-data-stream` and configure Data stream lifecycle: @@ -359,7 +359,7 @@ the index template>>. We can achieve this in two ways: 1. <> from the data streams -2. Disable Data stream lifecycle by configured the `enabled` flag to `false`. +2. Disable Data stream lifecycle by configuring the `enabled` flag to `false`. Let's implement option 2 and disable the data stream lifecycle: diff --git a/docs/reference/esql/esql-examples.asciidoc b/docs/reference/esql/esql-examples.asciidoc index 569dcf1172b38..817ec4f7b6f24 100644 --- a/docs/reference/esql/esql-examples.asciidoc +++ b/docs/reference/esql/esql-examples.asciidoc @@ -13,11 +13,11 @@ ---- FROM logs-* | WHERE event.code IS NOT NULL -| STATS event_code_count = count(event.code) by event.code,host.name -| ENRICH win_events on event.code with event_description +| STATS event_code_count = COUNT(event.code) BY event.code,host.name +| ENRICH win_events ON event.code WITH event_description | WHERE event_description IS NOT NULL and host.name IS NOT NULL -| RENAME event_description as event.description -| SORT event_code_count desc +| RENAME event_description AS event.description +| SORT event_code_count DESC | KEEP event_code_count,event.code,host.name,event.description ---- @@ -40,7 +40,7 @@ FROM logs-endpoint | WHERE process.name == "curl.exe" | STATS bytes = SUM(destination.bytes) BY destination.address | EVAL kb = bytes/1024 -| SORT kb desc +| SORT kb DESC | LIMIT 10 | KEEP kb,destination.address ---- @@ -60,7 +60,7 @@ FROM logs-endpoint ---- FROM logs-* | GROK dns.question.name "%{DATA}\\.%{GREEDYDATA:dns.question.registered_domain:string}" -| STATS unique_queries = count_distinct(dns.question.name) by dns.question.registered_domain, process.name +| STATS unique_queries = COUNT_DISTINCT(dns.question.name) BY dns.question.registered_domain, process.name | WHERE unique_queries > 10 | SORT unique_queries DESC | RENAME unique_queries AS `Unique Queries`, dns.question.registered_domain AS `Registered Domain`, process.name AS `Process` @@ -85,7 +85,7 @@ FROM logs-* | ENRICH ldap_lookup_new ON user.name | WHERE group.name IS NOT NULL | EVAL follow_up = CASE(destcount >= 100, "true","false") -| SORT destcount desc +| SORT destcount DESC | KEEP destcount, host.name, user.name, group.name, follow_up ---- diff --git a/docs/reference/esql/esql-get-started.asciidoc b/docs/reference/esql/esql-get-started.asciidoc index 82831ef943398..e54825406257f 100644 --- a/docs/reference/esql/esql-get-started.asciidoc +++ b/docs/reference/esql/esql-get-started.asciidoc @@ -7,50 +7,14 @@ This guide shows how you can use {esql} to query and aggregate your data. -TIP: To get started with {esql} without setting up your own deployment, visit -the public {esql} demo environment at -https://esql.demo.elastic.co/[esql.demo.elastic.co]. It comes with preloaded -data sets and sample queries. 
- [discrete] [[esql-getting-started-prerequisites]] === Prerequisites -To follow along with the queries in this getting started guide, first ingest -some sample data using the following requests: - -[source,console] ----- -PUT sample_data -{ - "mappings": { - "properties": { - "client.ip": { - "type": "ip" - }, - "message": { - "type": "keyword" - } - } - } -} - -PUT sample_data/_bulk -{"index": {}} -{"@timestamp": "2023-10-23T12:15:03.360Z", "client.ip": "172.21.2.162", "message": "Connected to 10.1.0.3", "event.duration": 3450233} -{"index": {}} -{"@timestamp": "2023-10-23T12:27:28.948Z", "client.ip": "172.21.2.113", "message": "Connected to 10.1.0.2", "event.duration": 2764889} -{"index": {}} -{"@timestamp": "2023-10-23T13:33:34.937Z", "client.ip": "172.21.0.5", "message": "Disconnected", "event.duration": 1232382} -{"index": {}} -{"@timestamp": "2023-10-23T13:51:54.732Z", "client.ip": "172.21.3.15", "message": "Connection error", "event.duration": 725448} -{"index": {}} -{"@timestamp": "2023-10-23T13:52:55.015Z", "client.ip": "172.21.3.15", "message": "Connection error", "event.duration": 8268153} -{"index": {}} -{"@timestamp": "2023-10-23T13:53:55.832Z", "client.ip": "172.21.3.15", "message": "Connection error", "event.duration": 5033755} -{"index": {}} -{"@timestamp": "2023-10-23T13:55:01.543Z", "client.ip": "172.21.3.15", "message": "Connected to 10.1.0.1", "event.duration": 1756467} ----- +To follow along with the queries in this guide, you can either set up your own +deployment, or use Elastic's public {esql} demo environment. + +include::{es-repo-dir}/tab-widgets/esql/esql-getting-started-widget-sample-data.asciidoc[] [discrete] [[esql-getting-started-running-queries]] @@ -58,7 +22,7 @@ PUT sample_data/_bulk In {kib}, you can use Console or Discover to run {esql} queries: -include::{es-repo-dir}/tab-widgets/esql/esql-getting-started-widget.asciidoc[] +include::{es-repo-dir}/tab-widgets/esql/esql-getting-started-widget-discover-console.asciidoc[] [discrete] [[esql-getting-started-first-query]] @@ -300,57 +264,9 @@ image::images/esql/esql-enrich.png[align="center"] Before you can use `ENRICH`, you first need to <> and <> -an <>. The following requests create and -execute a policy that links an IP address to an environment ("Development", -"QA", or "Production"): - -[source,console] ----- -PUT clientips -{ - "mappings": { - "properties": { - "client.ip": { - "type": "keyword" - }, - "env": { - "type": "keyword" - } - } - } -} - -PUT clientips/_bulk -{ "index" : {}} -{ "client.ip": "172.21.0.5", "env": "Development" } -{ "index" : {}} -{ "client.ip": "172.21.2.113", "env": "QA" } -{ "index" : {}} -{ "client.ip": "172.21.2.162", "env": "QA" } -{ "index" : {}} -{ "client.ip": "172.21.3.15", "env": "Production" } -{ "index" : {}} -{ "client.ip": "172.21.3.16", "env": "Production" } - -PUT /_enrich/policy/clientip_policy -{ - "match": { - "indices": "clientips", - "match_field": "client.ip", - "enrich_fields": ["env"] - } -} - -PUT /_enrich/policy/clientip_policy/_execute ----- - -//// -[source,console] ----- -DELETE /_enrich/policy/clientip_policy ----- -// TEST[continued] -//// +an <>. 
+
+include::{es-repo-dir}/tab-widgets/esql/esql-getting-started-widget-enrich-policy.asciidoc[]

After creating and executing a policy, you can use it with the `ENRICH`
command:

diff --git a/docs/reference/esql/esql-limitations.asciidoc b/docs/reference/esql/esql-limitations.asciidoc
index 303f9a337b6c4..c7829ab9fba81 100644
--- a/docs/reference/esql/esql-limitations.asciidoc
+++ b/docs/reference/esql/esql-limitations.asciidoc
@@ -10,20 +10,8 @@
=== Result set size limit

By default, an {esql} query returns up to 500 rows. You can increase the number
-of rows up to 10,000 using the <> command. Queries do not return
-more than 10,000 rows, regardless of the `LIMIT` command's value.
-
-This limit only applies to the number of rows that are retrieved by the query
-and displayed in Discover. Queries and aggregations run on the full data set.
-
-To overcome this limitation:
-
-* Reduce the result set size by modifying the query to only return relevant
-data. Use <> to select a smaller subset of the data.
-* Shift any post-query processing to the query itself. You can use the {esql}
-<> command to aggregate data in the query.
-* Increase the limit with the `esql.query.result_truncation_max_size` static
-cluster setting.
+of rows up to 10,000 using the <> command.
+include::processing-commands/limit.asciidoc[tag=limitation]

[discrete]
[[esql-supported-types]]
@@ -69,6 +57,7 @@ cluster setting.
** `completion`
** `dense_vector`
** `double_range`
+** `flattened`
** `float_range`
** `histogram`
** `integer_range`
@@ -112,6 +101,12 @@ you query, and query `keyword` sub-fields instead of `text` fields.

{esql} does not support querying time series data streams (TSDS).

+[discrete]
+[[esql-limitations-ccs]]
+=== {ccs-cap} is not supported
+
+{esql} does not support {ccs}.
+
[discrete]
[[esql-limitations-date-math]]
=== Date math limitations
@@ -142,6 +137,33 @@ now() - 2023-10-26

include::esql-enrich-data.asciidoc[tag=limitations]

+[discrete]
+[[esql-limitations-dissect]]
+=== Dissect limitations
+
+include::esql-process-data-with-dissect-grok.asciidoc[tag=dissect-limitations]
+
+[discrete]
+[[esql-limitations-grok]]
+=== Grok limitations
+
+include::esql-process-data-with-dissect-grok.asciidoc[tag=grok-limitations]
+
+[discrete]
+[[esql-limitations-mv]]
+=== Multivalue limitations
+
+{esql} <>, but functions
+return `null` when applied to a multivalued field, unless documented otherwise.
+Work around this limitation by converting the field to a single value with one
+of the <>.
+
+[discrete]
+[[esql-limitations-timezone]]
+=== Timezone support
+
+{esql} only supports the UTC timezone.
+
[discrete]
[[esql-limitations-kibana]]
=== Kibana limitations

diff --git a/docs/reference/esql/esql-process-data-with-dissect-grok.asciidoc b/docs/reference/esql/esql-process-data-with-dissect-grok.asciidoc
index a37989b2b2da8..8f235ed0b7add 100644
--- a/docs/reference/esql/esql-process-data-with-dissect-grok.asciidoc
+++ b/docs/reference/esql/esql-process-data-with-dissect-grok.asciidoc
@@ -120,7 +120,6 @@ include::../ingest/processors/dissect.asciidoc[tag=dissect-key-modifiers]
| `+` | Append | left | `%{+keyname} %{+keyname}` | Appends two or more fields together | <>
| `+` with `/n` | Append with order | left and right | `%{+keyname/2} %{+keyname/1}` | Appends two or more fields together in the order specified | <>
| `?` | Named skip key | left | `%{?ignoreme}` | Skips the matched value in the output.
Same behavior as `%{}`| <> -| `*` and `&` | Reference keys | left | `%{*r1} %{&r1}` | Sets the output key as value of `*` and output value of `&` | <> |====== [[esql-dissect-modifier-skip-right-padding]] @@ -139,9 +138,13 @@ include::../ingest/processors/dissect.asciidoc[tag=append-order-modifier] ====== Named skip key (`?`) include::../ingest/processors/dissect.asciidoc[tag=named-skip-key] -[[esql-reference-keys]] -====== Reference keys (`*` and `&`) -include::../ingest/processors/dissect.asciidoc[tag=reference-keys] +[[esql-dissect-limitations]] +===== Limitations + +// tag::dissect-limitations[] +The `DISSECT` command does not support +<>. +// end::dissect-limitations[] [[esql-process-data-with-grok]] ==== Process data with `GROK` @@ -161,7 +164,14 @@ matches a log line of this format: 1.2.3.4 [2023-01-23T12:15:00.000Z] Connected ---- -and results in adding the following columns to the input table: +Putting it together as an {esql} query: + +[source.merge.styled,esql] +---- +include::{esql-specs}/docs.csv-spec[tag=grokWithEscape] +---- + +`GROK` adds the following columns to the input table: [%header.monospaced.styled,format=dsv,separator=|] |=== @@ -169,6 +179,25 @@ and results in adding the following columns to the input table: 2023-01-23T12:15:00.000Z | 1.2.3.4 | Connected |=== +[NOTE] +==== + +Special regex characters in grok patterns, like `[` and `]` need to be escaped +with a `\`. For example, in the earlier pattern: +[source,txt] +---- +%{IP:ip} \[%{TIMESTAMP_ISO8601:@timestamp}\] %{GREEDYDATA:status} +---- + +In {esql} queries, the backslash character itself is a special character that +needs to be escaped with another `\`. For this example, the corresponding {esql} +query becomes: +[source.merge.styled,esql] +---- +include::{esql-specs}/docs.csv-spec[tag=grokWithEscape] +---- +==== + [[esql-grok-patterns]] ===== Grok patterns @@ -202,24 +231,6 @@ as well. Grok uses the Oniguruma regular expression library. Refer to https://github.com/kkos/oniguruma/blob/master/doc/RE[the Oniguruma GitHub repository] for the full supported regexp syntax. -[NOTE] -==== -Special regex characters like `[` and `]` need to be escaped with a `\`. For -example, in the earlier pattern: -[source,txt] ----- -%{IP:ip} \[%{TIMESTAMP_ISO8601:@timestamp}\] %{GREEDYDATA:status} ----- - -In {esql} queries, the backslash character itself is a special character that -needs to be escaped with another `\`. For this example, the corresponding {esql} -query becomes: -[source.merge.styled,esql] ----- -include::{esql-specs}/docs.csv-spec[tag=grokWithEscape] ----- -==== - [[esql-custom-patterns]] ===== Custom patterns @@ -253,6 +264,8 @@ as the `GROK` command. [[esql-grok-limitations]] ===== Limitations +// tag::grok-limitations[] The `GROK` command does not support configuring <>, or <>. The `GROK` command is not subject to <>. +// end::grok-limitations[] \ No newline at end of file diff --git a/docs/reference/esql/esql-query-api.asciidoc b/docs/reference/esql/esql-query-api.asciidoc index 437871d31a88f..afa9ab7254cfa 100644 --- a/docs/reference/esql/esql-query-api.asciidoc +++ b/docs/reference/esql/esql-query-api.asciidoc @@ -68,11 +68,6 @@ responses. See <>. `query`:: (Required, object) {esql} query to run. For syntax, refer to <>. -[[esql-search-api-time-zone]] -`time_zone`:: -(Optional, string) ISO-8601 time zone ID for the search. Several {esql} -date/time functions use this time zone. Defaults to `Z` (UTC). 
[discrete]
[role="child_attributes"]
[[esql-query-api-response-body]]

diff --git a/docs/reference/esql/esql-security-solution.asciidoc b/docs/reference/esql/esql-security-solution.asciidoc
new file mode 100644
index 0000000000000..45e8e44e44bdd
--- /dev/null
+++ b/docs/reference/esql/esql-security-solution.asciidoc
@@ -0,0 +1,41 @@
+[[esql-elastic-security]]
+=== Using {esql} in {elastic-sec}
+
+++++
+Using {esql} in {elastic-sec}
+++++
+
+You can use {esql} in {elastic-sec} to investigate events in Timeline and create
+detection rules. Use the Elastic AI Assistant to build {esql} queries, or answer
+questions about the {esql} query language.
+
+[discrete]
+[[esql-elastic-security-timeline]]
+=== Use {esql} to investigate events in Timeline
+
+You can use {esql} in Timeline to filter, transform, and analyze event data
+stored in {es}. To start using {esql}, open the **{esql}** tab. To learn
+more, refer to {security-guide}/timelines-ui.html#esql-in-timeline[Investigate
+events in Timeline].
+
+[discrete]
+[[esql-elastic-security-detection-rules]]
+=== Use {esql} to create detection rules
+
+Use the {esql} rule type to create detection rules using {esql} queries. The
+{esql} rule type supports aggregating and non-aggregating queries. To learn
+more, refer to {security-guide}/rules-ui-create.html#create-esql-rule[Create an
+{esql} rule].
+
+[discrete]
+[[esql-elastic-security-ai-assistant]]
+=== Elastic AI Assistant
+
+Use the Elastic AI Assistant to build {esql} queries, or answer questions about
+the {esql} query language. To learn more, refer to
+{security-guide}/security-assistant.html[AI Assistant].
+
+NOTE: For AI Assistant to answer questions about {esql} and write {esql}
+queries, you need to
+{security-guide}/security-assistant.html#set-up-ai-assistant[enable knowledge
+base].
\ No newline at end of file
diff --git a/docs/reference/esql/esql-syntax.asciidoc b/docs/reference/esql/esql-syntax.asciidoc
index 725b1d3ff1e03..22c9b1f100827 100644
--- a/docs/reference/esql/esql-syntax.asciidoc
+++ b/docs/reference/esql/esql-syntax.asciidoc
@@ -9,7 +9,7 @@
[[esql-basic-syntax]]
=== Basic syntax

-An {esql} query is composed of a <> followed
+An {esql} query is composed of a <> followed
by an optional series of <>,
separated by a pipe character: `|`. For example:

@@ -36,6 +36,101 @@ source-command
| processing-command1
| processing-command2
----
====

+[discrete]
+[[esql-identifiers]]
+==== Identifiers
+
+Identifiers can be used as they are and don't require quoting, unless they
+contain special characters, in which case they must be quoted with backticks
+(+{backtick}+). What "special characters" means is command dependent.
+
+For <>, <>, <>,
+<>, <> and
+<> these are: `=`, +{backtick}+, `,`, ` ` (space), `|`,
+`[`, `]`, `\t` (TAB), `\r` (CR), `\n` (LF); one `/` is allowed unquoted, but
+a sequence of two or more requires quoting.
+
+The rest of the commands - those that allow identifiers to be used in
+expressions - require quoting if the identifier contains characters other than
+letters, numbers and `_` and doesn't start with a letter, `_` or `@`.
+
+For instance:
+
+[source,esql]
+----
+// Retain just one field
+FROM index
+| KEEP 1.field
+----
+
+is legal. However, if the same field is to be used with an <>,
+it'd have to be quoted:
+
+[source,esql]
+----
+// Copy one field
+FROM index
+| EVAL my_field = `1.field`
+----
+
+[discrete]
+[[esql-literals]]
+==== Literals
+
+{esql} currently supports numeric and string literals.
+
+[discrete]
+[[esql-string-literals]]
+===== String literals
+
+A string literal is a sequence of Unicode characters delimited by double
+quotes (`"`).
+
+[source,esql]
+----
+// Filter by a string value
+FROM index
+| WHERE first_name == "Georgi"
+----
+
+If the literal string itself contains quotes, these need to be escaped (`\\"`).
+{esql} also supports the triple-quotes (`"""`) delimiter, for convenience:
+
+[source,esql]
+----
+ROW name = """Indiana "Indy" Jones"""
+----
+
+The special characters CR, LF and TAB can be provided with the usual escaping:
+`\r`, `\n`, `\t`, respectively.
+
+[discrete]
+[[esql-numeric-literals]]
+===== Numerical literals
+
+Numeric literals are accepted in decimal notation and in scientific notation
+with the exponent marker (`e` or `E`), starting either with a digit, a decimal
+point `.`, or the negative sign `-`:
+
+[source, sql]
+----
+1969    -- integer notation
+3.14    -- decimal notation
+.1234   -- decimal notation starting with decimal point
+4E5     -- scientific notation (with exponent marker)
+1.2e-3  -- scientific notation with decimal point
+-.1e2   -- scientific notation starting with the negative sign
+----
+
+The integer numeric literals are implicitly converted to the `integer`, `long`
+or the `double` type, whichever can first accommodate the literal's value.
+
+The floating point literals are implicitly converted to the `double` type.
+
+To obtain constant values of different types, use one of the numeric
+<>.
+
+
[discrete]
[[esql-comments]]
==== Comments

diff --git a/docs/reference/esql/esql-using.asciidoc b/docs/reference/esql/esql-using.asciidoc
index f586f3a28de5c..235c7defe559b 100644
--- a/docs/reference/esql/esql-using.asciidoc
+++ b/docs/reference/esql/esql-using.asciidoc
@@ -6,11 +6,16 @@ Information about using the <>.

<>::
Using {esql} in {kib} to query and aggregate your data, create visualizations,
-and set up alerts.
+and set up alerts.
+
+<>::
+Using {esql} in {elastic-sec} to investigate events in Timeline, create
+detection rules, and build {esql} queries using Elastic AI Assistant.

<>::
Using the <> to list and cancel {esql} queries.

include::esql-rest.asciidoc[]
include::esql-kibana.asciidoc[]
-include::task-management.asciidoc[]
\ No newline at end of file
+include::esql-security-solution.asciidoc[]
+include::task-management.asciidoc[]
diff --git a/docs/reference/esql/functions/case.asciidoc b/docs/reference/esql/functions/case.asciidoc
index b243adf875cb4..84ff083147cb9 100644
--- a/docs/reference/esql/functions/case.asciidoc
+++ b/docs/reference/esql/functions/case.asciidoc
@@ -4,7 +4,7 @@

*Syntax*

-[source,txt]
+[source,esql]
----
CASE(condition1, value1[, ..., conditionN, valueN][, default_value])
----
@@ -27,7 +27,8 @@
Accepts pairs of conditions and values. The function returns the value that
belongs to the first condition that evaluates to `true`.

If the number of arguments is odd, the last argument is the default value which
-is returned when no condition matches.
+is returned when no condition matches. If the number of arguments is even, and
+no condition matches, the function returns `null`.
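A short sketch of both arities, borrowing the `still_hired` boolean field used elsewhere in these docs (illustrative, not one of the official spec-file examples):

[source,esql]
----
FROM employees
| EVAL status  = CASE(still_hired, "active")            // even arity: null when no condition matches
| EVAL status2 = CASE(still_hired, "active", "former")  // odd arity: "former" is the default value
----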
*Example* diff --git a/docs/reference/esql/functions/date_parse.asciidoc b/docs/reference/esql/functions/date_parse.asciidoc index c74656ff1dbd7..9580ae238b663 100644 --- a/docs/reference/esql/functions/date_parse.asciidoc +++ b/docs/reference/esql/functions/date_parse.asciidoc @@ -4,7 +4,7 @@ *Syntax* -[source,txt] +[source,esql] ---- DATE_PARSE([format,] date_string) ---- diff --git a/docs/reference/esql/functions/date_trunc.asciidoc b/docs/reference/esql/functions/date_trunc.asciidoc index cacfefe73d0fd..ad0e1eb1170b4 100644 --- a/docs/reference/esql/functions/date_trunc.asciidoc +++ b/docs/reference/esql/functions/date_trunc.asciidoc @@ -8,6 +8,6 @@ Rounds down a date to the closest interval. Intervals can be expressed using the ---- FROM employees | EVAL year_hired = DATE_TRUNC(1 year, hire_date) -| STATS count(emp_no) BY year_hired +| STATS COUNT(emp_no) BY year_hired | SORT year_hired ---- diff --git a/docs/reference/esql/functions/starts_with.asciidoc b/docs/reference/esql/functions/starts_with.asciidoc index 38cee79ea63f8..f98a76ef68206 100644 --- a/docs/reference/esql/functions/starts_with.asciidoc +++ b/docs/reference/esql/functions/starts_with.asciidoc @@ -2,7 +2,7 @@ [[esql-starts_with]] === `STARTS_WITH` [.text-center] -image::esql/functions/signature/ends_with.svg[Embedded,opts=inline] +image::esql/functions/signature/starts_with.svg[Embedded,opts=inline] Returns a boolean that indicates whether a keyword string starts with another string: diff --git a/docs/reference/esql/index.asciidoc b/docs/reference/esql/index.asciidoc index 2946f4e61d629..dcbe426b1bcac 100644 --- a/docs/reference/esql/index.asciidoc +++ b/docs/reference/esql/index.asciidoc @@ -55,14 +55,14 @@ fields>> and <>. And guidance for GROK>> and <>. <>:: -An overview of using the <>, <>, and -<>. +An overview of using the <>, <>, +<>, and <>. <>:: The current limitations of {esql}. <>:: -A few examples of what you can with {esql}. +A few examples of what you can do with {esql}. include::esql-get-started.asciidoc[] diff --git a/docs/reference/esql/processing-commands/dissect.asciidoc b/docs/reference/esql/processing-commands/dissect.asciidoc index eca10c201c968..c48b72af0de7e 100644 --- a/docs/reference/esql/processing-commands/dissect.asciidoc +++ b/docs/reference/esql/processing-commands/dissect.asciidoc @@ -4,9 +4,9 @@ **Syntax** -[source,txt] +[source,esql] ---- -DISSECT input "pattern" [ append_separator=""] +DISSECT input "pattern" [APPEND_SEPARATOR=""] ---- *Parameters* @@ -16,9 +16,9 @@ The column that contains the string you want to structure. If the column has multiple values, `DISSECT` will process each value. `pattern`:: -A dissect pattern. +A <>. -`append_separator=""`:: +``:: A string used as the separator between appended values, when using the <>. *Description* @@ -29,7 +29,7 @@ delimiter-based pattern, and extracts the specified keys as columns. Refer to <> for the syntax of dissect patterns. -*Example* +*Examples* // tag::examples[] The following example parses a string that contains a timestamp, some text, and diff --git a/docs/reference/esql/processing-commands/drop.asciidoc b/docs/reference/esql/processing-commands/drop.asciidoc index 50e3b27fb1b28..4787c5f137314 100644 --- a/docs/reference/esql/processing-commands/drop.asciidoc +++ b/docs/reference/esql/processing-commands/drop.asciidoc @@ -2,7 +2,23 @@ [[esql-drop]] === `DROP` -Use `DROP` to remove columns: +**Syntax** + +[source,esql] +---- +DROP columns +---- + +*Parameters* + +`columns`:: +A comma-separated list of columns to remove. 
Supports wildcards. + +*Description* + +The `DROP` processing command removes one or more columns. + +*Examples* [source,esql] ---- diff --git a/docs/reference/esql/processing-commands/enrich.asciidoc b/docs/reference/esql/processing-commands/enrich.asciidoc index df402f3b1bd50..603683858b8c0 100644 --- a/docs/reference/esql/processing-commands/enrich.asciidoc +++ b/docs/reference/esql/processing-commands/enrich.asciidoc @@ -4,7 +4,7 @@ **Syntax** -[source,txt] +[source,esql] ---- ENRICH policy [ON match_field] [WITH [new_name1 = ]field1, [new_name2 = ]field2, ...] ---- @@ -15,18 +15,18 @@ ENRICH policy [ON match_field] [WITH [new_name1 = ]field1, [new_name2 = ]field2, The name of the enrich policy. You need to <> and <> the enrich policy first. -`ON match_field`:: +`match_field`:: The match field. `ENRICH` uses its value to look for records in the enrich index. If not specified, the match will be performed on the column with the same name as the `match_field` defined in the <>. -`WITH fieldX`:: +`fieldX`:: The enrich fields from the enrich index that are added to the result as new columns. If a column with the same name as the enrich field already exists, the existing column will be replaced by the new column. If not specified, each of the enrich fields defined in the policy is added -`new_nameX =`:: +`new_nameX`:: Enables you to change the name of the column that's added for each of the enrich fields. Defaults to the enrich field name. @@ -74,7 +74,7 @@ include::{esql-specs}/docs-IT_tests_only.csv-spec[tag=enrich_on-result] By default, each of the enrich fields defined in the policy is added as a column. To explicitly select the enrich fields that are added, use -`WITH , ...`: +`WITH , , ...`: [source.merge.styled,esql] ---- diff --git a/docs/reference/esql/processing-commands/eval.asciidoc b/docs/reference/esql/processing-commands/eval.asciidoc index a0a78f2a3bf97..eb69a587014ab 100644 --- a/docs/reference/esql/processing-commands/eval.asciidoc +++ b/docs/reference/esql/processing-commands/eval.asciidoc @@ -1,7 +1,30 @@ [discrete] [[esql-eval]] === `EVAL` -`EVAL` enables you to append new columns: + +**Syntax** + +[source,esql] +---- +EVAL column1 = value1[, ..., columnN = valueN] +---- + +*Parameters* + +`columnX`:: +The column name. + +`valueX`:: +The value for the column. Can be a literal, an expression, or a +<>. + +*Description* + +The `EVAL` processing command enables you to append new columns with calculated +values. `EVAL` supports various functions for calculating values. Refer to +<> for more information. + +*Examples* [source.merge.styled,esql] ---- @@ -23,8 +46,3 @@ include::{esql-specs}/docs.csv-spec[tag=evalReplace] |=== include::{esql-specs}/docs.csv-spec[tag=evalReplace-result] |=== - -[discrete] -==== Functions -`EVAL` supports various functions for calculating values. Refer to -<> for more information. 
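Tying the reworked `EVAL` reference together, a sketch that appends calculated columns and then trims the output; the `height` field is borrowed from the employee examples used throughout these pages and is an assumption here:

[source,esql]
----
FROM employees
| EVAL height_feet = height * 3.281, height_cm = height * 100
| KEEP first_name, height_feet, height_cm
----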
diff --git a/docs/reference/esql/processing-commands/grok.asciidoc b/docs/reference/esql/processing-commands/grok.asciidoc index c95fe59f888ce..d5d58a9eaee12 100644 --- a/docs/reference/esql/processing-commands/grok.asciidoc +++ b/docs/reference/esql/processing-commands/grok.asciidoc @@ -4,7 +4,7 @@ **Syntax** -[source,txt] +[source,esql] ---- GROK input "pattern" ---- diff --git a/docs/reference/esql/processing-commands/keep.asciidoc b/docs/reference/esql/processing-commands/keep.asciidoc index 3e54e5a7d1c5c..7515583b1bfd1 100644 --- a/docs/reference/esql/processing-commands/keep.asciidoc +++ b/docs/reference/esql/processing-commands/keep.asciidoc @@ -2,11 +2,25 @@ [[esql-keep]] === `KEEP` -The `KEEP` command enables you to specify what columns are returned and the -order in which they are returned. +**Syntax** -To limit the columns that are returned, use a comma-separated list of column -names. The columns are returned in the specified order: +[source,esql] +---- +KEEP columns +---- + +*Parameters* +`columns`:: +A comma-separated list of columns to keep. Supports wildcards. + +*Description* + +The `KEEP` processing command enables you to specify what columns are returned +and the order in which they are returned. + +*Examples* + +The columns are returned in the specified order: [source.merge.styled,esql] ---- @@ -27,7 +41,7 @@ include::{esql-specs}/docs.csv-spec[tag=keepWildcard] The asterisk wildcard (`*`) by itself translates to all columns that do not match the other arguments. This query will first return all columns with a name -that starts with an h, followed by all other columns: +that starts with `h`, followed by all other columns: [source,esql] ---- diff --git a/docs/reference/esql/processing-commands/limit.asciidoc b/docs/reference/esql/processing-commands/limit.asciidoc index c02b534af59e1..5f659fc493a75 100644 --- a/docs/reference/esql/processing-commands/limit.asciidoc +++ b/docs/reference/esql/processing-commands/limit.asciidoc @@ -2,12 +2,46 @@ [[esql-limit]] === `LIMIT` -The `LIMIT` processing command enables you to limit the number of rows: +**Syntax** [source,esql] ---- -include::{esql-specs}/docs.csv-spec[tag=limit] +LIMIT max_number_of_rows ---- -If not specified, `LIMIT` defaults to `500`. A single query will not return -more than 10,000 rows, regardless of the `LIMIT` value. +*Parameters* + +`max_number_of_rows`:: +The maximum number of rows to return. + +*Description* + +The `LIMIT` processing command enables you to limit the number of rows that are +returned. +// tag::limitation[] +Queries do not return more than 10,000 rows, regardless of the `LIMIT` command's +value. + +This limit only applies to the number of rows that are retrieved by the query. +Queries and aggregations run on the full data set. + +To overcome this limitation: + +* Reduce the result set size by modifying the query to only return relevant +data. Use <> to select a smaller subset of the data. +* Shift any post-query processing to the query itself. You can use the {esql} +<> command to aggregate data in the query. 
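As an illustration of the second point, a sketch that pushes the aggregation into the query instead of post-processing thousands of raw rows client-side (field names reuse the getting-started `sample_data` index):

[source,esql]
----
FROM sample_data
| WHERE message == "Connection error"
| STATS error_count = COUNT(*) BY client.ip
----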
+ +The default and maximum limits can be changed using these dynamic cluster +settings: + +* `esql.query.result_truncation_default_size` +* `esql.query.result_truncation_max_size` +// end::limitation[] + +*Example* + +[source,esql] +---- +include::{esql-specs}/docs.csv-spec[tag=limit] +---- diff --git a/docs/reference/esql/processing-commands/mv_expand.asciidoc b/docs/reference/esql/processing-commands/mv_expand.asciidoc index d62b28aabe440..46dc4fd0a33cf 100644 --- a/docs/reference/esql/processing-commands/mv_expand.asciidoc +++ b/docs/reference/esql/processing-commands/mv_expand.asciidoc @@ -2,7 +2,24 @@ [[esql-mv_expand]] === `MV_EXPAND` -The `MV_EXPAND` processing command expands multivalued fields into one row per value, duplicating other fields: +**Syntax** + +[source,esql] +---- +MV_EXPAND column +---- + +*Parameters* + +`column`:: +The multivalued column to expand. + +*Description* + +The `MV_EXPAND` processing command expands multivalued columns into one row per +value, duplicating other columns. + +*Example* [source.merge.styled,esql] ---- diff --git a/docs/reference/esql/processing-commands/rename.asciidoc b/docs/reference/esql/processing-commands/rename.asciidoc index 1dda424317976..773fe8b640f75 100644 --- a/docs/reference/esql/processing-commands/rename.asciidoc +++ b/docs/reference/esql/processing-commands/rename.asciidoc @@ -2,22 +2,33 @@ [[esql-rename]] === `RENAME` -Use `RENAME` to rename a column using the following syntax: +**Syntax** [source,esql] ---- -RENAME AS +RENAME old_name1 AS new_name1[, ..., old_nameN AS new_nameN] ---- -For example: +*Parameters* + +`old_nameX`:: +The name of a column you want to rename. + +`new_nameX`:: +The new name of the column. + +*Description* + +The `RENAME` processing command renames one or more columns. If a column with +the new name already exists, it will be replaced by the new column. + +*Examples* [source,esql] ---- include::{esql-specs}/docs.csv-spec[tag=rename] ---- -If a column with the new name already exists, it will be replaced by the new -column. Multiple columns can be renamed with a single `RENAME` command: diff --git a/docs/reference/esql/processing-commands/sort.asciidoc b/docs/reference/esql/processing-commands/sort.asciidoc index 76a9193375932..fea7bfaf0c65f 100644 --- a/docs/reference/esql/processing-commands/sort.asciidoc +++ b/docs/reference/esql/processing-commands/sort.asciidoc @@ -1,35 +1,59 @@ [discrete] [[esql-sort]] === `SORT` -Use the `SORT` command to sort rows on one or more fields: + +**Syntax** + +[source,esql] +---- +SORT column1 [ASC/DESC][NULLS FIRST/NULLS LAST][, ..., columnN [ASC/DESC][NULLS FIRST/NULLS LAST]] +---- + +*Parameters* + +`columnX`:: +The column to sort on. + +*Description* + +The `SORT` processing command sorts a table on one or more columns. + +The default sort order is ascending. Use `ASC` or `DESC` to specify an explicit +sort order. + +Two rows with the same sort key are considered equal. You can provide additional +sort expressions to act as tie breakers. + +Sorting on multivalued columns uses the lowest value when sorting ascending and +the highest value when sorting descending. + +By default, `null` values are treated as being larger than any other value. With +an ascending sort order, `null` values are sorted last, and with a descending +sort order, `null` values are sorted first. You can change that by providing +`NULLS FIRST` or `NULLS LAST`. + +*Examples* [source,esql] ---- include::{esql-specs}/docs.csv-spec[tag=sort] ---- -The default sort order is ascending. 
Set an explicit sort order using `ASC` or
-`DESC`:
+Explicitly sorting in ascending order with `ASC`:

[source,esql]
----
include::{esql-specs}/docs.csv-spec[tag=sortDesc]
----

-Two rows with the same sort key are considered equal. You can provide additional
-sort expressions to act as tie breakers:
+Providing additional sort expressions to act as tie breakers:

[source,esql]
----
include::{esql-specs}/docs.csv-spec[tag=sortTie]
----

-[discrete]
-==== `null` values
-By default, `null` values are treated as being larger than any other value. With
-an ascending sort order, `null` values are sorted last, and with a descending
-sort order, `null` values are sorted first. You can change that by providing
-`NULLS FIRST` or `NULLS LAST`:
+Sorting `null` values first using `NULLS FIRST`:

[source,esql]
----
diff --git a/docs/reference/esql/processing-commands/stats.asciidoc b/docs/reference/esql/processing-commands/stats.asciidoc
index e0a9bbb52b03e..cbdb74d350fb1 100644
--- a/docs/reference/esql/processing-commands/stats.asciidoc
+++ b/docs/reference/esql/processing-commands/stats.asciidoc
@@ -1,8 +1,49 @@
[discrete]
[[esql-stats-by]]
=== `STATS ... BY`
-Use `STATS ... BY` to group rows according to a common value and calculate one
-or more aggregated values over the grouped rows.
+
+**Syntax**
+
+[source,esql]
+----
+STATS [column1 =] expression1[, ..., [columnN =] expressionN] [BY grouping_column1[, ..., grouping_columnN]]
+----
+
+*Parameters*
+
+`columnX`::
+The name by which the aggregated value is returned. If omitted, the name is
+equal to the corresponding expression (`expressionX`).
+
+`expressionX`::
+An expression that computes an aggregated value.
+
+`grouping_columnX`::
+The column containing the values to group by.
+
+*Description*
+
+The `STATS ... BY` processing command groups rows according to a common value
+and calculates one or more aggregated values over the grouped rows. If `BY` is
+omitted, the output table contains exactly one row with the aggregations applied
+over the entire dataset.
+
+The following aggregation functions are supported:
+
+include::../functions/aggregation-functions.asciidoc[tag=agg_list]
+
+NOTE: `STATS` without any groups is much much faster than adding a group.
+
+NOTE: Grouping on a single column is currently much more optimized than grouping
+  on many columns. In some tests we have seen grouping on a single `keyword`
+  column to be five times faster than grouping on two `keyword` columns. Do
+  not try to work around this by combining the two columns together with
+  something like <> and then grouping - that is not going to be
+  faster.
+
+*Examples*
+
+Calculating a statistic and grouping by the values of another column:

[source.merge.styled,esql]
----
@@ -13,8 +54,8 @@ include::{esql-specs}/docs.csv-spec[tag=stats]
----
[%header.monospaced.styled,format=dsv,separator=|]
|===
include::{esql-specs}/docs.csv-spec[tag=stats-result]
|===

-If `BY` is omitted, the output table contains exactly one row with the
-aggregations applied over the entire dataset:
+Omitting `BY` returns one row with the aggregations applied over the entire
+dataset:

[source.merge.styled,esql]
----
@@ -39,15 +80,3 @@ keyword family fields):
----
include::{esql-specs}/docs.csv-spec[tag=statsGroupByMultipleValues]
----
-
-The following aggregation functions are supported:
-
-include::../functions/aggregation-functions.asciidoc[tag=agg_list]
-
-NOTE: `STATS` without any groups is much much faster than adding group.
-
-NOTE: Grouping on a single field is currently much more optimized than grouping
-  on many fields.
In some tests we've seen grouping on a single `keyword` - field to be five times faster than grouping on two `keyword` fields. Don't - try to work around this combining the two fields together with something - like <> and then grouping - that's not going to be faster. diff --git a/docs/reference/esql/processing-commands/where.asciidoc b/docs/reference/esql/processing-commands/where.asciidoc index 8dd55df12b9e7..e723a977bf99c 100644 --- a/docs/reference/esql/processing-commands/where.asciidoc +++ b/docs/reference/esql/processing-commands/where.asciidoc @@ -2,8 +2,27 @@ [[esql-where]] === `WHERE` -Use `WHERE` to produce a table that contains all the rows from the input table -for which the provided condition evaluates to `true`: +**Syntax** + +[source,esql] +---- +WHERE expression +---- + +*Parameters* + +`expression`:: +A boolean expression. + +*Description* + +The `WHERE` processing command produces a table that contains all the rows from +the input table for which the provided condition evaluates to `true`. + +`WHERE` supports various <> and +<>. + +*Examples* [source,esql] ---- @@ -17,15 +36,7 @@ Which, if `still_hired` is a boolean field, can be simplified to: include::{esql-specs}/docs.csv-spec[tag=whereBoolean] ---- -[discrete] -==== Operators - -Refer to <> for an overview of the supported operators. - -[discrete] -==== Functions -`WHERE` supports various functions for calculating values. Refer to -<> for more information. +Using a function: [source,esql] ---- diff --git a/docs/reference/esql/source-commands/from.asciidoc b/docs/reference/esql/source-commands/from.asciidoc index 5718bfc27ac1c..6f54a42ddad35 100644 --- a/docs/reference/esql/source-commands/from.asciidoc +++ b/docs/reference/esql/source-commands/from.asciidoc @@ -2,10 +2,47 @@ [[esql-from]] === `FROM` -The `FROM` source command returns a table with up to 10,000 documents from a -data stream, index, or alias. Each row in the resulting table represents a -document. Each column corresponds to a field, and can be accessed by the name -of that field. +**Syntax** + +[source,esql] +---- +FROM index_pattern [METADATA fields] +---- + +*Parameters* + +`index_pattern`:: +A list of indices, data streams or aliases. Supports wildcards and date math. + +`fields`:: +A comma-separated list of <> to retrieve. + +*Description* + +The `FROM` source command returns a table with data from a data stream, index, +or alias. Each row in the resulting table represents a document. Each column +corresponds to a field, and can be accessed by the name of that field. + +[NOTE] +==== +By default, an {esql} query without an explicit <> uses an implicit +limit of 500. This applies to `FROM` too. A `FROM` command without `LIMIT`: + +[source,esql] +---- +FROM employees +---- + +is executed as: + +[source,esql] +---- +FROM employees +| LIMIT 500 +---- +==== + +*Examples* [source,esql] ---- diff --git a/docs/reference/esql/source-commands/row.asciidoc b/docs/reference/esql/source-commands/row.asciidoc index edfe5ecbf7cf3..adce844f365b8 100644 --- a/docs/reference/esql/source-commands/row.asciidoc +++ b/docs/reference/esql/source-commands/row.asciidoc @@ -2,9 +2,29 @@ [[esql-row]] === `ROW` +**Syntax** + +[source,esql] +---- +ROW column1 = value1[, ..., columnN = valueN] +---- + +*Parameters* + +`columnX`:: +The column name. + +`valueX`:: +The value for the column. Can be a literal, an expression, or a +<>. + +*Description* + The `ROW` source command produces a row with one or more columns with values that you specify. This can be useful for testing. 
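A minimal sketch (the values are arbitrary):

[source,esql]
----
ROW a = 1, b = "two", c = null
----

Expressions work too, for example `ROW a = ROUND(1.23, 1)`.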
+*Examples* + [source.merge.styled,esql] ---- include::{esql-specs}/row.csv-spec[tag=example] diff --git a/docs/reference/esql/source-commands/show.asciidoc b/docs/reference/esql/source-commands/show.asciidoc index 956baf628e9f3..ea8c83ceb772a 100644 --- a/docs/reference/esql/source-commands/show.asciidoc +++ b/docs/reference/esql/source-commands/show.asciidoc @@ -1,10 +1,35 @@ [discrete] [[esql-show]] -=== `SHOW ` +=== `SHOW` -The `SHOW ` source command returns information about the deployment and +**Syntax** + +[source,esql] +---- +SHOW item +---- + +*Parameters* + +`item`:: +Can be `INFO` or `FUNCTIONS`. + +*Description* + +The `SHOW` source command returns information about the deployment and its capabilities: * Use `SHOW INFO` to return the deployment's version, build date and hash. * Use `SHOW FUNCTIONS` to return a list of all supported functions and a synopsis of each function. + +*Examples* + +[source.merge.styled,esql] +---- +include::{esql-specs}/show.csv-spec[tag=showFunctionsFiltered] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/show.csv-spec[tag=showFunctionsFiltered-result] +|=== diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index 4f15bb1c1d694..31fe747feb63b 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -16,7 +16,10 @@ Index level settings can be set per-index. Settings may be: _static_:: They can only be set at index creation time or on a -<>. +<>, or by using the +<> API with the +`reopen` query parameter set to `true` (which automatically +closes and reopens impacted indices). _dynamic_:: diff --git a/docs/reference/indices/resolve.asciidoc b/docs/reference/indices/resolve.asciidoc index 1f405a2e49a7a..c919bba5c7651 100644 --- a/docs/reference/indices/resolve.asciidoc +++ b/docs/reference/indices/resolve.asciidoc @@ -88,9 +88,11 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=allow-no-indices] + Defaults to `true`. -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=ignore_throttled] +`ignore_throttled`:: +(Optional, Boolean) If `true`, concrete, expanded or aliased indices are +ignored when frozen. Defaults to `false`. + -Defaults to `false`. +deprecated:[7.16.0] [[resolve-index-api-example]] ==== {api-examples-title} diff --git a/docs/reference/indices/update-settings.asciidoc b/docs/reference/indices/update-settings.asciidoc index 45531dd58ccfc..1ac9ecbb6a6a3 100644 --- a/docs/reference/indices/update-settings.asciidoc +++ b/docs/reference/indices/update-settings.asciidoc @@ -60,6 +60,16 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailab (Optional, Boolean) If `true`, existing index settings remain unchanged. Defaults to `false`. +`reopen`:: +(Optional, Boolean) If `true`, then any static settings that would ordinarily only +be updated on closed indices will be updated by automatically closing and reopening +the affected indices. If `false`, attempts to update static settings on open indices +will fail. Defaults to `false`. + +NOTE: Changing index settings on an automatically closed index using the `reopen` +parameter will result in the index becoming unavailable momentarily while the index +is in the process of reopening. 
+
include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms]

diff --git a/docs/reference/inference/post-inference.asciidoc b/docs/reference/inference/post-inference.asciidoc
index f26a73d093091..f8515a8b33c39 100644
--- a/docs/reference/inference/post-inference.asciidoc
+++ b/docs/reference/inference/post-inference.asciidoc
@@ -25,9 +25,9 @@ Performs an inference task on an input text by using an {infer} model.
[[post-inference-api-desc]]
==== {api-description-title}

-The perform {infer} API enables you to use {infer} models to perform specific
-tasks on data that you provide as an input. The API returns a response with the
-resutls of the tasks. The {infer} model you use can perform one specific task
+The perform {infer} API enables you to use {infer} models to perform specific
+tasks on data that you provide as an input. The API returns a response with the
+results of the tasks. The {infer} model you use can perform one specific task
that has been defined when the model was created with the <>.

@@ -50,8 +50,9 @@ The type of {infer} task that the model performs.
== {api-request-body-title}

`input`::
-(Required, string)
+(Required, array of strings)
The text on which you want to perform the {infer} task.
+`input` can be a single string or an array.

[discrete]
@@ -77,23 +78,26 @@ The API returns the following response:

[source,console-result]
------------------------------------------------------------
{
-  "sparse_embedding": {
-    "port": 2.1259406,
-    "sky": 1.7073475,
-    "color": 1.6922266,
-    "dead": 1.6247464,
-    "television": 1.3525393,
-    "above": 1.2425821,
-    "tuned": 1.1440028,
-    "colors": 1.1218185,
-    "tv": 1.0111054,
-    "ports": 1.0067928,
-    "poem": 1.0042328,
-    "channel": 0.99471164,
-    "tune": 0.96235967,
-    "scene": 0.9020516,
+  "sparse_embedding": [
+    {
+      "port": 2.1259406,
+      "sky": 1.7073475,
+      "color": 1.6922266,
+      "dead": 1.6247464,
+      "television": 1.3525393,
+      "above": 1.2425821,
+      "tuned": 1.1440028,
+      "colors": 1.1218185,
+      "tv": 1.0111054,
+      "ports": 1.0067928,
+      "poem": 1.0042328,
+      "channel": 0.99471164,
+      "tune": 0.96235967,
+      "scene": 0.9020516,
+      (...)
+    },
    (...)
-  }
+  ]
}
------------------------------------------------------------
-// NOTCONSOLE
\ No newline at end of file
+// NOTCONSOLE
diff --git a/docs/reference/ingest/search-inference-processing.asciidoc b/docs/reference/ingest/search-inference-processing.asciidoc
index fad11b28858b7..48505ab314c1e 100644
--- a/docs/reference/ingest/search-inference-processing.asciidoc
+++ b/docs/reference/ingest/search-inference-processing.asciidoc
@@ -54,7 +54,7 @@ A common use case is a user searching FAQs, or a support agent searching a knowl
The diagram below shows how documents are processed during ingestion.

// Original diagram: https://whimsical.com/ml-in-enterprise-search-ErCetPqrcCPu2QYHvAwrgP@2bsEvpTYSt1Hiuq6UBf68tUWvFiXdzLt6ao
-image::../images/ingest/document-enrichment-diagram.png["ML inference pipeline diagram"]
+image::images/ingest/document-enrichment-diagram.png["ML inference pipeline diagram"]

* Documents are processed by the `my-index-0001` pipeline, which happens automatically when indexing through an Elastic connector or crawler.
* The `_run_ml_inference` field is set to `true` to ensure the ML inference pipeline (`my-index-0001@ml-inference`) is executed.
@@ -95,7 +95,7 @@ Once your index-specific ML inference pipeline is ready, you can add inference p To add an inference processor to the ML inference pipeline, click the *Add Inference Pipeline* button in the *Machine Learning Inference Pipelines* card. [role="screenshot"] -image::../images/ingest/document-enrichment-add-inference-pipeline.png["Add Inference Pipeline"] +image::images/ingest/document-enrichment-add-inference-pipeline.png["Add Inference Pipeline"] Here, you'll be able to: diff --git a/docs/reference/ingest/search-ingest-pipelines.asciidoc b/docs/reference/ingest/search-ingest-pipelines.asciidoc index 049a74670581d..f37e07f632810 100644 --- a/docs/reference/ingest/search-ingest-pipelines.asciidoc +++ b/docs/reference/ingest/search-ingest-pipelines.asciidoc @@ -22,7 +22,7 @@ To find this tab in the Kibana UI: The tab is highlighted in this screenshot: [.screenshot] -image::../images/ingest/ingest-pipeline-ent-search-ui.png[align="center"] +image::images/ingest/ingest-pipeline-ent-search-ui.png[align="center"] [discrete#ingest-pipeline-search-in-enterprise-search] === Overview diff --git a/docs/reference/landing-page.asciidoc b/docs/reference/landing-page.asciidoc index 1ddd0cfa28128..a53a5770fe030 100644 --- a/docs/reference/landing-page.asciidoc +++ b/docs/reference/landing-page.asciidoc @@ -105,6 +105,9 @@
  &#8226; Troubleshooting
+  &#8226; Enterprise Search server
@@ -119,6 +122,12 @@
  &#8226; Adding data to Elasticsearch
+  &#8226; Connectors
+  &#8226; Web crawler
  &#8226; Data streams
@@ -145,6 +154,12 @@
  &#8226; Query data with the Query DSL, ES|QL, EQL, or SQL
+  &#8226; Search applications
+  &#8226; Search analytics
  &#8226; Aggregations
@@ -207,7 +222,7 @@

    diff --git a/docs/reference/mapping/fields/source-field.asciidoc b/docs/reference/mapping/fields/source-field.asciidoc index f905be3d452ba..ec824e421e015 100644 --- a/docs/reference/mapping/fields/source-field.asciidoc +++ b/docs/reference/mapping/fields/source-field.asciidoc @@ -43,6 +43,8 @@ available then a number of features are not supported: * The <>, <>, and <> APIs. +* In the {kib} link:{kibana-ref}/discover.html[Discover] application, field data will not be displayed. + * On the fly <>. * The ability to reindex from one Elasticsearch index to another, either diff --git a/docs/reference/migration/migrate_8_11.asciidoc b/docs/reference/migration/migrate_8_11.asciidoc index a353d1a6a87fa..098456e1aca42 100644 --- a/docs/reference/migration/migrate_8_11.asciidoc +++ b/docs/reference/migration/migrate_8_11.asciidoc @@ -9,12 +9,61 @@ your application to {es} 8.11. See also <> and <>. -coming::[8.11.0] - [discrete] [[breaking-changes-8.11]] === Breaking changes -There are no breaking changes in {es} 8.11. +The following changes in {es} 8.11 might affect your applications +and prevent them from operating normally. +Before upgrading to 8.11, review these changes and take the described steps +to mitigate the impact. + + +There are no notable breaking changes in {es} 8.11. +But there are some less critical breaking changes. + +[discrete] +[[breaking_811_rest_api_changes]] +==== REST API changes + +[[remove_transport_versions_from_cluster_state_api]] +.Remove `transport_versions` from cluster state API +[%collapsible] +==== +*Details* + +The `transport_versions` subobject of the response to `GET _cluster/state` has been replaced by the `nodes_versions` subobject. + +*Impact* + +If needed, retrieve the per-node transport versions from the `nodes_versions` subobject. +==== + + +[discrete] +[[deprecated-8.11]] +=== Deprecations + +The following functionality has been deprecated in {es} 8.11 +and will be removed in a future version. +While this won't have an immediate impact on your applications, +we strongly encourage you to take the described steps to update your code +after upgrading to 8.11. + +To find out if you are using any deprecated functionality, +enable <>. + +[discrete] +[[deprecations_811_rollup]] +==== Rollup deprecations + +[[rollup_functionality_deprecated]] +.Rollup functionality is now deprecated +[%collapsible] +==== +*Details* + +<> has been deprecated and will be removed in a future release. Previously, rollups were available in technical preview. + +*Impact* + +Use <> to reduce storage costs for time series data by storing it at reduced granularity. +==== diff --git a/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc index ec2ef3631f0c6..05e23d901d5d3 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc @@ -66,8 +66,6 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] Defaults to `open`. 
include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=ignore_throttled] -+ -deprecated:[7.16.0] `ignore_unavailable`:: (Optional, Boolean) If `true`, unavailable indices (missing or closed) are diff --git a/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc index 48893f1aadb82..5e6121cd01ac9 100644 --- a/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc @@ -55,8 +55,6 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] Defaults to `open`. include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=ignore_throttled] -+ -deprecated:[7.16.0] `ignore_unavailable`:: (Optional, Boolean) If `true`, unavailable indices (missing or closed) are diff --git a/docs/reference/ml/trained-models/apis/put-trained-models.asciidoc b/docs/reference/ml/trained-models/apis/put-trained-models.asciidoc index 7da46e13a8ce4..45517b99c2177 100644 --- a/docs/reference/ml/trained-models/apis/put-trained-models.asciidoc +++ b/docs/reference/ml/trained-models/apis/put-trained-models.asciidoc @@ -443,121 +443,8 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field] (Optional, object) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization] + -.Properties of tokenization -[%collapsible%open] -====== -`bert`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert] -+ -.Properties of bert -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-with-special-tokens] -======= -`roberta`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta] -+ -.Properties of roberta -[%collapsible%open] -======= -`add_prefix_space`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-add-prefix-space] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-with-special-tokens] -======= -`mpnet`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet] -+ -.Properties of mpnet -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) 
-include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet-with-special-tokens] -======= -`xlm_roberta`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-xlm-roberta] -+ -.Properties of xlm_roberta -[%collapsible%open] -======= -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-with-special-tokens] -======= -`bert_ja`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja] -+ -.Properties of bert_ja -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja-with-special-tokens] -======= -====== +Refer to <> to review the properties of the +`tokenization` object. 
===== `ner`::: @@ -582,121 +469,8 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field] (Optional, object) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization] + -.Properties of tokenization -[%collapsible%open] -====== -`bert`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert] -+ -.Properties of bert -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-with-special-tokens] -======= -`roberta`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta] -+ -.Properties of roberta -[%collapsible%open] -======= -`add_prefix_space`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-add-prefix-space] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-with-special-tokens] -======= -`mpnet`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet] -+ -.Properties of mpnet -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet-with-special-tokens] -======= -`xlm_roberta`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-xlm-roberta] -+ -.Properties of xlm_roberta -[%collapsible%open] -======= -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-with-special-tokens] -======= -`bert_ja`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja] -+ -.Properties of bert_ja -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) 
-include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja-with-special-tokens] -======= -====== +Refer to <> to review the +properties of the `tokenization` object. ===== `pass_through`::: @@ -714,738 +488,121 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field] (Optional, object) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization] + -.Properties of tokenization -[%collapsible%open] -====== -`bert`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert] -+ -.Properties of bert -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] +Refer to <> to review the properties of the +`tokenization` object. +===== -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-with-special-tokens] -======= -`roberta`:::: +`question_answering`::: (Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta] +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-question-answering] + -.Properties of roberta +.Properties of question_answering inference [%collapsible%open] -======= -`add_prefix_space`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-add-prefix-space] - -`max_sequence_length`:::: +===== +`max_answer_length`:::: (Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] +The maximum amount of words in the answer. Defaults to `15`. 
-`truncate`:::: +`results_field`:::: (Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field] -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-with-special-tokens] -======= -`mpnet`:::: +`tokenization`:::: (Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet] +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization] + -.Properties of mpnet -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] +Recommended to set `max_sentence_length` to `386` with `128` of `span` and set +`truncate` to `none`. ++ +Refer to <> to review the properties of the +`tokenization` object. +===== -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet-with-special-tokens] -======= -`xlm_roberta`:::: +`regression`::: (Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-xlm-roberta] +Regression configuration for inference. + -.Properties of xlm_roberta +.Properties of regression inference [%collapsible%open] -======= -`max_sequence_length`:::: +===== +`num_top_feature_importance_values`:::: (Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-regression-num-top-feature-importance-values] -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-with-special-tokens] -======= -`bert_ja`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja] -+ -.Properties of bert_ja -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja-with-special-tokens] -======= -====== -===== - -`question_answering`::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-question-answering] -+ -.Properties of question_answering inference -[%collapsible%open] -===== -`max_answer_length`:::: -(Optional, integer) -The maximum amount of words in the answer. Defaults to `15`. 
- -`results_field`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field] - -`tokenization`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization] -+ -Recommended to set `max_sentence_length` to `386` with `128` of `span` and set -`truncate` to `none`. -+ -.Properties of tokenization -[%collapsible%open] -====== -`bert`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert] -+ -.Properties of bert -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`span`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-with-special-tokens] -======= -`roberta`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta] -+ -.Properties of roberta -[%collapsible%open] -======= -`add_prefix_space`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-add-prefix-space] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`span`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-with-special-tokens] -======= -`mpnet`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet] -+ -.Properties of mpnet -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`span`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet-with-special-tokens] -======= -`xlm_roberta`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-xlm-roberta] -+ -.Properties of xlm_roberta -[%collapsible%open] -======= -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`span`:::: -(Optional, integer) 
-include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-with-special-tokens] -======= -`bert_ja`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja] -+ -.Properties of bert_ja -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`span`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja-with-special-tokens] -======= -====== -===== - -`regression`::: -(Optional, object) -Regression configuration for inference. -+ -.Properties of regression inference -[%collapsible%open] -===== -`num_top_feature_importance_values`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-regression-num-top-feature-importance-values] - -`results_field`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field] -===== - -`text_classification`::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-classification] -+ -.Properties of text_classification inference -[%collapsible%open] -===== -`classification_labels`:::: -(Optional, string) An array of classification labels. - -`num_top_classes`:::: -(Optional, integer) -Specifies the number of top class predictions to return. Defaults to all classes (-1). 
- -`results_field`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field] - -`tokenization`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization] -+ -.Properties of tokenization -[%collapsible%open] -====== -`bert`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert] -+ -.Properties of bert -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`span`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-with-special-tokens] -======= -`roberta`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta] -+ -.Properties of roberta -[%collapsible%open] -======= -`add_prefix_space`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-add-prefix-space] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`span`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-with-special-tokens] -======= -`mpnet`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet] -+ -.Properties of mpnet -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet-with-special-tokens] -======= -`xlm_roberta`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-xlm-roberta] -+ -.Properties of xlm_roberta -[%collapsible%open] -======= -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`span`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: 
-(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-with-special-tokens] -======= -`bert_ja`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja] -+ -.Properties of bert_ja -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`span`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja-with-special-tokens] -======= -====== -===== -`text_embedding`::: -(Object, optional) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-embedding] -+ -.Properties of text_embedding inference -[%collapsible%open] -===== -`embedding_size`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-embedding-size] - -`results_field`:::: +`results_field`:::: (Optional, string) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field] - -`tokenization`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization] -+ -.Properties of tokenization -[%collapsible%open] -====== -`bert`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert] -+ -.Properties of bert -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-with-special-tokens] -======= -`roberta`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta] -+ -.Properties of roberta -[%collapsible%open] -======= -`add_prefix_space`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-add-prefix-space] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-with-special-tokens] -======= -`mpnet`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet] -+ -.Properties of mpnet -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) 
-include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet-with-special-tokens] -======= -`xlm_roberta`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-xlm-roberta] -+ -.Properties of xlm_roberta -[%collapsible%open] -======= -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-with-special-tokens] -======= -`bert_ja`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja] -+ -.Properties of bert_ja -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja-with-special-tokens] -======= -====== ===== -`text_similarity`:::: -(Object, optional) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-similarity] -+ -.Properties of text_similarity inference -[%collapsible%open] -===== -`span_score_combination_function`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-similarity-span-score-func] - -`tokenization`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization] -+ -.Properties of tokenization -[%collapsible%open] -====== -`bert`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert] -+ -.Properties of bert -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`span`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-with-special-tokens] -======= -`roberta`:::: -(Optional, object) 
-include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta]
-+
-.Properties of roberta
-[%collapsible%open]
-=======
-`add_prefix_space`::::
-(Optional, boolean)
-include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-add-prefix-space]
-
-`max_sequence_length`::::
-(Optional, integer)
-include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length]
-
-`span`::::
-(Optional, integer)
-include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span]
-
-`truncate`::::
-(Optional, string)
-include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
-
-`with_special_tokens`::::
-(Optional, boolean)
-include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-with-special-tokens]
-=======
-`mpnet`::::
-(Optional, object)
-include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet]
-+
-.Properties of mpnet
-[%collapsible%open]
-=======
-`do_lower_case`::::
-(Optional, boolean)
-include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case]
-
-`max_sequence_length`::::
-(Optional, integer)
-include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length]
-
-`span`::::
-(Optional, integer)
-include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span]
-
-`truncate`::::
-(Optional, string)
-include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
-`with_special_tokens`::::
-(Optional, boolean)
-include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet-with-special-tokens]
-=======
-`xlm_roberta`::::
+`text_classification`:::
 (Optional, object)
-include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-xlm-roberta]
+include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-classification]
 +
-.Properties of xlm_roberta
+.Properties of text_classification inference
 [%collapsible%open]
-=======
-`max_sequence_length`::::
-(Optional, integer)
-include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length]
+=====
+`classification_labels`::::
+(Optional, string) An array of classification labels.

-`span`::::
+`num_top_classes`::::
 (Optional, integer)
-include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span]
+Specifies the number of top class predictions to return. Defaults to all classes
+(-1).

-`truncate`::::
+`results_field`::::
 (Optional, string)
-include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
+include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field]

-`with_special_tokens`::::
-(Optional, boolean)
-include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-with-special-tokens]
-=======
-`bert_ja`::::
+`tokenization`::::
 (Optional, object)
-include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja]
+include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization]
 +
-.Properties of bert_ja
-[%collapsible%open]
-=======
-`do_lower_case`::::
-(Optional, boolean)
-include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case]
+Refer to <<tokenization-properties>> to review the properties of the
+`tokenization` object.
+=====

-`max_sequence_length`::::
+`text_embedding`:::
+(Optional, object)
+include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-embedding]
++
+.Properties of text_embedding inference
+[%collapsible%open]
+=====
+`embedding_size`::::
 (Optional, integer)
-include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length]
+include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-embedding-size]

-`span`::::
-(Optional, integer)
-include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span]
+`results_field`::::
+(Optional, string)
+include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field]
+
+`tokenization`::::
+(Optional, object)
+include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization]
++
+Refer to <<tokenization-properties>> to review the properties of the
+`tokenization` object.
+=====

-`truncate`::::
+`text_similarity`:::
+(Optional, object)
+include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-similarity]
++
+.Properties of text_similarity inference
+[%collapsible%open]
+=====
+`span_score_combination_function`::::
 (Optional, string)
-include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
+include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-similarity-span-score-func]

-`with_special_tokens`::::
-(Optional, boolean)
-include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja-with-special-tokens]
-=======
-======
+`tokenization`::::
+(Optional, object)
+include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization]
++
+Refer to <<tokenization-properties>> to review the properties of the
+`tokenization` object.
 =====
+
 `zero_shot_classification`:::
 (Object, optional)
 include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-zero-shot-classification]
@@ -1477,190 +634,242 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field]
 (Optional, object)
 include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization]
 +
-.Properties of tokenization
+Refer to <<tokenization-properties>> to review the properties of the
+`tokenization` object.
+=====
+====
+//End of inference_config
+
+//Begin input
+`input`::
+(Required, object)
+The input field names for the model definition.
++
+.Properties of `input`
 [%collapsible%open]
-======
-`bert`::::
+====
+`field_names`:::
+(Required, string)
+An array of input field names for the model.
+====
+//End input
+
+// Begin location
+`location`::
+(Optional, object)
+The model definition location. If the `definition` or `compressed_definition`
+are not specified, the `location` is required.
++
+.Properties of `location`
+[%collapsible%open]
+====
+`index`:::
+(Required, object)
+Indicates that the model definition is stored in an index. This object must be
+empty as the index for storing model definitions is configured automatically.
+====
+// End location
+
+`metadata`::
+(Optional, object)
+An object map that contains metadata about the model.
+
+`model_size_bytes`::
+(Optional, integer)
+The estimated memory usage in bytes to keep the trained model in memory. This
+property is supported only if `defer_definition_decompression` is `true` or the
+model definition is not supplied.
+
+`model_type`::
+(Optional, string)
+The created model type. By default, the model type is `tree_ensemble`.
+Appropriate types are:
++
+--
+* `tree_ensemble`: The model definition is an ensemble model of decision trees.
+* `lang_ident`: A special type reserved for language identification models.
+* `pytorch`: The stored definition is a PyTorch (specifically a TorchScript) model. Currently only
+NLP models are supported. For more information, refer to {ml-docs}/ml-nlp.html[{nlp-cap}].
+--
+`platform_architecture`::
+(Optional, string)
+If the model only works on one platform because it is heavily
+optimized for a particular processor architecture and OS combination,
+this field specifies which one. The format of the string must match
+the platform identifiers used by Elasticsearch, so it must be one of `linux-x86_64`,
+`linux-aarch64`, `darwin-x86_64`, `darwin-aarch64`, or `windows-x86_64`.
+For portable models (those that work independently of processor architecture or
+OS features), leave this field unset.
+
+//Begin prefix_strings
+`prefix_strings`::
+(Optional, object)
+Certain NLP models are trained in such a way that a prefix string should
+be applied to the input text before the input is evaluated. The prefix
+may differ depending on the intent. For asymmetric tasks such
+as information retrieval, the prefix applied to a passage as it is indexed
+can differ from the prefix applied when searching those passages.
+
+`prefix_strings` has two options: a prefix string that is always applied
+in the search context and one that is always applied when ingesting
+documents. Both are optional; an illustrative example is shown at the end
+of this parameter section.
++
+.Properties of `prefix_strings`
+[%collapsible%open]
+====
+`search`:::
+(Optional, string)
+The prefix string to prepend to the input text for requests
+originating from a search query.
+
+`ingest`:::
+(Optional, string)
+The prefix string to prepend to the input text for requests
+at ingest where the {infer} ingest processor is used.
+====
+//End prefix_strings
+
+`tags`::
+(Optional, string)
+An array of tags to organize the model.
+
+
+[[tokenization-properties]]
+=== Properties of `tokenization`
+
+The `tokenization` object has the following properties.
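+
+For illustration only, the following sketch shows what a `tokenization` object
+that selects the BERT tokenizer could look like. The values are examples, not
+recommendations:
+
+[source,js]
+--------------------------------------------------
+"tokenization": {
+  "bert": {
+    "do_lower_case": true,
+    "max_sequence_length": 512,
+    "truncate": "first",
+    "with_special_tokens": true
+  }
+}
+--------------------------------------------------
+// NOTCONSOLE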
+ +`bert`:: (Optional, object) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert] + .Properties of bert [%collapsible%open] -======= -`do_lower_case`:::: +==== +`do_lower_case`::: (Optional, boolean) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] -`max_sequence_length`:::: +`max_sequence_length`::: (Optional, integer) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] -`truncate`:::: +`span`::: +(Optional, integer) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] + +`truncate`::: (Optional, string) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] -`with_special_tokens`:::: +`with_special_tokens`::: (Optional, boolean) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-with-special-tokens] -======= -`roberta`:::: +==== +`roberta`:: (Optional, object) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta] + .Properties of roberta [%collapsible%open] -======= -`add_prefix_space`:::: +==== +`add_prefix_space`::: (Optional, boolean) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-add-prefix-space] -`max_sequence_length`:::: +`max_sequence_length`::: (Optional, integer) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] -`truncate`:::: +`span`::: +(Optional, integer) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] + +`truncate`::: (Optional, string) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] -`with_special_tokens`:::: +`with_special_tokens`::: (Optional, boolean) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-with-special-tokens] -======= -`mpnet`:::: +==== +`mpnet`:: (Optional, object) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet] + .Properties of mpnet [%collapsible%open] -======= -`do_lower_case`:::: +==== +`do_lower_case`::: (Optional, boolean) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] -`max_sequence_length`:::: +`max_sequence_length`::: (Optional, integer) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] -`truncate`:::: +`span`::: +(Optional, integer) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] + +`truncate`::: (Optional, string) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] -`with_special_tokens`:::: +`with_special_tokens`::: (Optional, boolean) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet-with-special-tokens] -======= -`xlm_roberta`:::: +==== +`xlm_roberta`:: (Optional, object) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-xlm-roberta] + .Properties of xlm_roberta [%collapsible%open] -======= -`max_sequence_length`:::: +==== +`max_sequence_length`::: (Optional, integer) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] -`truncate`:::: +`span`::: +(Optional, integer) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] + +`truncate`::: (Optional, string) 
include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] -`with_special_tokens`:::: +`with_special_tokens`::: (Optional, boolean) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-with-special-tokens] -======= -`bert_ja`:::: +==== +`bert_ja`:: (Optional, object) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja] + .Properties of bert_ja [%collapsible%open] -======= -`do_lower_case`:::: +==== +`do_lower_case`::: (Optional, boolean) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] -`max_sequence_length`:::: +`max_sequence_length`::: (Optional, integer) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] -`truncate`:::: +`span`::: +(Optional, integer) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] + +`truncate`::: (Optional, string) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] -`with_special_tokens`:::: +`with_special_tokens`::: (Optional, boolean) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja-with-special-tokens] -======= -====== -===== -==== -//End of inference_config - -//Begin input -`input`:: -(Required, object) -The input field names for the model definition. -+ -.Properties of `input` -[%collapsible%open] -==== -`field_names`::: -(Required, string) -An array of input field names for the model. -==== -//End input - -// Begin location -`location`:: -(Optional, object) -The model definition location. If the `definition` or `compressed_definition` -are not specified, the `location` is required. -+ -.Properties of `location` -[%collapsible%open] -==== -`index`::: -(Required, object) -Indicates that the model definition is stored in an index. This object must be -empty as the index for storing model definitions is configured automatically. ==== -// End location - -`metadata`:: -(Optional, object) -An object map that contains metadata about the model. - -`model_size_bytes`:: -(Optional, integer) -The estimated memory usage in bytes to keep the trained model in memory. This -property is supported only if `defer_definition_decompression` is `true` or the -model definition is not supplied. - -`model_type`:: -(Optional, string) -The created model type. By default the model type is `tree_ensemble`. -Appropriate types are: -+ --- -* `tree_ensemble`: The model definition is an ensemble model of decision trees. -* `lang_ident`: A special type reserved for language identification models. -* `pytorch`: The stored definition is a PyTorch (specifically a TorchScript) model. Currently only -NLP models are supported. For more information, refer to {ml-docs}/ml-nlp.html[{nlp-cap}]. --- -`platform_architecture`:: -(Optional, string) -If the model only works on one platform, because it is heavily -optimized for a particular processor architecture and OS combination, -then this field specifies which. The format of the string must match -the platform identifiers used by Elasticsearch, so one of, `linux-x86_64`, -`linux-aarch64`, `darwin-x86_64`, `darwin-aarch64`, or `windows-x86_64`. -For portable models (those that work independent of processor architecture or -OS features), leave this field unset. - - -`tags`:: -(Optional, string) -An array of tags to organize the model. 
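+
+As an illustration of the `prefix_strings` object described above, the
+following sketch creates a text embedding model with separate search and
+ingest prefixes. The model ID, prefix texts, and field name are examples
+only, not values taken from a real model:
+
+[source,console]
+--------------------------------------------------
+PUT _ml/trained_models/my-text-embedding-model
+{
+  "model_type": "pytorch",
+  "input": {
+    "field_names": ["text_field"]
+  },
+  "inference_config": {
+    "text_embedding": {}
+  },
+  "prefix_strings": {
+    "search": "query: ",
+    "ingest": "passage: "
+  }
+}
+--------------------------------------------------
+// TEST[skip:illustrative example only, no model definition is supplied]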
[[ml-put-trained-models-example]]
diff --git a/docs/reference/monitoring/how-monitoring-works.asciidoc b/docs/reference/monitoring/how-monitoring-works.asciidoc
index 80cd560bd8f09..13fa006bdbbe0 100644
--- a/docs/reference/monitoring/how-monitoring-works.asciidoc
+++ b/docs/reference/monitoring/how-monitoring-works.asciidoc
@@ -34,7 +34,6 @@ collection methods, you should migrate to using {agent} or {metricbeat}.
 * Monitoring {beats}:
 ** {auditbeat-ref}/monitoring.html[{auditbeat}]
 ** {filebeat-ref}/monitoring.html[{filebeat}]
-** {functionbeat-ref}/monitoring.html[{functionbeat}]
 ** {heartbeat-ref}/monitoring.html[{heartbeat}]
 ** {metricbeat-ref}/monitoring.html[{metricbeat}]
 ** {packetbeat-ref}/monitoring.html[{packetbeat}]
diff --git a/docs/reference/release-notes/8.10.0.asciidoc b/docs/reference/release-notes/8.10.0.asciidoc
index 9fbe7a2b1d099..34d1d26e5d69a 100644
--- a/docs/reference/release-notes/8.10.0.asciidoc
+++ b/docs/reference/release-notes/8.10.0.asciidoc
@@ -35,6 +35,8 @@ delete all the snapshots in the repository taken with version 8.10.0 or later
 using a cluster running version 8.10.4.
 // end::repositorydata-format-change[]

+include::8.7.1.asciidoc[tag=no-preventive-gc-issue]
+
 [[breaking-8.10.0]]
 [float]
 === Breaking changes
diff --git a/docs/reference/release-notes/8.10.1.asciidoc b/docs/reference/release-notes/8.10.1.asciidoc
index d049d5b33b1f7..0cb00699eeac7 100644
--- a/docs/reference/release-notes/8.10.1.asciidoc
+++ b/docs/reference/release-notes/8.10.1.asciidoc
@@ -9,6 +9,8 @@ Also see <>.

 include::8.10.0.asciidoc[tag=repositorydata-format-change]

+include::8.7.1.asciidoc[tag=no-preventive-gc-issue]
+
 [[bug-8.10.1]]
 [float]
 === Bug fixes
diff --git a/docs/reference/release-notes/8.10.2.asciidoc b/docs/reference/release-notes/8.10.2.asciidoc
index c428b4534fe79..911a410104a26 100644
--- a/docs/reference/release-notes/8.10.2.asciidoc
+++ b/docs/reference/release-notes/8.10.2.asciidoc
@@ -7,4 +7,6 @@

 include::8.10.0.asciidoc[tag=repositorydata-format-change]

+include::8.7.1.asciidoc[tag=no-preventive-gc-issue]
+
 Also see <>.
diff --git a/docs/reference/release-notes/8.10.3.asciidoc b/docs/reference/release-notes/8.10.3.asciidoc
index b7828f52ad082..119930058a42e 100644
--- a/docs/reference/release-notes/8.10.3.asciidoc
+++ b/docs/reference/release-notes/8.10.3.asciidoc
@@ -7,6 +7,19 @@

 include::8.10.0.asciidoc[tag=repositorydata-format-change]

+// tag::no-preventive-gc-issue[]
+* High Memory Pressure due to a GC change in JDK 21
++
+This version of Elasticsearch is bundled with JDK 21. In JDK 21
+https://bugs.openjdk.org/browse/JDK-8297639[Preventive GC has been removed].
+This may lead to increased memory pressure and an increased number of CircuitBreakerExceptions when retrieving large
+documents under certain load patterns. (issue: {es-issue}99592[#99592])
++
+If you needed to explicitly <>, we recommend that you avoid upgrading to this version, as the settings to enable Preventive GC have been removed
+from JDK 21.
+// end::no-preventive-gc-issue[]
+
 Also see <>.

 [[bug-8.10.3]]
diff --git a/docs/reference/release-notes/8.10.4.asciidoc b/docs/reference/release-notes/8.10.4.asciidoc
index f2e95af71afcb..6c49bae1e2150 100644
--- a/docs/reference/release-notes/8.10.4.asciidoc
+++ b/docs/reference/release-notes/8.10.4.asciidoc
@@ -25,6 +25,8 @@ first. If you cannot repair the repository in this way, first
 delete all the snapshots in the repository taken with version 8.10.0 or later
 using a cluster running version 8.10.4.
+include::8.10.3.asciidoc[tag=no-preventive-gc-issue]
+
 Also see <>.

 [[bug-8.10.4]]
diff --git a/docs/reference/release-notes/8.11.0.asciidoc b/docs/reference/release-notes/8.11.0.asciidoc
index a5c8c8c73128e..acb27dc180727 100644
--- a/docs/reference/release-notes/8.11.0.asciidoc
+++ b/docs/reference/release-notes/8.11.0.asciidoc
@@ -1,8 +1,342 @@
 [[release-notes-8.11.0]]
 == {es} version 8.11.0

-coming[8.11.0]
-
 Also see <>.

+[[breaking-8.11.0]]
+[float]
+=== Breaking changes
+
+Infra/Core::
+* Remove `transport_versions` from cluster state API {es-pull}99223[#99223]
+
+[[known-issues-8.11.0]]
+[float]
+=== Known issues
+include::8.10.3.asciidoc[tag=no-preventive-gc-issue]
+
+[[bug-8.11.0]]
+[float]
+=== Bug fixes
+
+Aggregations::
+* Adjust `DateHistogram's` bucket accounting to be iterative {es-pull}101012[#101012]
+* Allow parsing on non-string routing fields {es-pull}97729[#97729]
+* Support runtime fields in synthetic source {es-pull}99796[#99796] (issue: {es-issue}98287[#98287])
+
+Allocation::
+* Consider node shutdown in `DataTierAllocationDecider` {es-pull}98824[#98824] (issue: {es-issue}97207[#97207])
+
+Application::
+* Align look-back with client-side cache {es-pull}101264[#101264]
+* Increase K/V look-back time interval {es-pull}101205[#101205]
+* Provide stable resampling {es-pull}101255[#101255]
+* [Profiling] Tighten resource creation check {es-pull}99873[#99873]
+
+Authorization::
+* Allow `enrich_user` to read/view enrich indices {es-pull}100707[#100707]
+* Grant editor and viewer access to profiling {es-pull}100594[#100594]
+
+CCR::
+* CCR: Use local cluster state request {es-pull}100323[#100323]
+
+CRUD::
+* Change `GetFromTranslog` to indices action {es-pull}99300[#99300]
+* Wait for cluster to recover before resolving index template {es-pull}99797[#99797]
+
+Cluster Coordination::
+* Reset `GatewayService` flags before reroute {es-pull}98653[#98653] (issue: {es-issue}98606[#98606])
+
+Data streams::
+* DSL waits for the tsdb time boundaries to lapse {es-pull}100470[#100470] (issue: {es-issue}99696[#99696])
+* Propagate cancellation in `DataTiersUsageTransportAction` {es-pull}100253[#100253]
+* [DSL] skip deleting indices that have in-progress downsampling operations {es-pull}101495[#101495]
+
+Downsampling::
+* Make downsample target index replicas configurable {es-pull}99712[#99712]
+
+ES|QL::
+* "params" correctly parses the values including an optional "type" {es-pull}99310[#99310] (issue: {es-issue}99294[#99294])
+* Account for an exception being thrown when building a `BytesRefArrayBlock` {es-pull}99726[#99726] (issue: {es-issue}99472[#99472])
+* Add arithmetic operators {es-pull}98628[#98628]
+* Add identity check in Block equality {es-pull}100377[#100377] (issue: {es-issue}100374[#100374])
+* Adds Enrich implicit `match_fields` to `field_caps` call {es-pull}101456[#101456] (issue: {es-issue}101328[#101328])
+* Better management of not stored TEXT fields with synthetic source {es-pull}99695[#99695]
+* Continue resolving attributes for Eval {es-pull}99601[#99601] (issue: {es-issue}99576[#99576])
+* Create a Vector when needed for IN {es-pull}99382[#99382] (issue: {es-issue}99347[#99347])
+* ESQL: Fix unreleased block in topn {es-pull}101648[#101648] (issue: {es-issue}101588[#101588])
+* ESQL: check type before casting {es-pull}101492[#101492] (issue: {es-issue}101489[#101489])
+* Fix NPE when aggregating literals {es-pull}99827[#99827]
+* Fix escaping of backslash in LIKE operator {es-pull}101120[#101120] (issue: {es-issue}101106[#101106])
+* Fix eval of
functions on foldable literals {es-pull}101438[#101438] (issue: {es-issue}101425[#101425]) +* Fix non-null value being returned for unsupported data types in `ValueSources` {es-pull}100656[#100656] (issue: {es-issue}100048[#100048]) +* Graceful handling of non-bool condition in the filter {es-pull}100645[#100645] (issues: {es-issue}100049[#100049], {es-issue}100409[#100409]) +* Handle queries with non-existing enrich policies and no field {es-pull}100647[#100647] (issue: {es-issue}100593[#100593]) +* Implement serialization of `InvalidMappedField` {es-pull}98972[#98972] (issue: {es-issue}98851[#98851]) +* Improve verifier error for incorrect agg declaration {es-pull}100650[#100650] (issue: {es-issue}100641[#100641]) +* Limit how many bytes `concat()` can process {es-pull}100360[#100360] +* Make DISSECT parameter `append_separator` case insensitive {es-pull}101358[#101358] (issue: {es-issue}101138[#101138]) +* Page shouldn't close a block twice {es-pull}100370[#100370] (issues: {es-issue}100356[#100356], {es-issue}100365[#100365]) +* Preserve intermediate aggregation output in local relation {es-pull}100866[#100866] (issue: {es-issue}100807[#100807]) +* Properly handle multi-values in fold() and date math {es-pull}100766[#100766] (issue: {es-issue}100497[#100497]) +* Remove aliasing inside Eval {es-pull}100238[#100238] (issue: {es-issue}100174[#100174]) +* Resilience to non-indexed fields {es-pull}99588[#99588] (issue: {es-issue}99506[#99506]) +* Skip synthetic attributes when planning the physical fragment {es-pull}99188[#99188] (issue: {es-issue}99170[#99170]) +* Support date and time intervals as input params {es-pull}101001[#101001] (issue: {es-issue}99570[#99570]) +* Support queries that don't return underlying fields {es-pull}98759[#98759] (issue: {es-issue}98404[#98404]) +* Use exact attributes for data source extraction {es-pull}99874[#99874] (issue: {es-issue}99183[#99183]) +* `mv_expand` pushes down limit and project and keep the limit after it untouched {es-pull}100782[#100782] (issues: {es-issue}99971[#99971], {es-issue}100774[#100774]) +* support metric tsdb fields while querying index patterns {es-pull}100351[#100351] (issue: {es-issue}100144[#100144]) + +Geo:: +* Use `NamedWritable` to enable `GeoBoundingBox` serialisation {es-pull}99163[#99163] (issue: {es-issue}99089[#99089]) + +Health:: +* Fix NPE in `StableMasterHealthIndicatorService` {es-pull}98635[#98635] +* Health report infrastructure doesn't trip the circuit breakers {es-pull}101629[#101629] +* Propagate cancellation in `GetHealthAction` {es-pull}100273[#100273] + +Highlighting:: +* Correctly handle `ScriptScoreQuery` in plain highlighter {es-pull}99804[#99804] (issue: {es-issue}99700[#99700]) +* Disable `weight_matches` when kNN query is present {es-pull}101713[#101713] + +ILM+SLM:: +* Compute SLM retention from `RepositoryData` {es-pull}100092[#100092] (issue: {es-issue}99953[#99953]) +* `WaitForSnapshotStep` verifies if the index belongs to the latest snapshot of that SLM policy {es-pull}100911[#100911] + +Infra/Core:: +* Add `java.net.NetPermission` to APM module's permissions {es-pull}99474[#99474] +* Don't update system index mappings in mixed clusters {es-pull}101778[#101778] (issues: {es-issue}101331[#101331], {es-issue}99778[#99778]) +* Revert "Kibana system index does not allow user templates to affect it" {es-pull}98888[#98888] +* Specify correct current `IndexVersion` after 8.10 release {es-pull}98574[#98574] (issue: {es-issue}98555[#98555]) +* Tracing: Use `doPriv` when working with spans, use `SpanId` 
{es-pull}100232[#100232]
+
+Infra/Scripting::
+* Improve painless error wrapping {es-pull}100872[#100872]
+
+Ingest Node::
+* Improving tika handling {es-pull}101486[#101486]
+* Update enrich execution to only set index false on fields that support it {es-pull}98038[#98038] (issue: {es-issue}98019[#98019])
+
+Machine Learning::
+* Avoid risk of OOM in datafeeds when memory is constrained {es-pull}98915[#98915] (issue: {es-issue}89769[#89769])
+* Fix for inference requests being sent to every node with a model allocation. If there are more nodes than items in the original request, then empty requests were sent. {es-pull}100388[#100388] (issue: {es-issue}100180[#100180])
+* Preserve order of inference results when calling the _infer API with multiple inputs on a model deployment with more than one allocation; previously, the output order was not guaranteed to match the input order. The fix ensures the output order matches the input order. {es-pull}100143[#100143]
+* Remove noisy 'Could not find trained model' message {es-pull}100760[#100760]
+* Safely drain deployment request queues before allowing node to shutdown {es-pull}98406[#98406]
+* Use the correct writable name for model assignment metadata in mixed version clusters. Prevents a node failure due to IllegalArgumentException Unknown NamedWriteable [trained_model_assignment] {es-pull}100886[#100886]
+* Wait to gracefully stop deployments until alternative allocation exists {es-pull}99107[#99107]
+
+Mapping::
+* Automatically disable `ignore_malformed` on datastream `@timestamp` fields {es-pull}99346[#99346]
+* Correct behaviour of `ContentPath::remove()` {es-pull}98332[#98332] (issue: {es-issue}98327[#98327])
+* Fix merges of mappings with `subobjects: false` for composable index templates {es-pull}97317[#97317] (issue: {es-issue}96768[#96768])
+* Percolator to support parsing script score query with params {es-pull}101051[#101051] (issue: {es-issue}97377[#97377])
+
+Network::
+* Do not report failure after connections are made {es-pull}99117[#99117]
+
+Percolator::
+* Fix percolator query for stored queries that expand on wildcard field names {es-pull}98878[#98878]
+
+Query Languages::
+* Preserve subfields for unsupported types {es-pull}100875[#100875] (issue: {es-issue}100869[#100869])
+
+Recovery::
+* Fix interruption of `markAllocationIdAsInSync` {es-pull}100610[#100610] (issues: {es-issue}96578[#96578], {es-issue}100589[#100589])
+
+Search::
+* Consistent scores for multi-term `SourceConfirmedTestQuery` {es-pull}100846[#100846] (issue: {es-issue}98712[#98712])
+* Fix UnsignedLong field range query where gt "0" could match values equal to 0 {es-pull}98843[#98843]
+* Fix `advanceExact` for doc values from sources {es-pull}99685[#99685]
+* Fork response-sending in `OpenPointInTimeAction` {es-pull}99222[#99222]
+* [CI] `SearchResponseTests#testSerialization` failing resolved {es-pull}100020[#100020] (issue: {es-issue}100005[#100005])
+* Fix fuzzy query rewrite parameter not working {es-pull}97642[#97642]
+
+Security::
+* Fix NullPointerException in RotableSecret {es-pull}100779[#100779] (issue: {es-issue}99759[#99759])
+
+Snapshot/Restore::
+* Fix race condition in `SnapshotsService` {es-pull}101652[#101652]
+* Fix snapshot double finalization {es-pull}101497[#101497]
+* Fix thread context in `getRepositoryData` {es-pull}99627[#99627]
+* Frozen index input clone copy cache file {es-pull}98930[#98930]
+* Make S3 anti-contention delay configurable {es-pull}101245[#101245]
+* More robust timeout for repo analysis {es-pull}101184[#101184] (issue:
{es-issue}101182[#101182])
+* Register `repository_s3` settings {es-pull}101344[#101344]
+* Reinstate `RepositoryData` BwC {es-pull}100447[#100447]
+
+TSDB::
+* Don't ignore empty index templates that have no template definition {es-pull}98840[#98840] (issue: {es-issue}98834[#98834])
+* Fix painless execute api and tsdb issue {es-pull}101212[#101212] (issue: {es-issue}101072[#101072])
+* Make tsdb settings public in Serverless {es-pull}99567[#99567] (issue: {es-issue}99563[#99563])
+
+Transform::
+* Fix possible NPE when getting transform stats for failed transforms {es-pull}98061[#98061] (issue: {es-issue}98052[#98052])
+* Ignore `IndexNotFound` error when refreshing destination index {es-pull}101627[#101627]
+* Make Transform Feature Reset really wait for all the tasks {es-pull}100624[#100624]
+* Make tasks that calculate checkpoints cancellable {es-pull}100808[#100808]
+
+Watcher::
+* Treating watcher webhook response header names as case-insensitive {es-pull}99717[#99717]
+
+[[deprecation-8.11.0]]
+[float]
+=== Deprecations
+
+Rollup::
+* Rollup functionality is now deprecated {es-pull}101265[#101265]
+
+[[enhancement-8.11.0]]
+[float]
+=== Enhancements
+
+Aggregations::
+* Disable `FilterByFilterAggregator` through `ClusterSettings` {es-pull}99417[#99417] (issue: {es-issue}99335[#99335])
+* Represent histogram value count as long {es-pull}99912[#99912] (issue: {es-issue}99820[#99820])
+* Skip `DisiPriorityQueue` on single filter agg {es-pull}99215[#99215] (issue: {es-issue}99202[#99202])
+* Use a competitive iterator in `FiltersAggregator` {es-pull}98360[#98360] (issue: {es-issue}97544[#97544])
+
+Allocation::
+* Report a node's "roles" setting in the /_cluster/allocation/explain response {es-pull}98550[#98550] (issue: {es-issue}97859[#97859])
+
+Application::
+* Add flamegraph API {es-pull}99091[#99091]
+* [Profiling] Allow to customize the ILM policy {es-pull}99909[#99909]
+* [Profiling] Allow to wait until resources created {es-pull}99655[#99655]
+
+Audit::
+* Reduce verbosity of the bulk indexing audit log {es-pull}98470[#98470]
+
+Authentication::
+* Allow native users/roles to be disabled via setting {es-pull}98654[#98654]
+
+CAT APIs::
+* Add 'dataset' size to cat indices and cat shards {es-pull}98622[#98622] (issue: {es-issue}95092[#95092])
+
+Data streams::
+* Allow explain data stream lifecycle to accept a data stream {es-pull}98811[#98811]
+
+ES|QL::
+* Add `CEIL` function {es-pull}98847[#98847]
+* Add ability to perform date math {es-pull}98870[#98870] (issue: {es-issue}98402[#98402])
+* Add support for TEXT fields in comparison operators and SORT {es-pull}98528[#98528] (issue: {es-issue}98642[#98642])
+* Compact topn {es-pull}99316[#99316]
+* Date math for negatives {es-pull}99711[#99711]
+* Enable arithmetics for durations and periods {es-pull}99432[#99432] (issue: {es-issue}99293[#99293])
+* Enhance SHOW FUNCTIONS command {es-pull}99736[#99736] (issue: {es-issue}99507[#99507])
+* Improve log messages {es-pull}99470[#99470]
+* Log execution time consistently {es-pull}99286[#99286]
+* Log query and execution time {es-pull}99058[#99058]
+* Log start and end of queries {es-pull}99746[#99746]
+* Lower the implicit limit, if none is user-provided {es-pull}99816[#99816] (issue: {es-issue}99458[#99458])
+* Make settings dynamic {es-pull}101516[#101516]
+* Mark counter fields as unsupported {es-pull}99054[#99054]
+* Remove the swapped-args check for date_xxx() {es-pull}101362[#101362] (issue: {es-issue}99562[#99562])
+* Serialize the source in expressions {es-pull}99956[#99956]
+* Simple check if all blocks get released {es-pull}100199[#100199]
+* Support unsigned long in sqrt and log10 {es-pull}98711[#98711]
+* Use DEBUG log level to report execution steps {es-pull}99303[#99303]
+
+Engine::
+* Use `IndexWriter.flushNextBuffer()` to reclaim memory from indexing buffers {es-pull}94607[#94607]
+
+Health::
+* Avoiding the use of nodes that are no longer in the cluster when computing master stability {es-pull}98809[#98809] (issue: {es-issue}98636[#98636])
+* When a primary is inactive but this is considered expected, the same applies for the replica of this shard. {es-pull}99995[#99995] (issue: {es-issue}99951[#99951])
+
+Infra/Core::
+* APM Metering API {es-pull}99832[#99832]
+* Update the elastic-apm-agent version {es-pull}100064[#100064]
+* Use mappings version to retrieve system index mappings at creation time {es-pull}99555[#99555]
+
+Infra/Node Lifecycle::
+* Add links to docs from failing bootstrap checks {es-pull}99644[#99644] (issue: {es-issue}99614[#99614])
+* Chunk `SingleNodeShutdownStatus` and `ShutdownShardMigrationStatus` (and related action) response {es-pull}99798[#99798] (issue: {es-issue}99678[#99678])
+
+Infra/REST API::
+* Add `IndexVersion` to node info {es-pull}99515[#99515]
+* Add component info versions to node info in a pluggable way {es-pull}99631[#99631]
+* Return a 410 (Gone) status code for unavailable API endpoints {es-pull}97397[#97397]
+
+Machine Learning::
+* Add new _inference API {es-pull}99224[#99224]
+* Adding an option for trained models to be platform specific {es-pull}99584[#99584]
+* Log warnings for jobs unassigned for a long time {es-pull}100154[#100154]
+* Simplify the Inference Ingest Processor configuration {es-pull}100205[#100205]
+
+Mapping::
+* Automatically flatten objects when subobjects:false {es-pull}97972[#97972] (issue: {es-issue}88934[#88934])
+* Explicit parsing object capabilities of `FieldMappers` {es-pull}98684[#98684] (issue: {es-issue}98537[#98537])
+* Reintroduce `sparse_vector` mapping {es-pull}98996[#98996]
+
+Network::
+* Chunk the cluster allocation explain response {es-pull}99641[#99641] (issue: {es-issue}97803[#97803])
+
+Recovery::
+* Wait for cluster state in recovery {es-pull}99193[#99193]
+
+Search::
+* Add additional counters to `_clusters` response for all Cluster search states {es-pull}99566[#99566] (issue: {es-issue}98927[#98927])
+* Adding support for exist queries to `sparse_vector` fields {es-pull}99775[#99775] (issue: {es-issue}99319[#99319])
+* Make `_index` optional for pinned query docs {es-pull}97450[#97450]
+* Reduce copying when creating scroll/PIT ids {es-pull}99219[#99219]
+* Refactor `SearchResponseClusters` to use CHM {es-pull}100129[#100129] (issue: {es-issue}99101[#99101])
+* Support cluster/details for CCS minimize_roundtrips=false {es-pull}98457[#98457]
+
+Security::
+* Support rotating the JWT shared secret {es-pull}99278[#99278]
+
+Snapshot/Restore::
+* Remove shard data files when they fail to write for snapshot {es-pull}99694[#99694]
+
+Stats::
+* Prune unnecessary information from TransportNodesInfoAction.NodeInfoRequest {es-pull}99938[#99938] (issue: {es-issue}99744[#99744])
+
+TSDB::
+* Add `index.look_back_time` setting for tsdb data streams {es-pull}98518[#98518] (issue: {es-issue}98463[#98463])
+* Improve time-series error and documentation {es-pull}100018[#100018]
+* Trim stored fields for `_id` field in tsdb {es-pull}97409[#97409]
+
+Transform::
+* Add accessors required to recreate `TransformStats` object from the fields {es-pull}98844[#98844]
+
+Vector
Search::
+* Add new max_inner_product vector similarity function {es-pull}99527[#99527]
+* Adds `nested` support for indexed `dense_vector` fields {es-pull}99763[#99763]
+* Dense vector field types are indexed by default {es-pull}98268[#98268]
+* Increase the max vector dims to 4096 {es-pull}99682[#99682]
+
+[[feature-8.11.0]]
+[float]
+=== New features
+
+Analysis::
+* Add support for Persian language stemmer {es-pull}99106[#99106] (issue: {es-issue}98911[#98911])
+
+Application::
+* Automatically map float arrays of lengths 128 - 2048 as dense_vector {es-pull}98512[#98512] (issue: {es-issue}97532[#97532])
+
+Data streams::
+* GA the data stream lifecycle {es-pull}100187[#100187]
+* GET `_data_stream` displays both ILM and DSL information {es-pull}99947[#99947]
+
+ES|QL::
+* Integrate Elasticsearch Query Language, ES|QL {es-pull}98309[#98309]
+* LEAST and GREATEST functions {es-pull}98630[#98630]
+* LEFT function {es-pull}98942[#98942]
+* LTRIM, RTRIM and fix unicode whitespace {es-pull}98590[#98590]
+* RIGHT function {es-pull}98974[#98974]
+* TopN sorting with min and max for multi-value fields {es-pull}98337[#98337]
+
+[[upgrade-8.11.0]]
+[float]
+=== Upgrades
+
+Packaging::
+* Update bundled JDK to 21.0.1 {es-pull}101133[#101133]
+
+Search::
+* Upgrade main to Lucene 9.8.0 {es-pull}100138[#100138]
+
diff --git a/docs/reference/release-notes/8.7.1.asciidoc b/docs/reference/release-notes/8.7.1.asciidoc
index a0513bc1a8f0e..70f5e4add88ca 100644
--- a/docs/reference/release-notes/8.7.1.asciidoc
+++ b/docs/reference/release-notes/8.7.1.asciidoc
@@ -18,6 +18,23 @@ This issue is fixed in 8.8.0.

 include::8.6.0.asciidoc[tag=reconciliation-imbalance-known-issue]

+// tag::no-preventive-gc-issue[]
+* High Memory Pressure due to a GC JVM setting change
++
+This version of Elasticsearch is bundled with JDK 20. In JDK 20
+https://bugs.openjdk.org/browse/JDK-8293861[Preventive GC is disabled by default].
+This may lead to increased memory pressure and an increased number of CircuitBreakerExceptions when retrieving large
+documents under some load patterns. (issue: {es-issue}99592[#99592])
++
+If this change affects your use of Elasticsearch, consider re-enabling the previous behaviour
+by adding the JVM arguments `-XX:+UnlockDiagnosticVMOptions -XX:+G1UsePreventiveGC` (reference:
+https://www.oracle.com/java/technologies/javase/20-relnote-issues.html#JDK-8293861[JDK 20 release notes]). It is
+important to note that this workaround is temporary and works only with JDK 20, which is bundled with Elasticsearch up
+to version 8.10.2 inclusive. Later versions bundle JDK 21+, where this setting
+https://bugs.openjdk.org/browse/JDK-8297639[has been removed]. On JDK 21+, specifying those JVM arguments will prevent the
+JVM (and therefore Elasticsearch nodes) from starting.
+// end::no-preventive-gc-issue[]
+
 [[bug-8.7.1]]
 [float]
 === Bug fixes
diff --git a/docs/reference/release-notes/8.8.2.asciidoc b/docs/reference/release-notes/8.8.2.asciidoc
index d7e6b9b1fcc76..8a24ae2e8d4ef 100644
--- a/docs/reference/release-notes/8.8.2.asciidoc
+++ b/docs/reference/release-notes/8.8.2.asciidoc
@@ -3,6 +3,11 @@

 Also see <>.
+[[known-issues-8.8.2]] +[float] +=== Known issues +include::8.7.1.asciidoc[tag=no-preventive-gc-issue] + [[bug-8.8.2]] [float] === Bug fixes diff --git a/docs/reference/release-notes/8.9.0.asciidoc b/docs/reference/release-notes/8.9.0.asciidoc index 2b7b143c268dc..c49eac9f0327c 100644 --- a/docs/reference/release-notes/8.9.0.asciidoc +++ b/docs/reference/release-notes/8.9.0.asciidoc @@ -12,6 +12,8 @@ task is longer than the model's max_sequence_length and truncate is set to none then inference fails with the message `question answering result has invalid dimension`. (issue: {es-issue}97917[#97917]) +include::8.7.1.asciidoc[tag=no-preventive-gc-issue] + [[breaking-8.9.0]] [float] === Breaking changes diff --git a/docs/reference/release-notes/8.9.1.asciidoc b/docs/reference/release-notes/8.9.1.asciidoc index 18c226538c4b9..680860622c1bb 100644 --- a/docs/reference/release-notes/8.9.1.asciidoc +++ b/docs/reference/release-notes/8.9.1.asciidoc @@ -3,6 +3,11 @@ Also see <>. +[[known-issues-8.9.1]] +[float] +=== Known issues +include::8.7.1.asciidoc[tag=no-preventive-gc-issue] + [[bug-8.9.1]] [float] === Bug fixes diff --git a/docs/reference/release-notes/8.9.2.asciidoc b/docs/reference/release-notes/8.9.2.asciidoc index 6b00405261daf..8464d21e1ccc4 100644 --- a/docs/reference/release-notes/8.9.2.asciidoc +++ b/docs/reference/release-notes/8.9.2.asciidoc @@ -3,6 +3,11 @@ Also see <>. +[[known-issues-8.9.2]] +[float] +=== Known issues +include::8.7.1.asciidoc[tag=no-preventive-gc-issue] + [float] [[security-updates-8.9.2]] === Security updates diff --git a/docs/reference/rest-api/common-parms.asciidoc b/docs/reference/rest-api/common-parms.asciidoc index 41fd3eefc31f2..55f277218d210 100644 --- a/docs/reference/rest-api/common-parms.asciidoc +++ b/docs/reference/rest-api/common-parms.asciidoc @@ -438,6 +438,8 @@ tag::ignore_throttled[] `ignore_throttled`:: (Optional, Boolean) If `true`, concrete, expanded or aliased indices are ignored when frozen. Defaults to `true`. ++ +deprecated:[7.16.0] end::ignore_throttled[] tag::index-ignore-unavailable[] diff --git a/docs/reference/rest-api/usage.asciidoc b/docs/reference/rest-api/usage.asciidoc index c33d203f1415b..959a798378fc6 100644 --- a/docs/reference/rest-api/usage.asciidoc +++ b/docs/reference/rest-api/usage.asciidoc @@ -183,6 +183,7 @@ GET /_xpack/usage "avg": 0.0, "max": 0.0 }, + "stats_by_model": [], "model_sizes_bytes": { "total": 0.0, "min": 0.0, diff --git a/docs/reference/search/multi-search.asciidoc b/docs/reference/search/multi-search.asciidoc index e8d29e00ba486..90056d5036558 100644 --- a/docs/reference/search/multi-search.asciidoc +++ b/docs/reference/search/multi-search.asciidoc @@ -84,10 +84,7 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] + Defaults to `open`. -`ignore_throttled`:: -(Optional, Boolean) -If `true`, concrete, expanded or aliased indices are ignored when frozen. -Defaults to `true`. 
+include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=ignore_throttled] include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] diff --git a/docs/reference/search/search-template-api.asciidoc b/docs/reference/search/search-template-api.asciidoc index 55142b953a194..539048a324746 100644 --- a/docs/reference/search/search-template-api.asciidoc +++ b/docs/reference/search/search-template-api.asciidoc @@ -92,9 +92,7 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] (Optional, Boolean) If `true`, the response includes additional details about score computation as part of a hit. Defaults to `false`. -`ignore_throttled`:: -(Optional, Boolean) If `true`, specified concrete, expanded, or aliased indices -are not included in the response when throttled. Defaults to `true`. +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=ignore_throttled] include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] diff --git a/docs/reference/search/search-your-data/ccs-version-compat-matrix.asciidoc b/docs/reference/search/search-your-data/ccs-version-compat-matrix.asciidoc index cae5627d65b54..8c289c27a2d31 100644 --- a/docs/reference/search/search-your-data/ccs-version-compat-matrix.asciidoc +++ b/docs/reference/search/search-your-data/ccs-version-compat-matrix.asciidoc @@ -1,20 +1,21 @@ -[cols="^,^,^,^,^,^,^,^,^,^,^,^,^,^,^"] +[cols="^,^,^,^,^,^,^,^,^,^,^,^,^,^,^,^"] |==== -| 14+^h| Remote cluster version +| 15+^h| Remote cluster version h| Local cluster version - | 6.8 | 7.1–7.16 | 7.17 | 8.0 | 8.1 | 8.2 | 8.3 | 8.4 | 8.5 |8.6 |8.7 |8.8 |8.9 |8.10 -| 6.8 | {yes-icon} | {yes-icon} | {yes-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} -| 7.1–7.16 | {yes-icon} | {yes-icon} | {yes-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} -| 7.17 | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.0 | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.1 | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.2 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.3 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon}|{yes-icon} | {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.4 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} |{yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.5 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} |{yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.6 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.7 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} 
| {no-icon}| {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.8 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.9 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.10 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} + | 6.8 | 7.1–7.16 | 7.17 | 8.0 | 8.1 | 8.2 | 8.3 | 8.4 | 8.5 |8.6 |8.7 |8.8 |8.9 |8.10 |8.11 +| 6.8 | {yes-icon} | {yes-icon} | {yes-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} +| 7.1–7.16 | {yes-icon} | {yes-icon} | {yes-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} +| 7.17 | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.0 | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.1 | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.2 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.3 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon}|{yes-icon} | {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.4 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} |{yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.5 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} |{yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.6 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.7 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.8 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.9 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.10 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.11 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {no-icon} | {no-icon} | 
{no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} |==== \ No newline at end of file diff --git a/docs/reference/search/search-your-data/knn-search.asciidoc b/docs/reference/search/search-your-data/knn-search.asciidoc index 4bf1ceabe08d8..c39719f1a3b61 100644 --- a/docs/reference/search/search-your-data/knn-search.asciidoc +++ b/docs/reference/search/search-your-data/knn-search.asciidoc @@ -76,12 +76,10 @@ to search one or more `dense_vector` fields with indexing enabled. requires the following mapping options: + -- -* An `index` value of `true`. - * A `similarity` value. This value determines the similarity metric used to score documents based on similarity between the query and document vector. For a list of available metrics, see the <> -parameter documentation. +parameter documentation. The `similarity` setting defaults to `cosine`. [source,console] ---- @@ -92,13 +90,11 @@ PUT image-index "image-vector": { "type": "dense_vector", "dims": 3, - "index": true, "similarity": "l2_norm" }, "title-vector": { "type": "dense_vector", "dims": 5, - "index": true, "similarity": "l2_norm" }, "title": { @@ -158,7 +154,7 @@ NOTE: Support for approximate kNN search was added in version 8.0. Before this, `dense_vector` fields did not support enabling `index` in the mapping. If you created an index prior to 8.0 containing `dense_vector` fields, then to support approximate kNN search the data must be reindexed using a new field -mapping that sets `index: true`. +mapping that sets `index: true` which is the default option. [discrete] [[tune-approximate-knn-for-speed-accuracy]] @@ -199,9 +195,7 @@ PUT byte-image-index "byte-image-vector": { "type": "dense_vector", "element_type": "byte", - "dims": 2, - "index": true, - "similarity": "cosine" + "dims": 2 }, "title": { "type": "text" @@ -516,9 +510,7 @@ PUT passage_vectors "properties": { "vector": { "type": "dense_vector", - "dims": 2, - "index": true, - "similarity": "cosine" + "dims": 2 }, "text": { "type": "text", @@ -877,7 +869,6 @@ PUT image-index "image-vector": { "type": "dense_vector", "dims": 3, - "index": true, "similarity": "l2_norm", "index_options": { "type": "hnsw", @@ -912,8 +903,8 @@ the global top `k` matches across shards. You cannot set the To run an exact kNN search, use a `script_score` query with a vector function. . Explicitly map one or more `dense_vector` fields. If you don't intend to use -the field for approximate kNN, omit the `index` mapping option or set it to -`false`. This can significantly improve indexing speed. +the field for approximate kNN, set the `index` mapping option to `false`. This +can significantly improve indexing speed. + [source,console] ---- diff --git a/docs/reference/search/search.asciidoc b/docs/reference/search/search.asciidoc index f953ce03ab1eb..68d286b3f267b 100644 --- a/docs/reference/search/search.asciidoc +++ b/docs/reference/search/search.asciidoc @@ -109,9 +109,7 @@ By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the <> parameter. -`ignore_throttled`:: -(Optional, Boolean) If `true`, concrete, expanded or aliased indices will be -ignored when frozen. Defaults to `true`. 
+include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=ignore_throttled] include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] diff --git a/docs/reference/snapshot-restore/apis/repo-analysis-api.asciidoc b/docs/reference/snapshot-restore/apis/repo-analysis-api.asciidoc index 8269ba376f878..2b2090405af60 100644 --- a/docs/reference/snapshot-restore/apis/repo-analysis-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/repo-analysis-api.asciidoc @@ -59,12 +59,13 @@ the impact of running an analysis inadvertently and to provide a sensible starting point for your investigations. Run your first analysis with the default parameter values to check for simple problems. If successful, run a sequence of increasingly large analyses until you encounter a failure or you reach a -`blob_count` of at least `2000`, a `max_blob_size` of at least `2gb`, and a -`max_total_data_size` of at least `1tb`. Always specify a generous timeout, -possibly `1h` or longer, to allow time for each analysis to run to completion. -Perform the analyses using a multi-node cluster of a similar size to your -production cluster so that it can detect any problems that only arise when the -repository is accessed by many nodes at once. +`blob_count` of at least `2000`, a `max_blob_size` of at least `2gb`, a +`max_total_data_size` of at least `1tb`, and a `register_operation_count` of at +least `100`. Always specify a generous timeout, possibly `1h` or longer, to +allow time for each analysis to run to completion. Perform the analyses using a +multi-node cluster of a similar size to your production cluster so that it can +detect any problems that only arise when the repository is accessed by many +nodes at once. If the analysis fails then {es} detected that your repository behaved unexpectedly. This usually means you are using a third-party storage system @@ -141,8 +142,10 @@ between versions. The request parameters and response format depend on details of the implementation so may also be different in newer versions. The analysis comprises a number of blob-level tasks, as set by the `blob_count` -parameter. The blob-level tasks are distributed over the data and -master-eligible nodes in the cluster for execution. +parameter, and a number of compare-and-exchange operations on linearizable +registers, as set by the `register_operation_count` parameter. These tasks are +distributed over the data and master-eligible nodes in the cluster for +execution. For most blob-level tasks, the executing node first writes a blob to the repository, and then instructs some of the other nodes in the cluster to @@ -175,6 +178,20 @@ complete. In this case it still instructs some of the other nodes in the cluster to attempt to read the blob, but all of these reads must fail to find the blob. +Linearizable registers are special blobs that {es} manipulates using an atomic +compare-and-exchange operation. This operation ensures correct and +strongly-consistent behavior even when the blob is accessed by multiple nodes +at the same time. The detailed implementation of the compare-and-exchange +operation on linearizable registers varies by repository type. Repository +analysis verifies that uncontended compare-and-exchange operations on a +linearizable register blob always succeed. Repository analysis also verifies +that contended operations either succeed or report the contention but do not +return incorrect results. If an operation fails due to contention, {es} retries +the operation until it succeeds.
Most of the compare-and-exchange operations +performed by repository analysis atomically increment a counter which is +represented as an 8-byte blob. Some operations also verify the behavior on +small blobs with sizes other than 8 bytes. + [[repo-analysis-api-path-params]] ==== {api-path-parms-title} @@ -200,6 +217,11 @@ this to at least `2gb`. the blobs written during the test. Defaults to `1gb`. For realistic experiments you should set this to at least `1tb`. +`register_operation_count`:: +(Optional, integer) The minimum number of linearizable register operations to +perform in total. Defaults to `10`. For realistic experiments you should set +this to at least `100`. + `timeout`:: (Optional, <>) Specifies the period of time to wait for the test to complete. If no response is received before the timeout expires, diff --git a/docs/reference/snapshot-restore/repository-azure.asciidoc b/docs/reference/snapshot-restore/repository-azure.asciidoc index e848ec9620cb4..35cf454906050 100644 --- a/docs/reference/snapshot-restore/repository-azure.asciidoc +++ b/docs/reference/snapshot-restore/repository-azure.asciidoc @@ -257,3 +257,15 @@ following naming rules: permitted in container names. * All letters in a container name must be lowercase. * Container names must be from 3 through 63 characters long. + +[[repository-azure-linearizable-registers]] +==== Linearizable register implementation + +The linearizable register implementation for Azure repositories is based on +Azure's support for strongly consistent leases. Each lease may only be held by +a single node at any time. The node presents its lease when performing a read +or write operation on a protected blob. Lease-protected operations fail if the +lease is invalid or expired. To perform a compare-and-exchange operation on a +register, {es} first obtains a lease on the blob, then reads the blob contents +under the lease, and finally uploads the updated blob under the same lease. +This process ensures that the read and write operations happen atomically. diff --git a/docs/reference/snapshot-restore/repository-gcs.asciidoc b/docs/reference/snapshot-restore/repository-gcs.asciidoc index d99b9bc81567f..b359952715a73 100644 --- a/docs/reference/snapshot-restore/repository-gcs.asciidoc +++ b/docs/reference/snapshot-restore/repository-gcs.asciidoc @@ -275,3 +275,13 @@ The service account used to access the bucket must have the "Writer" access to t 3. Go to the https://console.cloud.google.com/storage/browser[Storage Browser]. 4. Select the bucket and "Edit bucket permission". 5. The service account must be configured as a "User" with "Writer" access. + +[[repository-gcs-linearizable-registers]] +==== Linearizable register implementation + +The linearizable register implementation for GCS repositories is based on GCS's +support for strongly consistent preconditions on put-blob operations. To +perform a compare-and-exchange operation on a register, {es} retrieves the +register blob and its current generation, and then uploads the updated blob +using the observed generation as its precondition. The precondition ensures +that the generation has not changed in the meantime. 
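The repository analysis API documented above accepts the new `register_operation_count` parameter alongside the existing blob-level parameters. As an illustrative sketch only (not part of the original change; `my_repository` is a placeholder repository name), a "realistic experiment" using the minimum values recommended above could be invoked like this:

[source,console]
----
POST /_snapshot/my_repository/_analyze?blob_count=2000&max_blob_size=2gb&max_total_data_size=1tb&register_operation_count=100&timeout=1h
----
// TEST[skip:illustrative sketch only, requires a registered repository]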
diff --git a/docs/reference/snapshot-restore/repository-s3.asciidoc b/docs/reference/snapshot-restore/repository-s3.asciidoc index 70993f5b515b3..3f2210f51cbb5 100644 --- a/docs/reference/snapshot-restore/repository-s3.asciidoc +++ b/docs/reference/snapshot-restore/repository-s3.asciidoc @@ -12,7 +12,7 @@ https://www.elastic.co/cloud/.* To register an S3 repository, specify the type as `s3` when creating the repository. The repository defaults to using https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html[ECS -IAM Role] credentials for authentication. You can also use <> Kubernetes service accounts. +IAM Role] credentials for authentication. You can also use <> for authentication. The only mandatory setting is the bucket name: @@ -198,75 +198,6 @@ pattern then you should set this setting to `true` when upgrading. https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/ClientConfiguration.html#setSignerOverride-java.lang.String-[AWS Java SDK documentation] for details. Defaults to empty string which means that no signing algorithm override will be used. -[discrete] -[[repository-s3-compatible-services]] -===== S3-compatible services - -There are a number of storage systems that provide an S3-compatible API, and -the `repository-s3` type allows you to use these systems in place of AWS S3. -To do so, you should set the `s3.client.CLIENT_NAME.endpoint` setting to the -system's endpoint. This setting accepts IP addresses and hostnames and may -include a port. For example, the endpoint may be `172.17.0.2` or -`172.17.0.2:9000`. - -By default {es} communicates with your storage system using HTTPS, and -validates the repository's certificate chain using the JVM-wide truststore. -Ensure that the JVM-wide truststore includes an entry for your repository. If -you wish to use unsecured HTTP communication instead of HTTPS, set -`s3.client.CLIENT_NAME.protocol` to `http`. - -https://minio.io[MinIO] is an example of a storage system that provides an -S3-compatible API. The `repository-s3` type allows {es} to work with -MinIO-backed repositories as well as repositories stored on AWS S3. Other -S3-compatible storage systems may also work with {es}, but these are not -covered by the {es} test suite. - -Note that some storage systems claim to be S3-compatible but do not faithfully -emulate S3's behaviour in full. The `repository-s3` type requires full -compatibility with S3. In particular it must support the same set of API -endpoints, return the same errors in case of failures, and offer consistency and -performance at least as good as S3 even when accessed concurrently by multiple -nodes. You will need to work with the supplier of your storage system to address -any incompatibilities you encounter. Please do not report {es} issues involving -storage systems which claim to be S3-compatible unless you can demonstrate that -the same issue exists when using a genuine AWS S3 repository. - -You can perform some basic checks of the suitability of your storage system -using the {ref}/repo-analysis-api.html[repository analysis API]. If this API -does not complete successfully, or indicates poor performance, then your -storage system is not fully compatible with AWS S3 and therefore unsuitable for -use as a snapshot repository. However, these checks do not guarantee full -compatibility. - -Most storage systems can be configured to log the details of their interaction -with {es}. 
If you are investigating a suspected incompatibility with AWS S3, it -is usually simplest to collect these logs and provide them to the supplier of -your storage system for further analysis. If the incompatibility is not clear -from the logs emitted by the storage system, configure {es} to log every -request it makes to the S3 API by <> of the `com.amazonaws.request` logger to `DEBUG`: - -[source,console] ----- -PUT /_cluster/settings -{ - "persistent": { - "logger.com.amazonaws.request": "DEBUG" - } -} ----- -// TEST[skip:we don't really want to change this logger] - -Collect the Elasticsearch logs covering the time period of the failed analysis -from all nodes in your cluster and share them with the supplier of your storage -system along with the analysis response so they can use them to determine the -problem. See the -https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/java-dg-logging.html[AWS Java SDK] -documentation for further information, including details about other loggers -that can be used to obtain even more verbose logs. When you have finished -collecting the logs needed by your supplier, set the logger settings back to -`null` to return to the default logging configuration. See <> -and <> for more information. [[repository-s3-repository]] ==== Repository settings @@ -401,7 +332,7 @@ This sets up a repository that uses all client settings from the client `my.s3.endpoint` by the repository settings. [[repository-s3-permissions]] -===== Recommended S3 permissions +==== Recommended S3 permissions In order to restrict the Elasticsearch snapshot process to the minimum required resources, we recommend using Amazon IAM in conjunction with pre-existing S3 @@ -493,7 +424,28 @@ bucket, in this example, named "foo". The bucket needs to exist to register a repository for snapshots. If you did not create the bucket then the repository registration will fail. -===== Cleaning up multi-part uploads +[[iam-kubernetes-service-accounts]] +[discrete] +===== Using IAM roles for Kubernetes service accounts for authentication + +If you want to use https://aws.amazon.com/blogs/opensource/introducing-fine-grained-iam-roles-service-accounts/[Kubernetes service accounts] +for authentication, you need to add a symlink to the `$AWS_WEB_IDENTITY_TOKEN_FILE` environment variable +(which should be automatically set by a Kubernetes pod) in the S3 repository config directory, so the repository +can have the read access for the service account (a repository can't read any files outside its config directory). +For example: + +[source,bash] +---- +mkdir -p "${ES_PATH_CONF}/repository-s3" +ln -s $AWS_WEB_IDENTITY_TOKEN_FILE "${ES_PATH_CONF}/repository-s3/aws-web-identity-token-file" +---- + +IMPORTANT: The symlink must be created on all data and master eligible nodes and be readable +by the `elasticsearch` user. By default, {es} runs as user `elasticsearch` using uid:gid `1000:0`. + +If the symlink exists, it will be used by default by all S3 repositories that don't have explicit `client` credentials. + +==== Cleaning up multi-part uploads {es} uses S3's multi-part upload process to upload larger blobs to the repository. The multi-part upload process works by dividing each blob into @@ -521,7 +473,6 @@ a bucket lifecycle policy] to automatically abort incomplete uploads once they reach a certain age. [[repository-s3-aws-vpc]] -[discrete] ==== AWS VPC bandwidth settings AWS instances resolve S3 endpoints to a public IP. If the Elasticsearch @@ -537,23 +488,81 @@ bandwidth of your VPC's NAT instance. 
Instances residing in a public subnet in an AWS VPC will connect to S3 via the VPC's internet gateway and not be bandwidth limited by the VPC's NAT instance. +[[repository-s3-compatible-services]] +==== S3-compatible services -[[iam-kubernetes-service-accounts]] -[discrete] -==== Using IAM roles for Kubernetes service accounts for authentication -If you want to use https://aws.amazon.com/blogs/opensource/introducing-fine-grained-iam-roles-service-accounts/[Kubernetes service accounts] -for authentication, you need to add a symlink to the `$AWS_WEB_IDENTITY_TOKEN_FILE` environment variable -(which should be automatically set by a Kubernetes pod) in the S3 repository config directory, so the repository -can have the read access for the service account (a repository can't read any files outside its config directory). -For example: +There are a number of storage systems that provide an S3-compatible API, and +the `repository-s3` type allows you to use these systems in place of AWS S3. +To do so, you should set the `s3.client.CLIENT_NAME.endpoint` setting to the +system's endpoint. This setting accepts IP addresses and hostnames and may +include a port. For example, the endpoint may be `172.17.0.2` or +`172.17.0.2:9000`. -[source,bash] +By default {es} communicates with your storage system using HTTPS, and +validates the repository's certificate chain using the JVM-wide truststore. +Ensure that the JVM-wide truststore includes an entry for your repository. If +you wish to use unsecured HTTP communication instead of HTTPS, set +`s3.client.CLIENT_NAME.protocol` to `http`. + +https://minio.io[MinIO] is an example of a storage system that provides an +S3-compatible API. The `repository-s3` type allows {es} to work with +MinIO-backed repositories as well as repositories stored on AWS S3. Other +S3-compatible storage systems may also work with {es}, but these are not +covered by the {es} test suite. + +Note that some storage systems claim to be S3-compatible but do not faithfully +emulate S3's behaviour in full. The `repository-s3` type requires full +compatibility with S3. In particular it must support the same set of API +endpoints, return the same errors in case of failures, and offer consistency and +performance at least as good as S3 even when accessed concurrently by multiple +nodes. You will need to work with the supplier of your storage system to address +any incompatibilities you encounter. Please do not report {es} issues involving +storage systems which claim to be S3-compatible unless you can demonstrate that +the same issue exists when using a genuine AWS S3 repository. + +You can perform some basic checks of the suitability of your storage system +using the {ref}/repo-analysis-api.html[repository analysis API]. If this API +does not complete successfully, or indicates poor performance, then your +storage system is not fully compatible with AWS S3 and therefore unsuitable for +use as a snapshot repository. However, these checks do not guarantee full +compatibility. + +Most storage systems can be configured to log the details of their interaction +with {es}. If you are investigating a suspected incompatibility with AWS S3, it +is usually simplest to collect these logs and provide them to the supplier of +your storage system for further analysis. 
If the incompatibility is not clear +from the logs emitted by the storage system, configure {es} to log every +request it makes to the S3 API by <> of the `com.amazonaws.request` logger to `DEBUG`: + +[source,console] ---- -mkdir -p "${ES_PATH_CONF}/repository-s3" -ln -s $AWS_WEB_IDENTITY_TOKEN_FILE "${ES_PATH_CONF}/repository-s3/aws-web-identity-token-file" +PUT /_cluster/settings +{ + "persistent": { + "logger.com.amazonaws.request": "DEBUG" + } +} ---- +// TEST[skip:we don't really want to change this logger] -IMPORTANT: The symlink must be created on all data and master eligible nodes and be readable -by the `elasticsearch` user. By default, {es} runs as user `elasticsearch` using uid:gid `1000:0`. +Collect the Elasticsearch logs covering the time period of the failed analysis +from all nodes in your cluster and share them with the supplier of your storage +system along with the analysis response so they can use them to determine the +problem. See the +https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/java-dg-logging.html[AWS Java SDK] +documentation for further information, including details about other loggers +that can be used to obtain even more verbose logs. When you have finished +collecting the logs needed by your supplier, set the logger settings back to +`null` to return to the default logging configuration. See <> +and <> for more information. -If the symlink exists, it will be used by default by all S3 repositories that don't have explicit `client` credentials. +[[repository-s3-linearizable-registers]] +==== Linearizable register implementation + +The linearizable register implementation for S3 repositories is based on the +strongly consistent semantics of the multipart upload API. {es} first creates a +multipart upload to indicate its intention to perform a linearizable register +operation. {es} then lists and cancels all other multipart uploads for the same +register. {es} then attempts to complete the upload. If the upload completes +successfully then the compare-and-exchange operation was atomic. diff --git a/docs/reference/snapshot-restore/repository-shared-file-system.asciidoc b/docs/reference/snapshot-restore/repository-shared-file-system.asciidoc index 0bd64d43f1381..6be49d9d4422f 100644 --- a/docs/reference/snapshot-restore/repository-shared-file-system.asciidoc +++ b/docs/reference/snapshot-restore/repository-shared-file-system.asciidoc @@ -84,3 +84,12 @@ each node, but for these accounts to have different numeric user or group IDs. If your shared file system uses NFS then ensure that every node is running with the same numeric UID and GID, or else update your NFS configuration to account for the variance in numeric IDs across nodes. + +[[repository-fs-linearizable-registers]] +==== Linearizable register implementation + +The linearizable register implementation for shared filesystem repositories is +based around file locking. To perform a compare-and-exchange operation on a +register, {es} first locks he underlying file and then writes the updated +contents under the same lock. This ensures that the file has not changed in the +meantime. 
diff --git a/docs/reference/tab-widgets/esql/esql-getting-started.asciidoc b/docs/reference/tab-widgets/esql/esql-getting-started-discover-console.asciidoc similarity index 89% rename from docs/reference/tab-widgets/esql/esql-getting-started.asciidoc rename to docs/reference/tab-widgets/esql/esql-getting-started-discover-console.asciidoc index 0ebcb7c92e59f..b8998ef199c99 100644 --- a/docs/reference/tab-widgets/esql/esql-getting-started.asciidoc +++ b/docs/reference/tab-widgets/esql/esql-getting-started-discover-console.asciidoc @@ -34,6 +34,9 @@ FROM sample_data include::../../esql/esql-kibana.asciidoc[tag=esql-mode] +Adjust the time filter so it includes the timestamps in the sample data (October +23rd, 2023). + After switching to {esql} mode, the query bar shows a sample query. You can replace this query with the queries in this getting started guide. diff --git a/docs/reference/tab-widgets/esql/esql-getting-started-enrich-policy.asciidoc b/docs/reference/tab-widgets/esql/esql-getting-started-enrich-policy.asciidoc new file mode 100644 index 0000000000000..39560c7500b42 --- /dev/null +++ b/docs/reference/tab-widgets/esql/esql-getting-started-enrich-policy.asciidoc @@ -0,0 +1,65 @@ +// tag::own-deployment[] +
+The following requests create and execute a policy called `clientip_policy`. The
+policy links an IP address to an environment ("Development", "QA", or
+"Production"):
+
+[source,console]
+----
+PUT clientips
+{
+  "mappings": {
+    "properties": {
+      "client.ip": {
+        "type": "keyword"
+      },
+      "env": {
+        "type": "keyword"
+      }
+    }
+  }
+}
+
+PUT clientips/_bulk
+{ "index" : {}}
+{ "client.ip": "172.21.0.5", "env": "Development" }
+{ "index" : {}}
+{ "client.ip": "172.21.2.113", "env": "QA" }
+{ "index" : {}}
+{ "client.ip": "172.21.2.162", "env": "QA" }
+{ "index" : {}}
+{ "client.ip": "172.21.3.15", "env": "Production" }
+{ "index" : {}}
+{ "client.ip": "172.21.3.16", "env": "Production" }
+
+PUT /_enrich/policy/clientip_policy
+{
+  "match": {
+    "indices": "clientips",
+    "match_field": "client.ip",
+    "enrich_fields": ["env"]
+  }
+}
+
+PUT /_enrich/policy/clientip_policy/_execute
+----
+
+////
+[source,console]
+----
+DELETE /_enrich/policy/clientip_policy
+----
+// TEST[continued]
+////
+
+// end::own-deployment[]
+
+
+// tag::demo-env[]
+
+On the demo environment at https://esql.demo.elastic.co/[esql.demo.elastic.co],
+an enrich policy called `clientip_policy` has already been created and executed.
+The policy links an IP address to an environment ("Development", "QA", or
+"Production").
+
+// end::demo-env[] diff --git a/docs/reference/tab-widgets/esql/esql-getting-started-sample-data.asciidoc b/docs/reference/tab-widgets/esql/esql-getting-started-sample-data.asciidoc new file mode 100644 index 0000000000000..434954d8d400a --- /dev/null +++ b/docs/reference/tab-widgets/esql/esql-getting-started-sample-data.asciidoc @@ -0,0 +1,48 @@ +// tag::own-deployment[] +
+First ingest some sample data. In {kib}, open the main menu and select *Dev
+Tools*.
Run the following two requests:
+
+[source,console]
+----
+PUT sample_data
+{
+  "mappings": {
+    "properties": {
+      "client.ip": {
+        "type": "ip"
+      },
+      "message": {
+        "type": "keyword"
+      }
+    }
+  }
+}
+
+PUT sample_data/_bulk
+{"index": {}}
+{"@timestamp": "2023-10-23T12:15:03.360Z", "client.ip": "172.21.2.162", "message": "Connected to 10.1.0.3", "event.duration": 3450233}
+{"index": {}}
+{"@timestamp": "2023-10-23T12:27:28.948Z", "client.ip": "172.21.2.113", "message": "Connected to 10.1.0.2", "event.duration": 2764889}
+{"index": {}}
+{"@timestamp": "2023-10-23T13:33:34.937Z", "client.ip": "172.21.0.5", "message": "Disconnected", "event.duration": 1232382}
+{"index": {}}
+{"@timestamp": "2023-10-23T13:51:54.732Z", "client.ip": "172.21.3.15", "message": "Connection error", "event.duration": 725448}
+{"index": {}}
+{"@timestamp": "2023-10-23T13:52:55.015Z", "client.ip": "172.21.3.15", "message": "Connection error", "event.duration": 8268153}
+{"index": {}}
+{"@timestamp": "2023-10-23T13:53:55.832Z", "client.ip": "172.21.3.15", "message": "Connection error", "event.duration": 5033755}
+{"index": {}}
+{"@timestamp": "2023-10-23T13:55:01.543Z", "client.ip": "172.21.3.15", "message": "Connected to 10.1.0.1", "event.duration": 1756467}
+----
+
+// end::own-deployment[]
+
+
+// tag::demo-env[]
+
+The data set used in this guide has been preloaded into the Elastic {esql}
+public demo environment. Visit
+https://esql.demo.elastic.co/[esql.demo.elastic.co] to start using it.
+
+// end::demo-env[] diff --git a/docs/reference/tab-widgets/esql/esql-getting-started-widget.asciidoc b/docs/reference/tab-widgets/esql/esql-getting-started-widget-discover-console.asciidoc similarity index 72% rename from docs/reference/tab-widgets/esql/esql-getting-started-widget.asciidoc rename to docs/reference/tab-widgets/esql/esql-getting-started-widget-discover-console.asciidoc index 49dc573f3b0bb..dff80e25812c3 100644 --- a/docs/reference/tab-widgets/esql/esql-getting-started-widget.asciidoc +++ b/docs/reference/tab-widgets/esql/esql-getting-started-widget-discover-console.asciidoc @@ -1,6 +1,6 @@ ++++ -
    -
    +
    +
    @@ -31,7 +31,7 @@ include::esql-getting-started.asciidoc[tag=console] hidden=""> ++++ -include::esql-getting-started.asciidoc[tag=discover] +include::esql-getting-started-discover-console.asciidoc[tag=discover] ++++
    diff --git a/docs/reference/tab-widgets/esql/esql-getting-started-widget-enrich-policy.asciidoc b/docs/reference/tab-widgets/esql/esql-getting-started-widget-enrich-policy.asciidoc new file mode 100644 index 0000000000000..cafefeb2652e4 --- /dev/null +++ b/docs/reference/tab-widgets/esql/esql-getting-started-widget-enrich-policy.asciidoc @@ -0,0 +1,39 @@ +++++ +
    +
    + + +
    +
    +++++ + +include::esql-getting-started-enrich-policy.asciidoc[tag=own-deployment] + +++++ +
    + +
    +++++ \ No newline at end of file diff --git a/docs/reference/tab-widgets/esql/esql-getting-started-widget-sample-data.asciidoc b/docs/reference/tab-widgets/esql/esql-getting-started-widget-sample-data.asciidoc new file mode 100644 index 0000000000000..4a33cf3f08866 --- /dev/null +++ b/docs/reference/tab-widgets/esql/esql-getting-started-widget-sample-data.asciidoc @@ -0,0 +1,39 @@ +++++ +
    +
    + + +
    +
    +++++ + +include::esql-getting-started-sample-data.asciidoc[tag=own-deployment] + +++++ +
    + +
    +++++ \ No newline at end of file diff --git a/docs/reference/tab-widgets/semantic-search/field-mappings.asciidoc b/docs/reference/tab-widgets/semantic-search/field-mappings.asciidoc index 2fe2f9cea83f9..b702a1fc8f426 100644 --- a/docs/reference/tab-widgets/semantic-search/field-mappings.asciidoc +++ b/docs/reference/tab-widgets/semantic-search/field-mappings.asciidoc @@ -63,9 +63,7 @@ PUT my-index "properties": { "my_embeddings.predicted_value": { <1> "type": "dense_vector", <2> - "dims": 384,<3> - "index": true, - "similarity": "cosine" + "dims": 384 <3> }, "my_text_field": { <4> "type": "text" <5> diff --git a/libs/core/src/main/java/org/elasticsearch/core/UpdateForV9.java b/libs/core/src/main/java/org/elasticsearch/core/UpdateForV9.java new file mode 100644 index 0000000000000..2a31e2ccde222 --- /dev/null +++ b/libs/core/src/main/java/org/elasticsearch/core/UpdateForV9.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.core; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Annotation to identify a block of code (a whole class, a method, or a field) that needs to be reviewed (for cleanup, remove or change) + * before releasing 9.0 + */ +@Retention(RetentionPolicy.SOURCE) +@Target({ ElementType.LOCAL_VARIABLE, ElementType.CONSTRUCTOR, ElementType.FIELD, ElementType.METHOD, ElementType.TYPE }) +public @interface UpdateForV9 { +} diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java index 83f7d496f698f..dd497e8ca5478 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java @@ -166,11 +166,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return REGISTRY_KEY; - } - public String getMinimumIntervalExpression() { return minimumIntervalExpression; } diff --git a/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/ReloadSynonymAnalyzerIT.java b/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/ReloadSynonymAnalyzerIT.java index a9ffdb60419f9..f0063f663142d 100644 --- a/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/ReloadSynonymAnalyzerIT.java +++ b/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/ReloadSynonymAnalyzerIT.java @@ -17,7 +17,6 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.InternalTestCluster; import java.io.FileNotFoundException; import java.io.IOException; @@ -44,15 +43,6 @@ protected Collection> nodePlugins() { return 
Arrays.asList(CommonAnalysisPlugin.class); } - /** - * This test needs to write to the config directory, this is difficult in an external cluster so we overwrite this to force running with - * {@link InternalTestCluster} - */ - @Override - protected boolean ignoreExternalCluster() { - return true; - } - public void testSynonymsUpdateable() throws FileNotFoundException, IOException, InterruptedException { testSynonymsUpdate(false); } diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HighlighterWithAnalyzersTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HighlighterWithAnalyzersTests.java index e7e9aa32b1684..9c46e8830647b 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HighlighterWithAnalyzersTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HighlighterWithAnalyzersTests.java @@ -8,13 +8,10 @@ package org.elasticsearch.analysis.common; -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.query.Operator; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xcontent.XContentBuilder; @@ -30,9 +27,9 @@ import static org.elasticsearch.index.query.QueryBuilders.matchQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.search.builder.SearchSourceBuilder.highlight; -import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHighlight; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; @@ -114,10 +111,18 @@ public void testNgramHighlightingWithBrokenPositions() throws IOException { ); client().prepareIndex("test").setId("1").setSource("name", "ARCOTEL Hotels Deutschland").get(); refresh(); - SearchResponse search = prepareSearch("test").setQuery(matchQuery("name.autocomplete", "deut tel").operator(Operator.OR)) - .highlighter(new HighlightBuilder().field("name.autocomplete")) - .get(); - assertHighlight(search, 0, "name.autocomplete", 0, equalTo("ARCOTEL Hotels Deutschland")); + assertResponse( + prepareSearch("test").setQuery(matchQuery("name.autocomplete", "deut tel").operator(Operator.OR)) + .highlighter(new HighlightBuilder().field("name.autocomplete")), + response -> assertHighlight( + response, + 0, + "name.autocomplete", + 0, + equalTo("ARCOTEL Hotels Deutschland") + ) + ); + } public void testMultiPhraseCutoff() throws IOException { @@ -158,30 +163,34 @@ public void testMultiPhraseCutoff() throws IOException { ) .get(); refresh(); - SearchResponse search = prepareSearch().setQuery(matchPhraseQuery("body", "Test: http://www.facebook.com ")) - .highlighter(new HighlightBuilder().field("body").highlighterType("fvh")) - .get(); - assertHighlight(search, 0, "body", 0, startsWith("Test: http://www.facebook.com")); - search = prepareSearch().setQuery( - 
matchPhraseQuery( + assertResponse( + prepareSearch().setQuery(matchPhraseQuery("body", "Test: http://www.facebook.com ")) + .highlighter(new HighlightBuilder().field("body").highlighterType("fvh")), + response -> assertHighlight(response, 0, "body", 0, startsWith("Test: http://www.facebook.com")) + ); + + assertResponse( + prepareSearch().setQuery( + matchPhraseQuery( + "body", + "Test: http://www.facebook.com " + + "http://elasticsearch.org http://xing.com http://cnn.com " + + "http://quora.com http://twitter.com this is a test for highlighting " + + "feature Test: http://www.facebook.com http://elasticsearch.org " + + "http://xing.com http://cnn.com http://quora.com http://twitter.com this " + + "is a test for highlighting feature" + ) + ).highlighter(new HighlightBuilder().field("body").highlighterType("fvh")), + response -> assertHighlight( + response, + 0, "body", - "Test: http://www.facebook.com " - + "http://elasticsearch.org http://xing.com http://cnn.com " - + "http://quora.com http://twitter.com this is a test for highlighting " - + "feature Test: http://www.facebook.com http://elasticsearch.org " - + "http://xing.com http://cnn.com http://quora.com http://twitter.com this " - + "is a test for highlighting feature" - ) - ).highlighter(new HighlightBuilder().field("body").highlighterType("fvh")).execute().actionGet(); - assertHighlight( - search, - 0, - "body", - 0, - equalTo( - "Test: " - + "http://www.facebook.com http://elasticsearch.org " - + "http://xing.com http://cnn.com http://quora.com" + 0, + equalTo( + "Test: " + + "http://www.facebook.com http://elasticsearch.org " + + "http://xing.com http://cnn.com http://quora.com" + ) ) ); } @@ -207,15 +216,24 @@ public void testSynonyms() throws IOException { refresh(); for (String highlighterType : new String[] { "plain", "fvh", "unified" }) { logger.info("--> highlighting (type=" + highlighterType + ") and searching on field1"); - SearchSourceBuilder source = searchSource().query(matchQuery("field1", "quick brown fox").operator(Operator.AND)) - .highlighter(highlight().field("field1").order("score").preTags("").postTags("").highlighterType(highlighterType)); - SearchResponse searchResponse = client().search(new SearchRequest("test").source(source)).actionGet(); - assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); - - source = searchSource().query(matchQuery("field1", "fast brown fox").operator(Operator.AND)) - .highlighter(highlight().field("field1").order("score").preTags("").postTags("")); - searchResponse = client().search(new SearchRequest("test").source(source)).actionGet(); - assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); + assertResponse( + prepareSearch("test").setQuery(matchQuery("field1", "quick brown fox").operator(Operator.AND)) + .highlighter( + highlight().field("field1").order("score").preTags("").postTags("").highlighterType(highlighterType) + ), + resp -> { + assertHighlight(resp, 0, "field1", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); + } + ); + assertResponse( + prepareSearch("test").setQuery(matchQuery("field1", "fast brown fox").operator(Operator.AND)) + .highlighter( + highlight().field("field1").order("score").preTags("").postTags("").highlighterType(highlighterType) + ), + resp -> { + assertHighlight(resp, 0, "field1", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); + } + ); } } @@ -242,67 +260,66 @@ public void testPhrasePrefix() throws IOException { 
refresh(); logger.info("--> highlighting and searching on field0"); - SearchSourceBuilder source = searchSource().query(matchPhrasePrefixQuery("field0", "bro")) - .highlighter(highlight().field("field0").order("score").preTags("").postTags("")); - SearchResponse searchResponse = client().search(new SearchRequest("first_test_index").source(source)).actionGet(); - - assertHighlight(searchResponse, 0, "field0", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); - - source = searchSource().query(matchPhrasePrefixQuery("field0", "quick bro")) - .highlighter(highlight().field("field0").order("score").preTags("").postTags("")); + assertResponse( + prepareSearch("first_test_index").setQuery(matchPhrasePrefixQuery("field0", "bro")) + .highlighter(highlight().field("field0").order("score").preTags("").postTags("")), + resp -> { + assertHighlight(resp, 0, "field0", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); + } + ); - searchResponse = client().search(new SearchRequest("first_test_index").source(source)).actionGet(); - assertHighlight(searchResponse, 0, "field0", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); + assertResponse( + prepareSearch("first_test_index").setQuery(matchPhrasePrefixQuery("field0", "quick bro")) + .highlighter(highlight().field("field0").order("score").preTags("").postTags("")), + resp -> { + assertHighlight(resp, 0, "field0", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); + } + ); logger.info("--> highlighting and searching on field1"); - source = searchSource().query( - boolQuery().should(matchPhrasePrefixQuery("field1", "test")).should(matchPhrasePrefixQuery("field1", "bro")) - ).highlighter(highlight().field("field1").order("score").preTags("").postTags("")); - - searchResponse = client().search(new SearchRequest("first_test_index").source(source)).actionGet(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - for (int i = 0; i < 2; i++) { - assertHighlight( - searchResponse, - i, - "field1", - 0, - 1, - anyOf( - equalTo("The quick browse button is a fancy thing, right bro?"), - equalTo("The quick brown fox jumps over the lazy dog") - ) - ); - } - - source = searchSource().query(matchPhrasePrefixQuery("field1", "quick bro")) - .highlighter(highlight().field("field1").order("score").preTags("").postTags("")); - - searchResponse = client().search(new SearchRequest("first_test_index").source(source)).actionGet(); - - assertHighlight( - searchResponse, - 0, - "field1", - 0, - 1, - anyOf( - equalTo("The quick browse button is a fancy thing, right bro?"), - equalTo("The quick brown fox jumps over the lazy dog") - ) + assertResponse( + prepareSearch("first_test_index").setQuery( + boolQuery().should(matchPhrasePrefixQuery("field1", "test")).should(matchPhrasePrefixQuery("field1", "bro")) + ).highlighter(highlight().field("field1").order("score").preTags("").postTags("")), + resp -> { + assertThat(resp.getHits().getTotalHits().value, equalTo(2L)); + for (int i = 0; i < 2; i++) { + assertHighlight( + resp, + i, + "field1", + 0, + 1, + anyOf( + equalTo("The quick browse button is a fancy thing, right bro?"), + equalTo("The quick brown fox jumps over the lazy dog") + ) + ); + } + } ); - assertHighlight( - searchResponse, - 1, - "field1", - 0, - 1, - anyOf( - equalTo("The quick browse button is a fancy thing, right bro?"), - equalTo("The quick brown fox jumps over the lazy dog") - ) + + assertResponse( + prepareSearch("first_test_index").setQuery(matchPhrasePrefixQuery("field1", "quick bro")) + 
.highlighter(highlight().field("field1").order("score").preTags("").postTags("")), + resp -> { + for (int i = 0; i < 2; i++) { + assertHighlight( + resp, + i, + "field1", + 0, + 1, + anyOf( + equalTo("The quick browse button is a fancy thing, right bro?"), + equalTo("The quick brown fox jumps over the lazy dog") + ) + ); + } + } ); + // with synonyms assertAcked( prepareCreate("second_test_index").setSettings(builder.build()) .setMapping( @@ -312,7 +329,6 @@ public void testPhrasePrefix() throws IOException { "type=text,analyzer=synonym" ) ); - // with synonyms client().prepareIndex("second_test_index") .setId("0") .setSource( @@ -331,56 +347,49 @@ public void testPhrasePrefix() throws IOException { client().prepareIndex("second_test_index").setId("2").setSource("type", "type2", "field4", "a quick fast blue car").get(); refresh(); - source = searchSource().postFilter(termQuery("type", "type2")) - .query(matchPhrasePrefixQuery("field3", "fast bro")) - .highlighter(highlight().field("field3").order("score").preTags("").postTags("")); - - searchResponse = client().search(new SearchRequest("second_test_index").source(source)).actionGet(); - - assertHighlight(searchResponse, 0, "field3", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); - - logger.info("--> highlighting and searching on field4"); - source = searchSource().postFilter(termQuery("type", "type2")) - .query(matchPhrasePrefixQuery("field4", "the fast bro")) - .highlighter(highlight().field("field4").order("score").preTags("").postTags("")); - searchResponse = client().search(new SearchRequest("second_test_index").source(source)).actionGet(); - - assertHighlight( - searchResponse, - 0, - "field4", - 0, - 1, - anyOf( - equalTo("The quick browse button is a fancy thing, right bro?"), - equalTo("The quick brown fox jumps over the lazy dog") - ) + assertResponse( + prepareSearch("second_test_index").setQuery(matchPhrasePrefixQuery("field3", "fast bro")) + .highlighter(highlight().field("field3").order("score").preTags("").postTags("")), + resp -> { + assertHighlight(resp, 0, "field3", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); + } ); - assertHighlight( - searchResponse, - 1, - "field4", - 0, - 1, - anyOf( - equalTo("The quick browse button is a fancy thing, right bro?"), - equalTo("The quick brown fox jumps over the lazy dog") - ) + + assertResponse( + prepareSearch("second_test_index").setQuery(matchPhrasePrefixQuery("field4", "the fast bro")) + .highlighter(highlight().field("field4").order("score").preTags("").postTags("")), + resp -> { + for (int i = 0; i < 2; i++) { + assertHighlight( + resp, + i, + "field4", + 0, + 1, + anyOf( + equalTo("The quick browse button is a fancy thing, right bro?"), + equalTo("The quick brown fox jumps over the lazy dog") + ) + ); + } + } ); logger.info("--> highlighting and searching on field4"); - source = searchSource().postFilter(termQuery("type", "type2")) - .query(matchPhrasePrefixQuery("field4", "a fast quick blue ca")) - .highlighter(highlight().field("field4").order("score").preTags("").postTags("")); - searchResponse = client().search(new SearchRequest("second_test_index").source(source)).actionGet(); - - assertHighlight( - searchResponse, - 0, - "field4", - 0, - 1, - anyOf(equalTo("a quick fast blue car"), equalTo("a quick fast blue car")) + assertResponse( + prepareSearch("second_test_index").setQuery(matchPhrasePrefixQuery("field4", "a fast quick blue ca")) + .setPostFilter(termQuery("type", "type2")) + 
.highlighter(highlight().field("field4").order("score").preTags("").postTags("")), + resp -> { + assertHighlight( + resp, + 0, + "field4", + 0, + 1, + anyOf(equalTo("a quick fast blue car"), equalTo("a quick fast blue car")) + ); + } ); } diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PredicateTokenScriptFilterTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PredicateTokenScriptFilterTests.java index 3a519f594a57f..b333c8534d19b 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PredicateTokenScriptFilterTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PredicateTokenScriptFilterTests.java @@ -97,9 +97,6 @@ private static class MockClient extends AbstractClient { super(settings, threadPool); } - @Override - public void close() {} - @Override protected void doExecute( ActionType action, diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/ScriptedConditionTokenFilterTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/ScriptedConditionTokenFilterTests.java index 81df5836015f0..98fdb551c27f1 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/ScriptedConditionTokenFilterTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/ScriptedConditionTokenFilterTests.java @@ -97,9 +97,6 @@ private class MockClient extends AbstractClient { super(settings, threadPool); } - @Override - public void close() {} - @Override protected void doExecute( ActionType action, diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/MeterRegistryConcurrencyTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/MeterRegistryConcurrencyTests.java index d1c74681c2bd7..e8dafd996f5b0 100644 --- a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/MeterRegistryConcurrencyTests.java +++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/MeterRegistryConcurrencyTests.java @@ -90,7 +90,6 @@ public ObservableLongCounter buildWithCallback(Consumer assertThat(setProviderThread.getState(), equalTo(Thread.State.WAITING))); // assert that the old lockingMeter is still in place - assertBusy(() -> assertThat(meterRegistrar.getMeter(), sameInstance(lockingMeter))); + assertThat(meterRegistrar.getMeter(), sameInstance(lockingMeter)); // finish long-running registration registerLatch.countDown(); + // wait for everything to quiesce, registerLatch.countDown() doesn't ensure lock has been released + setProviderThread.join(); + registerThread.join(); // assert that a meter was overriden - assertBusy(() -> assertThat(meterRegistrar.getMeter(), sameInstance(lockingMeter))); - + assertThat(meterRegistrar.getMeter(), sameInstance(noopMeter)); } } diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java index 90c76d630f0d0..4f1c33819fee9 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java @@ -52,7 +52,6 @@ import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchRequestBuilder; -import 
org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; @@ -86,7 +85,6 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.fetch.subphase.FieldAndFormat; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.transport.MockTransportService; @@ -122,6 +120,7 @@ import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.arrayWithSize; @@ -436,16 +435,12 @@ public void testComposableTemplateOnlyMatchingWithDataStreamName() throws Except }"""; PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request("id_1"); request.indexTemplate( - new ComposableIndexTemplate( - List.of(dataStreamName), // use no wildcard, so that backing indices don't match just by name - new Template(null, new CompressedXContent(mapping), null), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ) + ComposableIndexTemplate.builder() + // use no wildcard, so that backing indices don't match just by name + .indexPatterns(List.of(dataStreamName)) + .template(new Template(null, new CompressedXContent(mapping), null)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); @@ -518,16 +513,11 @@ public void testTimeStampValidationInvalidFieldMapping() throws Exception { }"""; PutComposableIndexTemplateAction.Request createTemplateRequest = new PutComposableIndexTemplateAction.Request("logs-foo"); createTemplateRequest.indexTemplate( - new ComposableIndexTemplate( - List.of("logs-*"), - new Template(null, new CompressedXContent(mapping), null), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("logs-*")) + .template(new Template(null, new CompressedXContent(mapping), null)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ); Exception e = expectThrows( @@ -673,16 +663,14 @@ public void testCannotDeleteComposableTemplateUsedByDataStream() throws Exceptio // Now replace it with a higher-priority template and delete the old one PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request("id2"); request.indexTemplate( - new ComposableIndexTemplate( - Collections.singletonList("metrics-foobar*"), // Match the other data stream with a slightly different pattern - new Template(null, null, null), - null, - 2L, // Higher priority than the other composable template - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ) + ComposableIndexTemplate.builder() + // Match the other data stream with a slightly different pattern + .indexPatterns(Collections.singletonList("metrics-foobar*")) + 
.template(new Template(null, null, null)) + // Higher priority than the other composable template + .priority(2L) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); @@ -1212,15 +1200,11 @@ public void testIndexDocsWithCustomRoutingTargetingDataStreamIsNotAllowed() thro } public void testIndexDocsWithCustomRoutingAllowed() throws Exception { - ComposableIndexTemplate template = new ComposableIndexTemplate( - List.of("logs-foobar*"), - new Template(null, null, null), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, true) - ); + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of("logs-foobar*")) + .template(new Template(null, null, null)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, true)) + .build(); client().execute( PutComposableIndexTemplateAction.INSTANCE, new PutComposableIndexTemplateAction.Request("id1").indexTemplate(template) @@ -1358,16 +1342,11 @@ public void testMultipleTimestampValuesInDocument() throws Exception { public void testMixedAutoCreate() throws Exception { PutComposableIndexTemplateAction.Request createTemplateRequest = new PutComposableIndexTemplateAction.Request("logs-foo"); createTemplateRequest.indexTemplate( - new ComposableIndexTemplate( - List.of("logs-foo*"), - new Template(null, new CompressedXContent(generateMapping("@timestamp")), null), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("logs-foo*")) + .template(new Template(null, new CompressedXContent(generateMapping("@timestamp")), null)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, createTemplateRequest).actionGet(); @@ -1599,21 +1578,22 @@ public void testSegmentsSortedOnTimestampDesc() throws Exception { indexDocs("metrics-foo", numDocs3); // 3rd segment int totalDocs = numDocs1 + numDocs2 + numDocs3; - SearchSourceBuilder source = new SearchSourceBuilder(); - source.fetchField(new FieldAndFormat(DEFAULT_TIMESTAMP_FIELD, "epoch_millis")); - source.size(totalDocs); - SearchRequest searchRequest = new SearchRequest(new String[] { "metrics-foo" }, source); - SearchResponse searchResponse = client().search(searchRequest).actionGet(); - assertEquals(totalDocs, searchResponse.getHits().getTotalHits().value); - SearchHit[] hits = searchResponse.getHits().getHits(); - assertEquals(totalDocs, hits.length); - - // Test that when we read data, segments come in the reverse order with a segment with the latest date first - long timestamp1 = Long.valueOf(hits[0].field(DEFAULT_TIMESTAMP_FIELD).getValue()); // 1st doc of 1st seg - long timestamp2 = Long.valueOf(hits[0 + numDocs3].field(DEFAULT_TIMESTAMP_FIELD).getValue()); // 1st doc of the 2nd seg - long timestamp3 = Long.valueOf(hits[0 + numDocs3 + numDocs2].field(DEFAULT_TIMESTAMP_FIELD).getValue()); // 1st doc of the 3rd seg - assertTrue(timestamp1 > timestamp2); - assertTrue(timestamp2 > timestamp3); + assertResponse( + prepareSearch("metrics-foo").addFetchField(new FieldAndFormat(DEFAULT_TIMESTAMP_FIELD, "epoch_millis")).setSize(totalDocs), + resp -> { + assertEquals(totalDocs, resp.getHits().getTotalHits().value); + SearchHit[] hits = resp.getHits().getHits(); + assertEquals(totalDocs, hits.length); + + // Test that when we read 
data, segments come in the reverse order with a segment with the latest date first + long timestamp1 = Long.valueOf(hits[0].field(DEFAULT_TIMESTAMP_FIELD).getValue()); // 1st doc of 1st seg + long timestamp2 = Long.valueOf(hits[0 + numDocs3].field(DEFAULT_TIMESTAMP_FIELD).getValue()); // 1st doc of the 2nd seg + long timestamp3 = Long.valueOf(hits[0 + numDocs3 + numDocs2].field(DEFAULT_TIMESTAMP_FIELD).getValue()); // 1st doc of the + // 3rd seg + assertTrue(timestamp1 > timestamp2); + assertTrue(timestamp2 > timestamp3); + } + ); } public void testCreateDataStreamWithSameNameAsIndexAlias() throws Exception { @@ -1914,12 +1894,10 @@ static void indexDocs(String dataStream, int numDocs) { } static void verifyDocs(String dataStream, long expectedNumHits, List expectedIndices) { - SearchRequest searchRequest = new SearchRequest(dataStream); - searchRequest.source().size((int) expectedNumHits); - SearchResponse searchResponse = client().search(searchRequest).actionGet(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(expectedNumHits)); - - Arrays.stream(searchResponse.getHits().getHits()).forEach(hit -> { assertTrue(expectedIndices.contains(hit.getIndex())); }); + assertResponse(prepareSearch(dataStream).setSize((int) expectedNumHits), resp -> { + assertThat(resp.getHits().getTotalHits().value, equalTo(expectedNumHits)); + Arrays.stream(resp.getHits().getHits()).forEach(hit -> assertTrue(expectedIndices.contains(hit.getIndex()))); + }); } static void verifyDocs(String dataStream, long expectedNumHits, long minGeneration, long maxGeneration) { @@ -1938,19 +1916,17 @@ public void testPartitionedTemplate() throws IOException { /** * partition size with no routing required */ - ComposableIndexTemplate template = new ComposableIndexTemplate( - List.of("logs"), - new Template( - Settings.builder().put("index.number_of_shards", "3").put("index.routing_partition_size", "2").build(), - null, - null - ), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, true) - ); + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of("logs")) + .template( + new Template( + Settings.builder().put("index.number_of_shards", "3").put("index.routing_partition_size", "2").build(), + null, + null + ) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, true)) + .build(); ComposableIndexTemplate finalTemplate = template; client().execute( PutComposableIndexTemplateAction.INSTANCE, @@ -1959,24 +1935,22 @@ public void testPartitionedTemplate() throws IOException { /** * partition size with routing required */ - template = new ComposableIndexTemplate( - List.of("logs"), - new Template( - Settings.builder().put("index.number_of_shards", "3").put("index.routing_partition_size", "2").build(), - new CompressedXContent(""" - { - "_routing": { - "required": true - } - }"""), - null - ), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, true) - ); + template = ComposableIndexTemplate.builder() + .indexPatterns(List.of("logs")) + .template( + new Template( + Settings.builder().put("index.number_of_shards", "3").put("index.routing_partition_size", "2").build(), + new CompressedXContent(""" + { + "_routing": { + "required": true + } + }"""), + null + ) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, true)) + .build(); client().execute( PutComposableIndexTemplateAction.INSTANCE, new 
PutComposableIndexTemplateAction.Request("my-it").indexTemplate(template) @@ -1985,19 +1959,17 @@ public void testPartitionedTemplate() throws IOException { /** * routing settings with allow custom routing false */ - template = new ComposableIndexTemplate( - List.of("logs"), - new Template( - Settings.builder().put("index.number_of_shards", "3").put("index.routing_partition_size", "2").build(), - null, - null - ), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false) - ); + template = ComposableIndexTemplate.builder() + .indexPatterns(List.of("logs")) + .template( + new Template( + Settings.builder().put("index.number_of_shards", "3").put("index.routing_partition_size", "2").build(), + null, + null + ) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build(); ComposableIndexTemplate finalTemplate1 = template; Exception e = expectThrows( IllegalArgumentException.class, @@ -2015,24 +1987,22 @@ public void testPartitionedTemplate() throws IOException { } public void testRoutingEnabledInMappingDisabledInDataStreamTemplate() throws IOException { - ComposableIndexTemplate template = new ComposableIndexTemplate( - List.of("logs"), - new Template( - Settings.builder().put("index.number_of_shards", "3").put("index.routing_partition_size", "2").build(), - new CompressedXContent(""" - { - "_routing": { - "required": true - } - }"""), - null - ), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false) - ); + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of("logs")) + .template( + new Template( + Settings.builder().put("index.number_of_shards", "3").put("index.routing_partition_size", "2").build(), + new CompressedXContent(""" + { + "_routing": { + "required": true + } + }"""), + null + ) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build(); Exception e = expectThrows( IllegalArgumentException.class, () -> client().execute( @@ -2048,37 +2018,34 @@ public void testSearchWithRouting() throws IOException, ExecutionException, Inte /** * partition size with routing required */ - ComposableIndexTemplate template = new ComposableIndexTemplate( - List.of("my-logs"), - new Template( - Settings.builder() - .put("index.number_of_shards", "10") - .put("index.number_of_routing_shards", "10") - .put("index.routing_partition_size", "4") - .build(), - new CompressedXContent(""" - { - "_routing": { - "required": true - } - }"""), - null - ), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, true) - ); + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of("my-logs")) + .template( + new Template( + Settings.builder() + .put("index.number_of_shards", "10") + .put("index.number_of_routing_shards", "10") + .put("index.routing_partition_size", "4") + .build(), + new CompressedXContent(""" + { + "_routing": { + "required": true + } + }"""), + null + ) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, true)) + .build(); client().execute( PutComposableIndexTemplateAction.INSTANCE, new PutComposableIndexTemplateAction.Request("my-it").indexTemplate(template) ).actionGet(); CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request("my-logs"); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); - SearchRequest searchRequest = new 
SearchRequest("my-logs").routing("123"); - SearchResponse searchResponse = client().search(searchRequest).actionGet(); - assertEquals(searchResponse.getTotalShards(), 4); + + assertResponse(prepareSearch("my-logs").setRouting("123"), resp -> { assertEquals(resp.getTotalShards(), 4); }); } public void testWriteIndexWriteLoadAndAvgShardSizeIsStoredAfterRollover() throws Exception { @@ -2331,16 +2298,12 @@ static void putComposableIndexTemplate( ) throws IOException { PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request(id); request.indexTemplate( - new ComposableIndexTemplate( - patterns, - new Template(settings, mappings == null ? null : CompressedXContent.fromJSON(mappings), aliases, lifecycle), - null, - null, - null, - metadata, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(patterns) + .template(new Template(settings, mappings == null ? null : CompressedXContent.fromJSON(mappings), aliases, lifecycle)) + .metadata(metadata) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); } diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataTierDataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataTierDataStreamIT.java index aeb7516c35816..69c28a06bb206 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataTierDataStreamIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataTierDataStreamIT.java @@ -33,16 +33,11 @@ public void testDefaultDataStreamAllocateToHot() { startHotOnlyNode(); ensureGreen(); - ComposableIndexTemplate template = new ComposableIndexTemplate( - Collections.singletonList(index), - null, - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ); + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList(index)) + + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build(); client().execute( PutComposableIndexTemplateAction.INSTANCE, new PutComposableIndexTemplateAction.Request("template").indexTemplate(template) diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamIT.java index 922b58e3920e1..734e2d7273d19 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamIT.java @@ -317,15 +317,11 @@ public Collection getSystemDataStreamDescriptors() { ".test-data-stream", "system data stream test", Type.EXTERNAL, - new ComposableIndexTemplate( - List.of(".test-data-stream"), - new Template(Settings.EMPTY, mappings, null), - null, - null, - null, - null, - new DataStreamTemplate() - ), + ComposableIndexTemplate.builder() + .indexPatterns(List.of(".test-data-stream")) + .template(new Template(Settings.EMPTY, mappings, null)) + .dataStreamTemplate(new DataStreamTemplate()) + .build(), Map.of(), List.of("product"), ExecutorNames.DEFAULT_SYSTEM_DATA_STREAM_THREAD_POOLS diff --git 
a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamSnapshotIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamSnapshotIT.java index 0f60cbba0a4ff..b0724a9c9c0e3 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamSnapshotIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamSnapshotIT.java @@ -238,15 +238,10 @@ public Collection getSystemDataStreamDescriptors() { SYSTEM_DATA_STREAM_NAME, "a system data stream for testing", SystemDataStreamDescriptor.Type.EXTERNAL, - new ComposableIndexTemplate( - List.of(".system-data-stream"), - null, - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate() - ), + ComposableIndexTemplate.builder() + .indexPatterns(List.of(".system-data-stream")) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build(), Map.of(), Collections.singletonList("test"), new ExecutorNames(ThreadPool.Names.SYSTEM_CRITICAL_READ, ThreadPool.Names.SYSTEM_READ, ThreadPool.Names.SYSTEM_WRITE) diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java index 5dbf52f33d7da..ab42d831c6545 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java @@ -115,16 +115,11 @@ public void testTimeRanges() throws Exception { if (randomBoolean()) { var request = new PutComposableIndexTemplateAction.Request("id"); request.indexTemplate( - new ComposableIndexTemplate( - List.of("k8s*"), - new Template(templateSettings.build(), mapping, null), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("k8s*")) + .template(new Template(templateSettings.build(), mapping, null)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); } else { @@ -134,16 +129,12 @@ public void testTimeRanges() throws Exception { var putTemplateRequest = new PutComposableIndexTemplateAction.Request("id"); putTemplateRequest.indexTemplate( - new ComposableIndexTemplate( - List.of("k8s*"), - new Template(templateSettings.build(), null, null), - List.of("1"), - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("k8s*")) + .template(new Template(templateSettings.build(), null, null)) + .componentTemplates(List.of("1")) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, putTemplateRequest).actionGet(); } @@ -249,20 +240,17 @@ public void testInvalidTsdbTemplatesNoTimeSeriesDimensionAttribute() throws Exce { var request = new PutComposableIndexTemplateAction.Request("id"); request.indexTemplate( - new ComposableIndexTemplate( - List.of("k8s*"), - new Template( - Settings.builder().put("index.mode", "time_series").put("index.routing_path", "metricset").build(), - new CompressedXContent(mappingTemplate), - null - ), - 
null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("k8s*")) + .template( + new Template( + Settings.builder().put("index.mode", "time_series").put("index.routing_path", "metricset").build(), + new CompressedXContent(mappingTemplate), + null + ) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() ); var e = expectThrows( IllegalArgumentException.class, @@ -280,20 +268,17 @@ public void testInvalidTsdbTemplatesNoTimeSeriesDimensionAttribute() throws Exce { var request = new PutComposableIndexTemplateAction.Request("id"); request.indexTemplate( - new ComposableIndexTemplate( - List.of("k8s*"), - new Template( - Settings.builder().put("index.mode", "time_series").build(), - new CompressedXContent(mappingTemplate), - null - ), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("k8s*")) + .template( + new Template( + Settings.builder().put("index.mode", "time_series").build(), + new CompressedXContent(mappingTemplate), + null + ) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() ); var e = expectThrows( InvalidIndexTemplateException.class, @@ -317,20 +302,17 @@ public void testInvalidTsdbTemplatesNoKeywordFieldType() throws Exception { }"""; var request = new PutComposableIndexTemplateAction.Request("id"); request.indexTemplate( - new ComposableIndexTemplate( - List.of("k8s*"), - new Template( - Settings.builder().put("index.mode", "time_series").put("index.routing_path", "metricset").build(), - new CompressedXContent(mappingTemplate), - null - ), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("k8s*")) + .template( + new Template( + Settings.builder().put("index.mode", "time_series").put("index.routing_path", "metricset").build(), + new CompressedXContent(mappingTemplate), + null + ) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() ); Exception e = expectThrows( IllegalArgumentException.class, @@ -360,20 +342,17 @@ public void testInvalidTsdbTemplatesMissingSettings() throws Exception { }"""; var request = new PutComposableIndexTemplateAction.Request("id"); request.indexTemplate( - new ComposableIndexTemplate( - List.of("k8s*"), - new Template( - Settings.builder().put("index.routing_path", "metricset").build(), - new CompressedXContent(mappingTemplate), - null - ), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("k8s*")) + .template( + new Template( + Settings.builder().put("index.routing_path", "metricset").build(), + new CompressedXContent(mappingTemplate), + null + ) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() ); var e = expectThrows( IllegalArgumentException.class, @@ -389,16 +368,11 @@ public void testSkippingShards() throws Exception { var templateSettings = Settings.builder().put("index.mode", "time_series").put("index.routing_path", "metricset").build(); var request = new PutComposableIndexTemplateAction.Request("id1"); request.indexTemplate( - new ComposableIndexTemplate( - List.of("pattern-1"), - new 
Template(templateSettings, mapping, null), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("pattern-1")) + .template(new Template(templateSettings, mapping, null)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); var indexRequest = new IndexRequest("pattern-1").opType(DocWriteRequest.OpType.CREATE).setRefreshPolicy("true"); @@ -408,16 +382,11 @@ public void testSkippingShards() throws Exception { { var request = new PutComposableIndexTemplateAction.Request("id2"); request.indexTemplate( - new ComposableIndexTemplate( - List.of("pattern-2"), - new Template(null, mapping, null), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("pattern-2")) + .template(new Template(null, mapping, null)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); var indexRequest = new IndexRequest("pattern-2").opType(DocWriteRequest.OpType.CREATE).setRefreshPolicy("true"); @@ -457,26 +426,23 @@ public void testTrimId() throws Exception { String dataStreamName = "k8s"; var putTemplateRequest = new PutComposableIndexTemplateAction.Request("id"); putTemplateRequest.indexTemplate( - new ComposableIndexTemplate( - List.of(dataStreamName + "*"), - new Template( - Settings.builder() - .put("index.mode", "time_series") - .put("index.number_of_replicas", 0) - // Reduce sync interval to speedup this integraton test, - // otherwise by default it will take 30 seconds before minimum retained seqno is updated: - .put("index.soft_deletes.retention_lease.sync_interval", "100ms") - .build(), - new CompressedXContent(MAPPING_TEMPLATE), - null - ), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of(dataStreamName + "*")) + .template( + new Template( + Settings.builder() + .put("index.mode", "time_series") + .put("index.number_of_replicas", 0) + // Reduce sync interval to speedup this integraton test, + // otherwise by default it will take 30 seconds before minimum retained seqno is updated: + .put("index.soft_deletes.retention_lease.sync_interval", "100ms") + .build(), + new CompressedXContent(MAPPING_TEMPLATE), + null + ) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, putTemplateRequest).actionGet(); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudDataStreamLifecycleIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudDataStreamLifecycleIT.java index ff84501697e21..e33b1fdcfa57a 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudDataStreamLifecycleIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudDataStreamLifecycleIT.java @@ -36,10 +36,6 @@ protected Collection> nodePlugins() { return List.of(DataStreamsPlugin.class, MockTransportService.TestPlugin.class); } - protected boolean 
ignoreExternalCluster() { - return true; - } - public void testGetLifecycle() throws Exception { DataStreamLifecycle lifecycle = randomLifecycle(); putComposableIndexTemplate("id1", null, List.of("with-lifecycle*"), null, null, lifecycle); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudSystemDataStreamLifecycleIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudSystemDataStreamLifecycleIT.java index d2baec3150392..8e590d3f28346 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudSystemDataStreamLifecycleIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudSystemDataStreamLifecycleIT.java @@ -199,20 +199,18 @@ public Collection getSystemDataStreamDescriptors() { ".test-data-stream", "system data stream test", Type.EXTERNAL, - new ComposableIndexTemplate( - List.of(".test-data-stream"), - new Template( - Settings.EMPTY, - mappings, - null, - DataStreamLifecycle.newBuilder().dataRetention(randomMillisUpToYear9999()).build() - ), - null, - null, - null, - null, - new DataStreamTemplate() - ), + ComposableIndexTemplate.builder() + .indexPatterns(List.of(".test-data-stream")) + .template( + new Template( + Settings.EMPTY, + mappings, + null, + DataStreamLifecycle.newBuilder().dataRetention(randomMillisUpToYear9999()).build() + ) + ) + .dataStreamTemplate(new DataStreamTemplate()) + .build(), Map.of(), List.of("product"), ExecutorNames.DEFAULT_SYSTEM_DATA_STREAM_THREAD_POOLS diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java index e4f4f88254977..5bbc007cfb272 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java @@ -90,10 +90,6 @@ protected Collection> nodePlugins() { return List.of(DataStreamsPlugin.class, MockTransportService.TestPlugin.class); } - protected boolean ignoreExternalCluster() { - return true; - } - @Override protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { Settings.Builder settings = Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings)); @@ -186,16 +182,10 @@ public void testOriginationDate() throws Exception { }"""; PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request("id2"); request.indexTemplate( - new ComposableIndexTemplate( - List.of("index_*"), - new Template(null, CompressedXContent.fromJSON(mapping), null, null), - null, - null, - null, - null, - null, - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("index_*")) + .template(new Template(null, CompressedXContent.fromJSON(mapping), null, null)) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); @@ -720,16 +710,12 @@ static void putComposableIndexTemplate( ) throws IOException { PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request(id); request.indexTemplate( - new ComposableIndexTemplate( - patterns, - new Template(settings, mappings == null ? 
null : CompressedXContent.fromJSON(mappings), null, lifecycle), - null, - null, - null, - metadata, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(patterns) + .template(new Template(settings, mappings == null ? null : CompressedXContent.fromJSON(mappings), null, lifecycle)) + .metadata(metadata) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); } diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java index 6ff50d88aeb05..57febae28bb4d 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java @@ -62,10 +62,6 @@ protected Collection> nodePlugins() { return List.of(DataStreamsPlugin.class, MockTransportService.TestPlugin.class); } - protected boolean ignoreExternalCluster() { - return true; - } - @Override protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { Settings.Builder settings = Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings)); @@ -355,16 +351,12 @@ static void putComposableIndexTemplate( ) throws IOException { PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request(id); request.indexTemplate( - new ComposableIndexTemplate( - patterns, - new Template(settings, mappings == null ? null : CompressedXContent.fromJSON(mappings), null, lifecycle), - null, - null, - null, - metadata, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(patterns) + .template(new Template(settings, mappings == null ? 
null : CompressedXContent.fromJSON(mappings), null, lifecycle)) + .metadata(metadata) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java index e55ff022693b3..07a80683b24fa 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java @@ -290,7 +290,8 @@ public void cleanup() { } private ClusterState createInitialState() { - ComposableIndexTemplate template = new ComposableIndexTemplate.Builder().indexPatterns(List.of("logs-*")) + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of("logs-*")) .template( new Template(Settings.builder().put("index.mode", "time_series").put("index.routing_path", "uid").build(), null, null) ) diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java index da0caff9e591d..928512f659039 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java @@ -226,16 +226,11 @@ private String createDataStream(boolean hidden) throws Exception { Template idxTemplate = new Template(null, new CompressedXContent(""" {"properties":{"@timestamp":{"type":"date"},"data":{"type":"keyword"}}} """), null); - ComposableIndexTemplate template = new ComposableIndexTemplate( - List.of(dataStreamName + "*"), - idxTemplate, - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(hidden, false), - null - ); + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of(dataStreamName + "*")) + .template(idxTemplate) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(hidden, false)) + .build(); assertAcked( client().execute( PutComposableIndexTemplateAction.INSTANCE, diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataDataStreamRolloverServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataDataStreamRolloverServiceTests.java index 0391f91a35fb3..4c333c3f0ab8d 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataDataStreamRolloverServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataDataStreamRolloverServiceTests.java @@ -71,7 +71,8 @@ public void testRolloverClusterStateForDataStream() throws Exception { false, IndexMode.TIME_SERIES ); - ComposableIndexTemplate template = new ComposableIndexTemplate.Builder().indexPatterns(List.of(dataStream.getName() + "*")) + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of(dataStream.getName() + "*")) .template( new Template(Settings.builder().put("index.mode", "time_series").put("index.routing_path", "uid").build(), null, null) ) @@ -176,7 +177,8 @@ public void testRolloverAndMigrateDataStream() throws Exception { false, dsIndexMode ); - ComposableIndexTemplate template = new 
ComposableIndexTemplate.Builder().indexPatterns(List.of(dataStream.getName() + "*")) + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of(dataStream.getName() + "*")) .template( new Template(Settings.builder().put("index.mode", "time_series").put("index.routing_path", "uid").build(), null, null) ) @@ -262,7 +264,8 @@ public void testChangingIndexModeFromTimeSeriesToSomethingElseNoEffectOnExisting false, IndexMode.TIME_SERIES ); - ComposableIndexTemplate template = new ComposableIndexTemplate.Builder().indexPatterns(List.of(dataStream.getName() + "*")) + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of(dataStream.getName() + "*")) .template( new Template(Settings.builder().put("index.mode", "time_series").put("index.routing_path", "uid").build(), null, null) ) @@ -477,7 +480,8 @@ private static ClusterState createClusterState(String dataStreamName, int number false, null ); - ComposableIndexTemplate template = new ComposableIndexTemplate.Builder().indexPatterns(List.of(dataStream.getName() + "*")) + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of(dataStream.getName() + "*")) .template( new Template(Settings.builder().put("index.mode", "time_series").put("index.routing_path", "uid").build(), null, null) ) diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java index 4f36feba17c89..e7339cc3f334a 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java @@ -59,16 +59,13 @@ public void testRequireRoutingPath() throws Exception { // Missing routing path should fail validation var componentTemplate = new ComponentTemplate(new Template(null, new CompressedXContent("{}"), null), null, null); var state = service.addComponentTemplate(ClusterState.EMPTY_STATE, true, "1", componentTemplate); - var indexTemplate = new ComposableIndexTemplate( - Collections.singletonList("logs-*-*"), - new Template(builder().put("index.mode", "time_series").build(), null, null), - List.of("1"), - 100L, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ); + var indexTemplate = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("logs-*-*")) + .template(new Template(builder().put("index.mode", "time_series").build(), null, null)) + .componentTemplates(List.of("1")) + .priority(100L) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build(); var e = expectThrows(InvalidIndexTemplateException.class, () -> service.addIndexTemplateV2(state, false, "1", indexTemplate)); assertThat(e.getMessage(), containsString("[index.mode=time_series] requires a non-empty [index.routing_path]")); } @@ -81,16 +78,13 @@ public void testRequireRoutingPath() throws Exception { null ); state = service.addComponentTemplate(state, true, "1", componentTemplate); - var indexTemplate = new ComposableIndexTemplate( - Collections.singletonList("logs-*-*"), - new Template(builder().put("index.mode", "time_series").build(), null, null), - List.of("1"), - 100L, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ); + var indexTemplate = 
ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("logs-*-*")) + .template(new Template(builder().put("index.mode", "time_series").build(), null, null)) + .componentTemplates(List.of("1")) + .priority(100L) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build(); state = service.addIndexTemplateV2(state, false, "1", indexTemplate); assertThat(state.getMetadata().templatesV2().get("1"), equalTo(indexTemplate)); } @@ -103,46 +97,39 @@ public void testRequireRoutingPath() throws Exception { null ); state = service.addComponentTemplate(state, true, "1", componentTemplate); - var indexTemplate = new ComposableIndexTemplate( - Collections.singletonList("logs-*-*"), - new Template(null, null, null), - List.of("1"), - 100L, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ); + var indexTemplate = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("logs-*-*")) + .template(new Template(null, null, null)) + .componentTemplates(List.of("1")) + .priority(100L) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build(); state = service.addIndexTemplateV2(state, false, "1", indexTemplate); assertThat(state.getMetadata().templatesV2().get("1"), equalTo(indexTemplate)); } { // Routing path defined in index template - var indexTemplate = new ComposableIndexTemplate( - Collections.singletonList("logs-*-*"), - new Template(builder().put("index.mode", "time_series").put("index.routing_path", "uid").build(), null, null), - List.of("1"), - 100L, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ); + var indexTemplate = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("logs-*-*")) + .template(new Template(builder().put("index.mode", "time_series").put("index.routing_path", "uid").build(), null, null)) + .componentTemplates(List.of("1")) + .priority(100L) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build(); var state = service.addIndexTemplateV2(ClusterState.EMPTY_STATE, false, "1", indexTemplate); assertThat(state.getMetadata().templatesV2().get("1"), equalTo(indexTemplate)); } { // Routing fetched from mapping in index template - var indexTemplate = new ComposableIndexTemplate( - Collections.singletonList("logs-*-*"), - new Template(builder().put("index.mode", "time_series").build(), new CompressedXContent(generateTsdbMapping()), null), - List.of("1"), - 100L, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ); + var indexTemplate = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("logs-*-*")) + .template( + new Template(builder().put("index.mode", "time_series").build(), new CompressedXContent(generateTsdbMapping()), null) + ) + .componentTemplates(List.of("1")) + .priority(100L) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build(); var state = service.addIndexTemplateV2(ClusterState.EMPTY_STATE, false, "1", indexTemplate); assertThat(state.getMetadata().templatesV2().get("1"), equalTo(indexTemplate)); } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java index 803f5c8661f17..1a9287c1d5ee8 100644 --- 
a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java @@ -96,16 +96,11 @@ private void createTemplate(boolean tsdb) throws IOException { var templateSettings = Settings.builder().put("index.mode", tsdb ? "time_series" : "standard"); var request = new PutComposableIndexTemplateAction.Request("id"); request.indexTemplate( - new ComposableIndexTemplate( - List.of("k8s*"), - new Template(templateSettings.build(), new CompressedXContent(mappingTemplate), null), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("k8s*")) + .template(new Template(templateSettings.build(), new CompressedXContent(mappingTemplate), null)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleFixtures.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleFixtures.java index 5a15e831f5ad6..6833f2222b585 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleFixtures.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleFixtures.java @@ -83,16 +83,12 @@ static void putComposableIndexTemplate( ) throws IOException { PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request(id); request.indexTemplate( - new ComposableIndexTemplate( - patterns, - new Template(settings, mappings == null ? null : CompressedXContent.fromJSON(mappings), null, lifecycle), - null, - null, - null, - metadata, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(patterns) + .template(new Template(settings, mappings == null ? 
null : CompressedXContent.fromJSON(mappings), null, lifecycle)) + .metadata(metadata) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ); assertTrue(client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet().isAcknowledged()); } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java index bd6100c95b412..0ee168d130986 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java @@ -126,7 +126,6 @@ public class DataStreamLifecycleServiceTests extends ESTestCase { private ThreadPool threadPool; private DataStreamLifecycleService dataStreamLifecycleService; private List clientSeenRequests; - private Client client; private DoExecuteDelegate clientDelegate; private ClusterService clusterService; @@ -145,7 +144,7 @@ public void setupServices() { Clock clock = Clock.fixed(Instant.ofEpochMilli(now), ZoneId.of(randomFrom(ZoneId.getAvailableZoneIds()))); clientSeenRequests = new CopyOnWriteArrayList<>(); - client = getTransportRequestsRecordingClient(); + final Client client = getTransportRequestsRecordingClient(); AllocationService allocationService = new AllocationService( new AllocationDeciders( new HashSet<>( @@ -178,7 +177,6 @@ public void cleanup() { dataStreamLifecycleService.close(); clusterService.close(); threadPool.shutdownNow(); - client.close(); } public void testOperationsExecutedOnce() { @@ -1499,7 +1497,7 @@ private static DiscoveryNode getNode(String nodeId) { * (it does not even notify the listener), but tests can provide an implementation of clientDelegate to provide any needed behavior. */ private Client getTransportRequestsRecordingClient() { - return new NoOpClient(getTestName()) { + return new NoOpClient(threadPool) { @Override protected void doExecute( ActionType action, diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/SizedBucketAggregatorBuilder.java b/modules/health-shards-availability/src/main/java/module-info.java similarity index 52% rename from server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/SizedBucketAggregatorBuilder.java rename to modules/health-shards-availability/src/main/java/module-info.java index 55729094ace57..4ee4cafeb5f96 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/SizedBucketAggregatorBuilder.java +++ b/modules/health-shards-availability/src/main/java/module-info.java @@ -6,13 +6,7 @@ * Side Public License, v 1. */ -package org.elasticsearch.search.aggregations.bucket.histogram; - -import java.util.concurrent.TimeUnit; - -/** - * An aggregator capable of reporting bucket sizes in milliseconds. Used by RateAggregator for calendar-based buckets. 
- */ -public interface SizedBucketAggregatorBuilder { - double calendarDivider(TimeUnit timeUnit); +module org.elasticsearch.shardhealth { + requires org.elasticsearch.server; + requires org.apache.lucene.core; } diff --git a/modules/health-shards-availability/src/main/java/org/elasticsearch/health/plugin/ShardsAvailabilityPlugin.java b/modules/health-shards-availability/src/main/java/org/elasticsearch/health/plugin/ShardsAvailabilityPlugin.java index aeb45424ebc58..eac72c36bef18 100644 --- a/modules/health-shards-availability/src/main/java/org/elasticsearch/health/plugin/ShardsAvailabilityPlugin.java +++ b/modules/health-shards-availability/src/main/java/org/elasticsearch/health/plugin/ShardsAvailabilityPlugin.java @@ -9,7 +9,7 @@ package org.elasticsearch.health.plugin; import org.apache.lucene.util.SetOnce; -import org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService; +import org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService; import org.elasticsearch.health.HealthIndicatorService; import org.elasticsearch.plugins.HealthPlugin; import org.elasticsearch.plugins.Plugin; diff --git a/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/ingest/common/IngestRestartIT.java b/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/ingest/common/IngestRestartIT.java index 96ca77a5f65f9..5709fbd9d8bfc 100644 --- a/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/ingest/common/IngestRestartIT.java +++ b/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/ingest/common/IngestRestartIT.java @@ -50,11 +50,6 @@ protected Collection> nodePlugins() { return Arrays.asList(IngestCommonPlugin.class, CustomScriptPlugin.class); } - @Override - protected boolean ignoreExternalCluster() { - return true; - } - public static class CustomScriptPlugin extends MockScriptPlugin { @Override protected Map, Object>> pluginScripts() { diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java index 9a739132e5808..438b5f3f5efcd 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java @@ -14,7 +14,6 @@ import org.elasticsearch.action.ingest.SimulateDocumentBaseResult; import org.elasticsearch.action.ingest.SimulatePipelineRequest; import org.elasticsearch.action.ingest.SimulatePipelineResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.bytes.BytesReference; @@ -69,6 +68,7 @@ import static org.elasticsearch.ingest.ConfigurationUtils.readStringProperty; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; @@ -250,35 +250,43 @@ public void testGeoIpDatabasesDownload() throws Exception { state.getDatabases().keySet() ); GeoIpTaskState.Metadata metadata = state.get(id); - BoolQueryBuilder queryBuilder = new BoolQueryBuilder().filter(new 
MatchQueryBuilder("name", id)) - .filter(new RangeQueryBuilder("chunk").from(metadata.firstChunk()).to(metadata.lastChunk(), true)); int size = metadata.lastChunk() - metadata.firstChunk() + 1; - SearchResponse res = prepareSearch(GeoIpDownloader.DATABASES_INDEX).setSize(size) - .setQuery(queryBuilder) - .addSort("chunk", SortOrder.ASC) - .get(); - TotalHits totalHits = res.getHits().getTotalHits(); - assertEquals(TotalHits.Relation.EQUAL_TO, totalHits.relation); - assertEquals(size, totalHits.value); - assertEquals(size, res.getHits().getHits().length); - - List data = new ArrayList<>(); - - for (SearchHit hit : res.getHits().getHits()) { - data.add((byte[]) hit.getSourceAsMap().get("data")); - } - - TarInputStream stream = new TarInputStream(new GZIPInputStream(new MultiByteArrayInputStream(data))); - TarInputStream.TarEntry entry; - while ((entry = stream.getNextEntry()) != null) { - if (entry.name().endsWith(".mmdb")) { - break; + assertResponse( + prepareSearch(GeoIpDownloader.DATABASES_INDEX).setSize(size) + .setQuery( + new BoolQueryBuilder().filter(new MatchQueryBuilder("name", id)) + .filter(new RangeQueryBuilder("chunk").from(metadata.firstChunk()).to(metadata.lastChunk(), true)) + ) + .addSort("chunk", SortOrder.ASC), + res -> { + try { + TotalHits totalHits = res.getHits().getTotalHits(); + assertEquals(TotalHits.Relation.EQUAL_TO, totalHits.relation); + assertEquals(size, totalHits.value); + assertEquals(size, res.getHits().getHits().length); + + List data = new ArrayList<>(); + + for (SearchHit hit : res.getHits().getHits()) { + data.add((byte[]) hit.getSourceAsMap().get("data")); + } + + TarInputStream stream = new TarInputStream(new GZIPInputStream(new MultiByteArrayInputStream(data))); + TarInputStream.TarEntry entry; + while ((entry = stream.getNextEntry()) != null) { + if (entry.name().endsWith(".mmdb")) { + break; + } + } + + Path tempFile = createTempFile(); + Files.copy(stream, tempFile, StandardCopyOption.REPLACE_EXISTING); + parseDatabase(tempFile); + } catch (Exception e) { + fail(e); + } } - } - - Path tempFile = createTempFile(); - Files.copy(stream, tempFile, StandardCopyOption.REPLACE_EXISTING); - parseDatabase(tempFile); + ); } catch (Exception e) { throw new AssertionError(e); } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java index 76c0e6e494a74..3e04f7bfea2de 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java @@ -215,7 +215,7 @@ protected void updateTimestamp(String name, Metadata old) { } void updateTaskState() { - PlainActionFuture> future = PlainActionFuture.newFuture(); + PlainActionFuture> future = new PlainActionFuture<>(); updatePersistentTaskState(state, future); state = ((GeoIpTaskState) future.actionGet().getState()); } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java index 26ddbaa7ba854..30ecc96a3171c 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java @@ -9,7 +9,6 @@ package org.elasticsearch.ingest.geoip; import org.apache.lucene.util.SetOnce; -import org.elasticsearch.Version; import 
org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.client.internal.Client; @@ -65,6 +64,14 @@ public class IngestGeoIpPlugin extends Plugin implements IngestPlugin, SystemIndexPlugin, Closeable, PersistentTaskPlugin, ActionPlugin { public static final Setting CACHE_SIZE = Setting.longSetting("ingest.geoip.cache_size", 1000, 0, Setting.Property.NodeScope); private static final int GEOIP_INDEX_MAPPINGS_VERSION = 1; + /** + * No longer used for determining the age of mappings, but system index descriptor + * code requires something be set. We use a value that can be parsed by + * old nodes in mixed-version clusters, just in case any old code exists that + * tries to parse version from index metadata, and that will indicate + * to these old nodes that the mappings are newer than they are. + */ + private static final String LEGACY_VERSION_FIELD_VALUE = "8.12.0"; private final SetOnce ingestService = new SetOnce<>(); private final SetOnce databaseRegistry = new SetOnce<>(); @@ -204,7 +211,7 @@ private static XContentBuilder mappings() { return jsonBuilder().startObject() .startObject(SINGLE_MAPPING_NAME) .startObject("_meta") - .field("version", Version.CURRENT) + .field("version", LEGACY_VERSION_FIELD_VALUE) .field(SystemIndexDescriptor.VERSION_META_KEY, GEOIP_INDEX_MAPPINGS_VERSION) .endObject() .field("dynamic", "strict") diff --git a/modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/MoreExpressionIT.java b/modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/MoreExpressionIT.java index f71a55f4f6be0..23e5fcd312dcc 100644 --- a/modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/MoreExpressionIT.java +++ b/modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/MoreExpressionIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.update.UpdateRequestBuilder; import org.elasticsearch.common.lucene.search.function.CombineFunction; @@ -37,6 +36,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; @@ -44,6 +44,8 @@ import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.bucketScript; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -77,28 +79,30 @@ public void testBasic() throws Exception { createIndex("test"); ensureGreen("test"); client().prepareIndex("test").setId("1").setSource("foo", 4).setRefreshPolicy(IMMEDIATE).get(); - SearchResponse rsp = buildRequest("doc['foo'] + 1").get(); - assertEquals(1, 
rsp.getHits().getTotalHits().value); - assertEquals(5.0, rsp.getHits().getAt(0).field("foo").getValue(), 0.0D); + assertResponse(buildRequest("doc['foo'] + 1"), rsp -> { + assertEquals(1, rsp.getHits().getTotalHits().value); + assertEquals(5.0, rsp.getHits().getAt(0).field("foo").getValue(), 0.0D); + }); } public void testFunction() throws Exception { createIndex("test"); ensureGreen("test"); client().prepareIndex("test").setId("1").setSource("foo", 4).setRefreshPolicy(IMMEDIATE).get(); - SearchResponse rsp = buildRequest("doc['foo'] + abs(1)").get(); - assertNoFailures(rsp); - assertEquals(1, rsp.getHits().getTotalHits().value); - assertEquals(5.0, rsp.getHits().getAt(0).field("foo").getValue(), 0.0D); + assertNoFailuresAndResponse(buildRequest("doc['foo'] + abs(1)"), rsp -> { + assertEquals(1, rsp.getHits().getTotalHits().value); + assertEquals(5.0, rsp.getHits().getAt(0).field("foo").getValue(), 0.0D); + }); } public void testBasicUsingDotValue() throws Exception { createIndex("test"); ensureGreen("test"); client().prepareIndex("test").setId("1").setSource("foo", 4).setRefreshPolicy(IMMEDIATE).get(); - SearchResponse rsp = buildRequest("doc['foo'].value + 1").get(); - assertEquals(1, rsp.getHits().getTotalHits().value); - assertEquals(5.0, rsp.getHits().getAt(0).field("foo").getValue(), 0.0D); + assertResponse(buildRequest("doc['foo'].value + 1"), rsp -> { + assertEquals(1, rsp.getHits().getTotalHits().value); + assertEquals(5.0, rsp.getHits().getAt(0).field("foo").getValue(), 0.0D); + }); } public void testScore() throws Exception { @@ -116,13 +120,14 @@ public void testScore() throws Exception { SearchRequestBuilder req = prepareSearch().setIndices("test"); req.setQuery(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("text", "hello"), score).boostMode(CombineFunction.REPLACE)); req.setSearchType(SearchType.DFS_QUERY_THEN_FETCH); // make sure DF is consistent - SearchResponse rsp = req.get(); - assertNoFailures(rsp); - SearchHits hits = rsp.getHits(); - assertEquals(3, hits.getTotalHits().value); - assertEquals("1", hits.getAt(0).getId()); - assertEquals("3", hits.getAt(1).getId()); - assertEquals("2", hits.getAt(2).getId()); + assertResponse(req, rsp -> { + assertNoFailures(rsp); + SearchHits hits = rsp.getHits(); + assertEquals(3, hits.getTotalHits().value); + assertEquals("1", hits.getAt(0).getId()); + assertEquals("3", hits.getAt(1).getId()); + assertEquals("2", hits.getAt(2).getId()); + }); req = prepareSearch().setIndices("test"); req.setQuery(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("text", "hello"), score).boostMode(CombineFunction.REPLACE)); @@ -140,26 +145,30 @@ public void testDateMethods() throws Exception { client().prepareIndex("test").setId("1").setSource("id", 1, "date0", "2015-04-28T04:02:07Z", "date1", "1985-09-01T23:11:01Z"), client().prepareIndex("test").setId("2").setSource("id", 2, "date0", "2013-12-25T11:56:45Z", "date1", "1983-10-13T23:15:00Z") ); - SearchResponse rsp = buildRequest("doc['date0'].getSeconds() - doc['date0'].getMinutes()").get(); - assertEquals(2, rsp.getHits().getTotalHits().value); - SearchHits hits = rsp.getHits(); - assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(-11.0, hits.getAt(1).field("foo").getValue(), 0.0D); - rsp = buildRequest("doc['date0'].getHourOfDay() + doc['date1'].getDayOfMonth()").get(); - assertEquals(2, rsp.getHits().getTotalHits().value); - hits = rsp.getHits(); - assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(24.0, 
hits.getAt(1).field("foo").getValue(), 0.0D); - rsp = buildRequest("doc['date1'].getMonth() + 1").get(); - assertEquals(2, rsp.getHits().getTotalHits().value); - hits = rsp.getHits(); - assertEquals(9.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(10.0, hits.getAt(1).field("foo").getValue(), 0.0D); - rsp = buildRequest("doc['date1'].getYear()").get(); - assertEquals(2, rsp.getHits().getTotalHits().value); - hits = rsp.getHits(); - assertEquals(1985.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(1983.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertResponse(buildRequest("doc['date0'].getSeconds() - doc['date0'].getMinutes()"), rsp -> { + assertEquals(2, rsp.getHits().getTotalHits().value); + SearchHits hits = rsp.getHits(); + assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(-11.0, hits.getAt(1).field("foo").getValue(), 0.0D); + }); + assertResponse(buildRequest("doc['date0'].getHourOfDay() + doc['date1'].getDayOfMonth()"), rsp -> { + assertEquals(2, rsp.getHits().getTotalHits().value); + SearchHits hits = rsp.getHits(); + assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(24.0, hits.getAt(1).field("foo").getValue(), 0.0D); + }); + assertResponse(buildRequest("doc['date1'].getMonth() + 1"), rsp -> { + assertEquals(2, rsp.getHits().getTotalHits().value); + SearchHits hits = rsp.getHits(); + assertEquals(9.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(10.0, hits.getAt(1).field("foo").getValue(), 0.0D); + }); + assertResponse(buildRequest("doc['date1'].getYear()"), rsp -> { + assertEquals(2, rsp.getHits().getTotalHits().value); + SearchHits hits = rsp.getHits(); + assertEquals(1985.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(1983.0, hits.getAt(1).field("foo").getValue(), 0.0D); + }); } public void testDateObjectMethods() throws Exception { @@ -170,26 +179,30 @@ public void testDateObjectMethods() throws Exception { client().prepareIndex("test").setId("1").setSource("id", 1, "date0", "2015-04-28T04:02:07Z", "date1", "1985-09-01T23:11:01Z"), client().prepareIndex("test").setId("2").setSource("id", 2, "date0", "2013-12-25T11:56:45Z", "date1", "1983-10-13T23:15:00Z") ); - SearchResponse rsp = buildRequest("doc['date0'].date.secondOfMinute - doc['date0'].date.minuteOfHour").get(); - assertEquals(2, rsp.getHits().getTotalHits().value); - SearchHits hits = rsp.getHits(); - assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(-11.0, hits.getAt(1).field("foo").getValue(), 0.0D); - rsp = buildRequest("doc['date0'].date.getHourOfDay() + doc['date1'].date.dayOfMonth").get(); - assertEquals(2, rsp.getHits().getTotalHits().value); - hits = rsp.getHits(); - assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(24.0, hits.getAt(1).field("foo").getValue(), 0.0D); - rsp = buildRequest("doc['date1'].date.monthOfYear + 1").get(); - assertEquals(2, rsp.getHits().getTotalHits().value); - hits = rsp.getHits(); - assertEquals(10.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(11.0, hits.getAt(1).field("foo").getValue(), 0.0D); - rsp = buildRequest("doc['date1'].date.year").get(); - assertEquals(2, rsp.getHits().getTotalHits().value); - hits = rsp.getHits(); - assertEquals(1985.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(1983.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertResponse(buildRequest("doc['date0'].date.secondOfMinute - doc['date0'].date.minuteOfHour"), rsp -> { + assertEquals(2, 
rsp.getHits().getTotalHits().value); + SearchHits hits = rsp.getHits(); + assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(-11.0, hits.getAt(1).field("foo").getValue(), 0.0D); + }); + assertResponse(buildRequest("doc['date0'].date.getHourOfDay() + doc['date1'].date.dayOfMonth"), rsp -> { + assertEquals(2, rsp.getHits().getTotalHits().value); + SearchHits hits = rsp.getHits(); + assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(24.0, hits.getAt(1).field("foo").getValue(), 0.0D); + }); + assertResponse(buildRequest("doc['date1'].date.monthOfYear + 1"), rsp -> { + assertEquals(2, rsp.getHits().getTotalHits().value); + SearchHits hits = rsp.getHits(); + assertEquals(10.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(11.0, hits.getAt(1).field("foo").getValue(), 0.0D); + }); + assertResponse(buildRequest("doc['date1'].date.year"), rsp -> { + assertEquals(2, rsp.getHits().getTotalHits().value); + SearchHits hits = rsp.getHits(); + assertEquals(1985.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(1983.0, hits.getAt(1).field("foo").getValue(), 0.0D); + }); } public void testMultiValueMethods() throws Exception { @@ -221,79 +234,79 @@ public void testMultiValueMethods() throws Exception { client().prepareIndex("test").setId("3").setSource(doc3) ); - SearchResponse rsp = buildRequest("doc['double0'].count() + doc['double1'].count()").get(); - assertNoFailures(rsp); - SearchHits hits = rsp.getHits(); - assertEquals(3, hits.getTotalHits().value); - assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(2.0, hits.getAt(1).field("foo").getValue(), 0.0D); - assertEquals(5.0, hits.getAt(2).field("foo").getValue(), 0.0D); - - rsp = buildRequest("doc['double0'].sum()").get(); - assertNoFailures(rsp); - hits = rsp.getHits(); - assertEquals(3, hits.getTotalHits().value); - assertEquals(7.5, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); - assertEquals(6.0, hits.getAt(2).field("foo").getValue(), 0.0D); - - rsp = buildRequest("doc['double0'].avg() + doc['double1'].avg()").get(); - assertNoFailures(rsp); - hits = rsp.getHits(); - assertEquals(3, hits.getTotalHits().value); - assertEquals(4.3, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(8.0, hits.getAt(1).field("foo").getValue(), 0.0D); - assertEquals(5.5, hits.getAt(2).field("foo").getValue(), 0.0D); - - rsp = buildRequest("doc['double0'].median()").get(); - assertNoFailures(rsp); - hits = rsp.getHits(); - assertEquals(3, hits.getTotalHits().value); - assertEquals(1.5, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); - assertEquals(1.25, hits.getAt(2).field("foo").getValue(), 0.0D); - - rsp = buildRequest("doc['double0'].min()").get(); - assertNoFailures(rsp); - hits = rsp.getHits(); - assertEquals(3, hits.getTotalHits().value); - assertEquals(1.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); - assertEquals(-1.5, hits.getAt(2).field("foo").getValue(), 0.0D); - - rsp = buildRequest("doc['double0'].max()").get(); - assertNoFailures(rsp); - hits = rsp.getHits(); - assertEquals(3, hits.getTotalHits().value); - assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); - assertEquals(5.0, hits.getAt(2).field("foo").getValue(), 0.0D); - - rsp = 
buildRequest("doc['double0'].sum()/doc['double0'].count()").get(); - assertNoFailures(rsp); - hits = rsp.getHits(); - assertEquals(3, hits.getTotalHits().value); - assertEquals(2.5, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); - assertEquals(1.5, hits.getAt(2).field("foo").getValue(), 0.0D); + assertNoFailuresAndResponse(buildRequest("doc['double0'].count() + doc['double1'].count()"), rsp -> { + SearchHits hits = rsp.getHits(); + assertEquals(3, hits.getTotalHits().value); + assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(2.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertEquals(5.0, hits.getAt(2).field("foo").getValue(), 0.0D); + }); + + assertNoFailuresAndResponse(buildRequest("doc['double0'].sum()"), rsp -> { + SearchHits hits = rsp.getHits(); + assertEquals(3, hits.getTotalHits().value); + assertEquals(7.5, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertEquals(6.0, hits.getAt(2).field("foo").getValue(), 0.0D); + }); + + assertNoFailuresAndResponse(buildRequest("doc['double0'].avg() + doc['double1'].avg()"), rsp -> { + SearchHits hits = rsp.getHits(); + assertEquals(3, hits.getTotalHits().value); + assertEquals(4.3, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(8.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertEquals(5.5, hits.getAt(2).field("foo").getValue(), 0.0D); + }); + + assertNoFailuresAndResponse(buildRequest("doc['double0'].median()"), rsp -> { + SearchHits hits = rsp.getHits(); + assertEquals(3, hits.getTotalHits().value); + assertEquals(1.5, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertEquals(1.25, hits.getAt(2).field("foo").getValue(), 0.0D); + }); + + assertNoFailuresAndResponse(buildRequest("doc['double0'].min()"), rsp -> { + SearchHits hits = rsp.getHits(); + assertEquals(3, hits.getTotalHits().value); + assertEquals(1.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertEquals(-1.5, hits.getAt(2).field("foo").getValue(), 0.0D); + }); + + assertNoFailuresAndResponse(buildRequest("doc['double0'].max()"), rsp -> { + SearchHits hits = rsp.getHits(); + assertEquals(3, hits.getTotalHits().value); + assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertEquals(5.0, hits.getAt(2).field("foo").getValue(), 0.0D); + }); + + assertNoFailuresAndResponse(buildRequest("doc['double0'].sum()/doc['double0'].count()"), rsp -> { + SearchHits hits = rsp.getHits(); + assertEquals(3, hits.getTotalHits().value); + assertEquals(2.5, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertEquals(1.5, hits.getAt(2).field("foo").getValue(), 0.0D); + }); // make sure count() works for missing - rsp = buildRequest("doc['double2'].count()").get(); - assertNoFailures(rsp); - hits = rsp.getHits(); - assertEquals(3, hits.getTotalHits().value); - assertEquals(1.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(0.0, hits.getAt(1).field("foo").getValue(), 0.0D); - assertEquals(0.0, hits.getAt(2).field("foo").getValue(), 0.0D); + assertNoFailuresAndResponse(buildRequest("doc['double2'].count()"), rsp -> { + SearchHits hits = rsp.getHits(); + assertEquals(3, hits.getTotalHits().value); + assertEquals(1.0, 
hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(0.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertEquals(0.0, hits.getAt(2).field("foo").getValue(), 0.0D); + }); // make sure .empty works in the same way - rsp = buildRequest("doc['double2'].empty ? 5.0 : 2.0").get(); - assertNoFailures(rsp); - hits = rsp.getHits(); - assertEquals(3, hits.getTotalHits().value); - assertEquals(2.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); - assertEquals(5.0, hits.getAt(2).field("foo").getValue(), 0.0D); + assertNoFailuresAndResponse(buildRequest("doc['double2'].empty ? 5.0 : 2.0"), rsp -> { + SearchHits hits = rsp.getHits(); + assertEquals(3, hits.getTotalHits().value); + assertEquals(2.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertEquals(5.0, hits.getAt(2).field("foo").getValue(), 0.0D); + }); } public void testInvalidDateMethodCall() throws Exception { @@ -325,12 +338,12 @@ public void testSparseField() throws Exception { client().prepareIndex("test").setId("1").setSource("id", 1, "x", 4), client().prepareIndex("test").setId("2").setSource("id", 2, "y", 2) ); - SearchResponse rsp = buildRequest("doc['x'] + 1").get(); - assertNoFailures(rsp); - SearchHits hits = rsp.getHits(); - assertEquals(2, rsp.getHits().getTotalHits().value); - assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(1.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertNoFailuresAndResponse(buildRequest("doc['x'] + 1"), rsp -> { + SearchHits hits = rsp.getHits(); + assertEquals(2, rsp.getHits().getTotalHits().value); + assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(1.0, hits.getAt(1).field("foo").getValue(), 0.0D); + }); } public void testMissingField() throws Exception { @@ -361,12 +374,13 @@ public void testParams() throws Exception { ); // a = int, b = double, c = long String script = "doc['x'] * a + b + ((c + doc['x']) > 5000000009 ? 
1 : 0)"; - SearchResponse rsp = buildRequest(script, "a", 2, "b", 3.5, "c", 5000000000L).get(); - SearchHits hits = rsp.getHits(); - assertEquals(3, hits.getTotalHits().value); - assertEquals(24.5, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(9.5, hits.getAt(1).field("foo").getValue(), 0.0D); - assertEquals(13.5, hits.getAt(2).field("foo").getValue(), 0.0D); + assertResponse(buildRequest(script, "a", 2, "b", 3.5, "c", 5000000000L), rsp -> { + SearchHits hits = rsp.getHits(); + assertEquals(3, hits.getTotalHits().value); + assertEquals(24.5, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(9.5, hits.getAt(1).field("foo").getValue(), 0.0D); + assertEquals(13.5, hits.getAt(2).field("foo").getValue(), 0.0D); + }); } public void testCompileFailure() { @@ -484,21 +498,22 @@ public void testSpecialValueVariable() throws Exception { .script(new Script(ScriptType.INLINE, ExpressionScriptEngine.NAME, "3.0", Collections.emptyMap())) ); - SearchResponse rsp = req.get(); - assertEquals(3, rsp.getHits().getTotalHits().value); + assertResponse(req, rsp -> { + assertEquals(3, rsp.getHits().getTotalHits().value); - Stats stats = rsp.getAggregations().get("int_agg"); - assertEquals(39.0, stats.getMax(), 0.0001); - assertEquals(15.0, stats.getMin(), 0.0001); + Stats stats = rsp.getAggregations().get("int_agg"); + assertEquals(39.0, stats.getMax(), 0.0001); + assertEquals(15.0, stats.getMin(), 0.0001); - stats = rsp.getAggregations().get("double_agg"); - assertEquals(0.7, stats.getMax(), 0.0001); - assertEquals(0.1, stats.getMin(), 0.0001); + stats = rsp.getAggregations().get("double_agg"); + assertEquals(0.7, stats.getMax(), 0.0001); + assertEquals(0.1, stats.getMin(), 0.0001); - stats = rsp.getAggregations().get("const_agg"); - assertThat(stats.getMax(), equalTo(3.0)); - assertThat(stats.getMin(), equalTo(3.0)); - assertThat(stats.getAvg(), equalTo(3.0)); + stats = rsp.getAggregations().get("const_agg"); + assertThat(stats.getMax(), equalTo(3.0)); + assertThat(stats.getMin(), equalTo(3.0)); + assertThat(stats.getAvg(), equalTo(3.0)); + }); } public void testStringSpecialValueVariable() throws Exception { @@ -520,18 +535,19 @@ public void testStringSpecialValueVariable() throws Exception { .script(new Script(ScriptType.INLINE, ExpressionScriptEngine.NAME, "_value", Collections.emptyMap())) ); - String message; + AtomicReference message = new AtomicReference<>(); try { // shards that don't have docs with the "text" field will not fail, // so we may or may not get a total failure - SearchResponse rsp = req.get(); - assertThat(rsp.getShardFailures().length, greaterThan(0)); // at least the shards containing the docs should have failed - message = rsp.getShardFailures()[0].reason(); + assertResponse(req, rsp -> { + assertThat(rsp.getShardFailures().length, greaterThan(0)); // at least the shards containing the docs should have failed + message.set(rsp.getShardFailures()[0].reason()); + }); } catch (SearchPhaseExecutionException e) { - message = e.toString(); + message.set(e.toString()); } - assertThat(message + "should have contained ScriptException", message.contains("ScriptException"), equalTo(true)); - assertThat(message + "should have contained text variable error", message.contains("text variable"), equalTo(true)); + assertThat(message + "should have contained ScriptException", message.get().contains("ScriptException"), equalTo(true)); + assertThat(message + "should have contained text variable error", message.get().contains("text variable"), equalTo(true)); } // test to make 
sure expressions are not allowed to be used as update scripts @@ -565,44 +581,52 @@ public void testPipelineAggregationScript() throws Exception { client().prepareIndex("agg_index").setId("4").setSource("one", 4.0, "two", 2.0, "three", 3.0, "four", 4.0), client().prepareIndex("agg_index").setId("5").setSource("one", 5.0, "two", 2.0, "three", 3.0, "four", 4.0) ); - SearchResponse response = prepareSearch("agg_index").addAggregation( - histogram("histogram").field("one") - .interval(2) - .subAggregation(sum("twoSum").field("two")) - .subAggregation(sum("threeSum").field("three")) - .subAggregation(sum("fourSum").field("four")) - .subAggregation( - bucketScript( - "totalSum", - new Script(ScriptType.INLINE, ExpressionScriptEngine.NAME, "_value0 + _value1 + _value2", Collections.emptyMap()), - "twoSum", - "threeSum", - "fourSum" + assertResponse( + prepareSearch("agg_index").addAggregation( + histogram("histogram").field("one") + .interval(2) + .subAggregation(sum("twoSum").field("two")) + .subAggregation(sum("threeSum").field("three")) + .subAggregation(sum("fourSum").field("four")) + .subAggregation( + bucketScript( + "totalSum", + new Script( + ScriptType.INLINE, + ExpressionScriptEngine.NAME, + "_value0 + _value1 + _value2", + Collections.emptyMap() + ), + "twoSum", + "threeSum", + "fourSum" + ) ) - ) - ).execute().actionGet(); - - Histogram histogram = response.getAggregations().get("histogram"); - assertThat(histogram, notNullValue()); - assertThat(histogram.getName(), equalTo("histogram")); - List buckets = histogram.getBuckets(); - - for (int bucketCount = 0; bucketCount < buckets.size(); ++bucketCount) { - Histogram.Bucket bucket = buckets.get(bucketCount); - if (bucket.getDocCount() == 1) { - SimpleValue seriesArithmetic = bucket.getAggregations().get("totalSum"); - assertThat(seriesArithmetic, notNullValue()); - double seriesArithmeticValue = seriesArithmetic.value(); - assertEquals(9.0, seriesArithmeticValue, 0.001); - } else if (bucket.getDocCount() == 2) { - SimpleValue seriesArithmetic = bucket.getAggregations().get("totalSum"); - assertThat(seriesArithmetic, notNullValue()); - double seriesArithmeticValue = seriesArithmetic.value(); - assertEquals(18.0, seriesArithmeticValue, 0.001); - } else { - fail("Incorrect number of documents in a bucket in the histogram."); + ), + response -> { + Histogram histogram = response.getAggregations().get("histogram"); + assertThat(histogram, notNullValue()); + assertThat(histogram.getName(), equalTo("histogram")); + List buckets = histogram.getBuckets(); + + for (int bucketCount = 0; bucketCount < buckets.size(); ++bucketCount) { + Histogram.Bucket bucket = buckets.get(bucketCount); + if (bucket.getDocCount() == 1) { + SimpleValue seriesArithmetic = bucket.getAggregations().get("totalSum"); + assertThat(seriesArithmetic, notNullValue()); + double seriesArithmeticValue = seriesArithmetic.value(); + assertEquals(9.0, seriesArithmeticValue, 0.001); + } else if (bucket.getDocCount() == 2) { + SimpleValue seriesArithmetic = bucket.getAggregations().get("totalSum"); + assertThat(seriesArithmetic, notNullValue()); + double seriesArithmeticValue = seriesArithmetic.value(); + assertEquals(18.0, seriesArithmeticValue, 0.001); + } else { + fail("Incorrect number of documents in a bucket in the histogram."); + } + } } - } + ); } public void testGeo() throws Exception { @@ -630,25 +654,25 @@ public void testGeo() throws Exception { .actionGet(); refresh(); // access .lat - SearchResponse rsp = buildRequest("doc['location'].lat").get(); - 
assertNoFailures(rsp); - assertEquals(1, rsp.getHits().getTotalHits().value); - assertEquals(61.5240, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); + assertNoFailuresAndResponse(buildRequest("doc['location'].lat"), rsp -> { + assertEquals(1, rsp.getHits().getTotalHits().value); + assertEquals(61.5240, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); + }); // access .lon - rsp = buildRequest("doc['location'].lon").get(); - assertNoFailures(rsp); - assertEquals(1, rsp.getHits().getTotalHits().value); - assertEquals(105.3188, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); + assertNoFailuresAndResponse(buildRequest("doc['location'].lon"), rsp -> { + assertEquals(1, rsp.getHits().getTotalHits().value); + assertEquals(105.3188, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); + }); // access .empty - rsp = buildRequest("doc['location'].empty ? 1 : 0").get(); - assertNoFailures(rsp); - assertEquals(1, rsp.getHits().getTotalHits().value); - assertEquals(0, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); + assertNoFailuresAndResponse(buildRequest("doc['location'].empty ? 1 : 0"), rsp -> { + assertEquals(1, rsp.getHits().getTotalHits().value); + assertEquals(0, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); + }); // call haversin - rsp = buildRequest("haversin(38.9072, 77.0369, doc['location'].lat, doc['location'].lon)").get(); - assertNoFailures(rsp); - assertEquals(1, rsp.getHits().getTotalHits().value); - assertEquals(3170D, rsp.getHits().getAt(0).field("foo").getValue(), 50D); + assertNoFailuresAndResponse(buildRequest("haversin(38.9072, 77.0369, doc['location'].lat, doc['location'].lon)"), rsp -> { + assertEquals(1, rsp.getHits().getTotalHits().value); + assertEquals(3170D, rsp.getHits().getAt(0).field("foo").getValue(), 50D); + }); } public void testBoolean() throws Exception { @@ -668,27 +692,27 @@ public void testBoolean() throws Exception { client().prepareIndex("test").setId("3").setSource("id", 3, "price", 2.0, "vip", false) ); // access .value - SearchResponse rsp = buildRequest("doc['vip'].value").get(); - assertNoFailures(rsp); - assertEquals(3, rsp.getHits().getTotalHits().value); - assertEquals(1.0D, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); - assertEquals(0.0D, rsp.getHits().getAt(1).field("foo").getValue(), 1.0D); - assertEquals(0.0D, rsp.getHits().getAt(2).field("foo").getValue(), 1.0D); + assertNoFailuresAndResponse(buildRequest("doc['vip'].value"), rsp -> { + assertEquals(3, rsp.getHits().getTotalHits().value); + assertEquals(1.0D, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); + assertEquals(0.0D, rsp.getHits().getAt(1).field("foo").getValue(), 1.0D); + assertEquals(0.0D, rsp.getHits().getAt(2).field("foo").getValue(), 1.0D); + }); // access .empty - rsp = buildRequest("doc['vip'].empty ? 1 : 0").get(); - assertNoFailures(rsp); - assertEquals(3, rsp.getHits().getTotalHits().value); - assertEquals(0.0D, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); - assertEquals(0.0D, rsp.getHits().getAt(1).field("foo").getValue(), 1.0D); - assertEquals(1.0D, rsp.getHits().getAt(2).field("foo").getValue(), 1.0D); + assertNoFailuresAndResponse(buildRequest("doc['vip'].empty ? 
1 : 0"), rsp -> { + assertEquals(3, rsp.getHits().getTotalHits().value); + assertEquals(0.0D, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); + assertEquals(0.0D, rsp.getHits().getAt(1).field("foo").getValue(), 1.0D); + assertEquals(1.0D, rsp.getHits().getAt(2).field("foo").getValue(), 1.0D); + }); // ternary operator // vip's have a 50% discount - rsp = buildRequest("doc['vip'] ? doc['price']/2 : doc['price']").get(); - assertNoFailures(rsp); - assertEquals(3, rsp.getHits().getTotalHits().value); - assertEquals(0.5D, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); - assertEquals(2.0D, rsp.getHits().getAt(1).field("foo").getValue(), 1.0D); - assertEquals(2.0D, rsp.getHits().getAt(2).field("foo").getValue(), 1.0D); + assertNoFailuresAndResponse(buildRequest("doc['vip'] ? doc['price']/2 : doc['price']"), rsp -> { + assertEquals(3, rsp.getHits().getTotalHits().value); + assertEquals(0.5D, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); + assertEquals(2.0D, rsp.getHits().getAt(1).field("foo").getValue(), 1.0D); + assertEquals(2.0D, rsp.getHits().getAt(2).field("foo").getValue(), 1.0D); + }); } public void testFilterScript() throws Exception { @@ -702,9 +726,9 @@ public void testFilterScript() throws Exception { SearchRequestBuilder builder = buildRequest("doc['foo'].value"); Script script = new Script(ScriptType.INLINE, "expression", "doc['foo'].value", Collections.emptyMap()); builder.setQuery(QueryBuilders.boolQuery().filter(QueryBuilders.scriptQuery(script))); - SearchResponse rsp = builder.get(); - assertNoFailures(rsp); - assertEquals(1, rsp.getHits().getTotalHits().value); - assertEquals(1.0D, rsp.getHits().getAt(0).field("foo").getValue(), 0.0D); + assertNoFailuresAndResponse(builder, rsp -> { + assertEquals(1, rsp.getHits().getTotalHits().value); + assertEquals(1.0D, rsp.getHits().getAt(0).field("foo").getValue(), 0.0D); + }); } } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportMultiSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportMultiSearchTemplateAction.java index d859fb509e915..4b0c365ba8b13 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportMultiSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportMultiSearchTemplateAction.java @@ -8,6 +8,9 @@ package org.elasticsearch.script.mustache; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.MultiSearchRequest; import org.elasticsearch.action.search.MultiSearchResponse; @@ -31,6 +34,8 @@ public class TransportMultiSearchTemplateAction extends HandledTransportAction { + private static final Logger logger = LogManager.getLogger(TransportMultiSearchTemplateAction.class); + private final ScriptService scriptService; private final NamedXContentRegistry xContentRegistry; private final NodeClient client; @@ -76,6 +81,9 @@ protected void doExecute(Task task, MultiSearchTemplateRequest request, ActionLi searchRequest = convert(searchTemplateRequest, searchTemplateResponse, scriptService, xContentRegistry, searchUsageHolder); } catch (Exception e) { items[i] = new MultiSearchTemplateResponse.Item(null, e); + if (ExceptionsHelper.status(e).getStatus() >= 500 && ExceptionsHelper.isNodeOrShardUnavailableTypeException(e) == false) { + logger.warn("MultiSearchTemplate 
convert failure", e); + } continue; } items[i] = new MultiSearchTemplateResponse.Item(searchTemplateResponse, null); diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java index 30937ebcbd773..1fcf776ac8428 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java @@ -8,7 +8,6 @@ package org.elasticsearch.painless.spi; -import java.util.Collections; import java.util.List; import java.util.Objects; @@ -47,11 +46,10 @@ public Whitelist( List whitelistClassBindings, List whitelistInstanceBindings ) { - this.classLoader = Objects.requireNonNull(classLoader); - this.whitelistClasses = Collections.unmodifiableList(Objects.requireNonNull(whitelistClasses)); - this.whitelistImportedMethods = Collections.unmodifiableList(Objects.requireNonNull(whitelistImportedMethods)); - this.whitelistClassBindings = Collections.unmodifiableList(Objects.requireNonNull(whitelistClassBindings)); - this.whitelistInstanceBindings = Collections.unmodifiableList(Objects.requireNonNull(whitelistInstanceBindings)); + this.whitelistClasses = List.copyOf(whitelistClasses); + this.whitelistImportedMethods = List.copyOf(whitelistImportedMethods); + this.whitelistClassBindings = List.copyOf(whitelistClassBindings); + this.whitelistInstanceBindings = List.copyOf(whitelistInstanceBindings); } } diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistClass.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistClass.java index 2130f9343dfa3..1daad59768a15 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistClass.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistClass.java @@ -8,11 +8,10 @@ package org.elasticsearch.painless.spi; -import java.util.AbstractMap; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.function.Function; import java.util.stream.Collectors; /** @@ -59,23 +58,12 @@ public WhitelistClass( List whitelistFields, List painlessAnnotations ) { - this.origin = Objects.requireNonNull(origin); this.javaClassName = Objects.requireNonNull(javaClassName); - - this.whitelistConstructors = Collections.unmodifiableList(Objects.requireNonNull(whitelistConstructors)); - this.whitelistMethods = Collections.unmodifiableList(Objects.requireNonNull(whitelistMethods)); - this.whitelistFields = Collections.unmodifiableList(Objects.requireNonNull(whitelistFields)); - - if (painlessAnnotations.isEmpty()) { - this.painlessAnnotations = Collections.emptyMap(); - } else { - this.painlessAnnotations = Collections.unmodifiableMap( - Objects.requireNonNull(painlessAnnotations) - .stream() - .map(painlessAnnotation -> new AbstractMap.SimpleEntry<>(painlessAnnotation.getClass(), painlessAnnotation)) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)) - ); - } + this.whitelistConstructors = List.copyOf(whitelistConstructors); + this.whitelistMethods = List.copyOf(whitelistMethods); + this.whitelistFields = List.copyOf(whitelistFields); + this.painlessAnnotations = painlessAnnotations.stream() + .collect(Collectors.toUnmodifiableMap(Object::getClass, Function.identity())); } } diff --git 
a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistField.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistField.java index c1a3c43196647..872482bcf6281 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistField.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistField.java @@ -8,11 +8,10 @@ package org.elasticsearch.painless.spi; -import java.util.AbstractMap; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.function.Function; import java.util.stream.Collectors; /** @@ -39,16 +38,7 @@ public WhitelistField(String origin, String fieldName, String canonicalTypeNameP this.origin = Objects.requireNonNull(origin); this.fieldName = Objects.requireNonNull(fieldName); this.canonicalTypeNameParameter = Objects.requireNonNull(canonicalTypeNameParameter); - - if (painlessAnnotations.isEmpty()) { - this.painlessAnnotations = Collections.emptyMap(); - } else { - this.painlessAnnotations = Collections.unmodifiableMap( - Objects.requireNonNull(painlessAnnotations) - .stream() - .map(painlessAnnotation -> new AbstractMap.SimpleEntry<>(painlessAnnotation.getClass(), painlessAnnotation)) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)) - ); - } + this.painlessAnnotations = painlessAnnotations.stream() + .collect(Collectors.toUnmodifiableMap(Object::getClass, Function.identity())); } } diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistMethod.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistMethod.java index 8451d1c9f3ef4..8927d290ecc77 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistMethod.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistMethod.java @@ -8,11 +8,10 @@ package org.elasticsearch.painless.spi; -import java.util.AbstractMap; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.function.Function; import java.util.stream.Collectors; /** @@ -69,22 +68,12 @@ public WhitelistMethod( List canonicalTypeNameParameters, List painlessAnnotations ) { - this.origin = Objects.requireNonNull(origin); this.augmentedCanonicalClassName = augmentedCanonicalClassName; this.methodName = methodName; this.returnCanonicalTypeName = Objects.requireNonNull(returnCanonicalTypeName); - this.canonicalTypeNameParameters = Collections.unmodifiableList(Objects.requireNonNull(canonicalTypeNameParameters)); - - if (painlessAnnotations.isEmpty()) { - this.painlessAnnotations = Collections.emptyMap(); - } else { - this.painlessAnnotations = Collections.unmodifiableMap( - Objects.requireNonNull(painlessAnnotations) - .stream() - .map(painlessAnnotation -> new AbstractMap.SimpleEntry<>(painlessAnnotation.getClass(), painlessAnnotation)) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)) - ); - } + this.canonicalTypeNameParameters = List.copyOf(canonicalTypeNameParameters); + this.painlessAnnotations = painlessAnnotations.stream() + .collect(Collectors.toUnmodifiableMap(Object::getClass, Function.identity())); } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClass.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClass.java index 3fc572d8446bc..d32639bf3968f 100644 --- 
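The whitelist constructor rewrites above, and the PainlessClass and PainlessLookupBuilder hunks below, all lean on the JDK's immutable-collection factories: List.copyOf and Map.copyOf reject null arguments and elements themselves, and return the input unchanged when it is already an immutable copy, which is what makes the explicit Objects.requireNonNull and Collections.unmodifiable* wrapping redundant. A small demonstration of the behavior being relied on (sample values invented; the identity shortcut is how current JDKs implement the copyOf contract):

import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;

final class ImmutableCopyDemo {
    public static void main(String[] args) {
        List<String> source = List.of("a", "b");
        List<String> copy = List.copyOf(source);
        // Already-immutable inputs come back as-is rather than re-wrapped.
        System.out.println(copy == source); // true

        // Index a heterogeneous list by concrete class, as the whitelist
        // constructors now do for their painless annotation objects.
        List<Object> annotations = List.of("marker", 42);
        Map<Class<?>, Object> byClass = annotations.stream()
            .collect(Collectors.toUnmodifiableMap(Object::getClass, Function.identity()));
        System.out.println(byClass.get(Integer.class)); // 42

        // Compact mutable values in place, as PainlessLookupBuilder does below;
        // copying an empty set collapses it to a shared empty instance.
        Map<String, Set<String>> subclasses = new HashMap<>();
        subclasses.put("base", new HashSet<>());
        subclasses.replaceAll((key, set) -> Set.copyOf(set));
        System.out.println(subclasses.get("base").size()); // 0
    }
}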
a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClass.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClass.java @@ -45,7 +45,7 @@ public final class PainlessClass { this.staticFields = Map.copyOf(staticFields); this.fields = Map.copyOf(fields); this.functionalInterfaceMethod = functionalInterfaceMethod; - this.annotations = annotations; + this.annotations = Map.copyOf(annotations); this.getterMethodHandles = Map.copyOf(getterMethodHandles); this.setterMethodHandles = Map.copyOf(setterMethodHandles); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java index bf001c5e49db9..0c1497b541954 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java @@ -1680,6 +1680,7 @@ public PainlessLookup build() { ); } + classesToDirectSubClasses.replaceAll((key, set) -> Set.copyOf(set)); // save some memory, especially when set is empty return new PainlessLookup( javaClassNamesToClasses, canonicalClassNamesToClasses, diff --git a/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/TokenCountFieldMapperIntegrationIT.java b/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/TokenCountFieldMapperIntegrationIT.java index ed5d89ad1df8c..0bdfbc3d90ead 100644 --- a/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/TokenCountFieldMapperIntegrationIT.java +++ b/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/TokenCountFieldMapperIntegrationIT.java @@ -32,6 +32,7 @@ import java.util.Collection; import java.util.List; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; @@ -72,12 +73,12 @@ protected Collection> nodePlugins() { public void testSearchReturnsTokenCount() throws IOException { init(); - assertSearchReturns(searchById("single"), "single"); - assertSearchReturns(searchById("bulk1"), "bulk1"); - assertSearchReturns(searchById("bulk2"), "bulk2"); - assertSearchReturns(searchById("multi"), "multi"); - assertSearchReturns(searchById("multibulk1"), "multibulk1"); - assertSearchReturns(searchById("multibulk2"), "multibulk2"); + assertResponse(searchById("single"), resp -> assertSearchReturns(resp, "single")); + assertResponse(searchById("bulk1"), resp -> assertSearchReturns(resp, "bulk1")); + assertResponse(searchById("bulk2"), resp -> assertSearchReturns(resp, "bulk2")); + assertResponse(searchById("multi"), resp -> assertSearchReturns(resp, "multi")); + assertResponse(searchById("multibulk1"), resp -> assertSearchReturns(resp, "multibulk1")); + assertResponse(searchById("multibulk2"), resp -> assertSearchReturns(resp, "multibulk2")); } /** @@ -86,11 +87,14 @@ public void testSearchReturnsTokenCount() throws IOException { public void testSearchByTokenCount() throws IOException { init(); - assertSearchReturns(searchByNumericRange(4, 4).get(), "single"); - assertSearchReturns(searchByNumericRange(10, 10).get(), "multibulk2"); - assertSearchReturns(searchByNumericRange(7, 10).get(), "multi", "multibulk1", 
"multibulk2"); - assertSearchReturns(searchByNumericRange(1, 10).get(), "single", "bulk1", "bulk2", "multi", "multibulk1", "multibulk2"); - assertSearchReturns(searchByNumericRange(12, 12).get()); + assertResponse(searchByNumericRange(4, 4), response -> assertSearchReturns(response, "single")); + assertResponse(searchByNumericRange(10, 10), response -> assertSearchReturns(response, "multibulk2")); + assertResponse(searchByNumericRange(7, 10), response -> assertSearchReturns(response, "multi", "multibulk1", "multibulk2")); + assertResponse( + searchByNumericRange(1, 10), + response -> assertSearchReturns(response, "single", "bulk1", "bulk2", "multi", "multibulk1", "multibulk2") + ); + assertResponse(searchByNumericRange(12, 12), this::assertSearchReturns); } /** @@ -100,11 +104,12 @@ public void testFacetByTokenCount() throws IOException { init(); String facetField = randomFrom(Arrays.asList("foo.token_count", "foo.token_count_unstored", "foo.token_count_with_doc_values")); - SearchResponse result = searchByNumericRange(1, 10).addAggregation(AggregationBuilders.terms("facet").field(facetField)).get(); - assertSearchReturns(result, "single", "bulk1", "bulk2", "multi", "multibulk1", "multibulk2"); - assertThat(result.getAggregations().asList().size(), equalTo(1)); - Terms terms = (Terms) result.getAggregations().asList().get(0); - assertThat(terms.getBuckets().size(), equalTo(9)); + assertResponse(searchByNumericRange(1, 10).addAggregation(AggregationBuilders.terms("facet").field(facetField)), result -> { + assertSearchReturns(result, "single", "bulk1", "bulk2", "multi", "multibulk1", "multibulk2"); + assertThat(result.getAggregations().asList().size(), equalTo(1)); + Terms terms = (Terms) result.getAggregations().asList().get(0); + assertThat(terms.getBuckets().size(), equalTo(9)); + }); } private void init() throws IOException { @@ -174,8 +179,8 @@ private IndexRequestBuilder prepareIndex(String id, String... 
texts) throws IOEx return client().prepareIndex("test").setId(id).setSource("foo", texts); } - private SearchResponse searchById(String id) { - return prepareTokenCountFieldMapperSearch().setQuery(QueryBuilders.termQuery("_id", id)).get(); + private SearchRequestBuilder searchById(String id) { + return prepareTokenCountFieldMapperSearch().setQuery(QueryBuilders.termQuery("_id", id)); } private SearchRequestBuilder searchByNumericRange(int low, int high) { diff --git a/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ChildQuerySearchIT.java b/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ChildQuerySearchIT.java index 34ead2c21480b..cc9a3a1a248db 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ChildQuerySearchIT.java +++ b/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ChildQuerySearchIT.java @@ -806,8 +806,10 @@ public void testHasChildInnerHitsHighlighting() throws Exception { assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); SearchHit[] searchHits = response.getHits().getHits()[0].getInnerHits().get("child").getHits(); assertThat(searchHits.length, equalTo(1)); - assertThat(searchHits[0].getHighlightFields().get("c_field").getFragments().length, equalTo(1)); - assertThat(searchHits[0].getHighlightFields().get("c_field").getFragments()[0].string(), equalTo("foo bar")); + HighlightField highlightField1 = searchHits[0].getHighlightFields().get("c_field"); + assertThat(highlightField1.fragments().length, equalTo(1)); + HighlightField highlightField = searchHits[0].getHighlightFields().get("c_field"); + assertThat(highlightField.fragments()[0].string(), equalTo("foo bar")); } ); } @@ -1786,7 +1788,7 @@ public void testHighlightersIgnoreParentChild() throws IOException { assertHitCount(response, 1); assertThat(response.getHits().getAt(0).getId(), equalTo("parent-id")); HighlightField highlightField = response.getHits().getAt(0).getHighlightFields().get("searchText"); - assertThat(highlightField.getFragments()[0].string(), equalTo("quick brown fox")); + assertThat(highlightField.fragments()[0].string(), equalTo("quick brown fox")); } ); @@ -1799,7 +1801,7 @@ public void testHighlightersIgnoreParentChild() throws IOException { assertHitCount(response, 1); assertThat(response.getHits().getAt(0).getId(), equalTo("child-id")); HighlightField highlightField = response.getHits().getAt(0).getHighlightFields().get("searchText"); - assertThat(highlightField.getFragments()[0].string(), equalTo("quick brown fox")); + assertThat(highlightField.fragments()[0].string(), equalTo("quick brown fox")); } ); } diff --git a/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/InnerHitsIT.java b/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/InnerHitsIT.java index 39a84f2d16d7f..02eaacba0b1de 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/InnerHitsIT.java +++ b/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/InnerHitsIT.java @@ -26,6 +26,7 @@ import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; +import org.elasticsearch.search.fetch.subphase.highlight.HighlightField; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.search.sort.SortOrder; @@ -183,10 
+184,8 @@ public void testSimpleParentChild() throws Exception { response -> { SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comment"); assertThat(innerHits.getHits().length, equalTo(1)); - assertThat( - innerHits.getAt(0).getHighlightFields().get("message").getFragments()[0].string(), - equalTo("fox eat quick") - ); + HighlightField highlightField = innerHits.getAt(0).getHighlightFields().get("message"); + assertThat(highlightField.fragments()[0].string(), equalTo("fox eat quick")); assertThat(innerHits.getAt(0).getExplanation().toString(), containsString("weight(message:fox")); assertThat(innerHits.getAt(0).getFields().get("message").getValue().toString(), equalTo("fox eat quick")); assertThat(innerHits.getAt(0).getFields().get("script").getValue().toString(), equalTo("5")); diff --git a/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ParentChildTestCase.java b/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ParentChildTestCase.java index b4846a1c003a6..a67ebd4cbca22 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ParentChildTestCase.java +++ b/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ParentChildTestCase.java @@ -29,11 +29,6 @@ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE) public abstract class ParentChildTestCase extends ESIntegTestCase { - @Override - protected boolean ignoreExternalCluster() { - return true; - } - @Override protected Collection> nodePlugins() { return Arrays.asList(InternalSettingsPlugin.class, ParentJoinPlugin.class); diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregationBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregationBuilder.java index 57649129a638f..0de7b74759828 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregationBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregationBuilder.java @@ -24,7 +24,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; -import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry; import org.elasticsearch.search.aggregations.support.ValuesSourceType; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; @@ -175,11 +174,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return ValuesSourceRegistry.UNREGISTERED_KEY; - } - @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.ZERO; diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregationBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregationBuilder.java index d608efcba9b83..b130411e5e099 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregationBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregationBuilder.java @@ -24,7 +24,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import 
org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; -import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry; import org.elasticsearch.search.aggregations.support.ValuesSourceType; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; @@ -178,11 +177,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return ValuesSourceRegistry.UNREGISTERED_KEY; - } - @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.ZERO; diff --git a/modules/percolator/src/internalClusterTest/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java b/modules/percolator/src/internalClusterTest/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java index cad976411b8da..b00a73787b7c5 100644 --- a/modules/percolator/src/internalClusterTest/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java +++ b/modules/percolator/src/internalClusterTest/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.MultiSearchResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -58,6 +57,7 @@ import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xcontent.XContentFactory.yamlBuilder; @@ -101,52 +101,66 @@ public void testPercolatorQuery() throws Exception { BytesReference source = BytesReference.bytes(jsonBuilder().startObject().endObject()); logger.info("percolating empty doc"); - SearchResponse response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertResponse(prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)), response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + }); source = BytesReference.bytes(jsonBuilder().startObject().field("field1", "value").endObject()); logger.info("percolating doc with 1 field"); - response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)) - .addSort("id", SortOrder.ASC) - .get(); - assertHitCount(response, 2); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); - assertThat(response.getHits().getAt(1).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); + assertResponse( + prepareSearch().setQuery(new PercolateQueryBuilder("query", source, 
XContentType.JSON)).addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 2); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(1).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); + } + ); source = BytesReference.bytes(jsonBuilder().startObject().field("field1", "value").field("field2", "value").endObject()); logger.info("percolating doc with 2 fields"); - response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)) - .addSort("id", SortOrder.ASC) - .get(); - assertHitCount(response, 3); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); - assertThat(response.getHits().getAt(1).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); - assertThat(response.getHits().getAt(2).getId(), equalTo("3")); - assertThat(response.getHits().getAt(2).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); - + assertResponse( + prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(1).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + assertThat(response.getHits().getAt(2).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); + } + ); logger.info("percolating doc with 2 fields"); - response = prepareSearch().setQuery( - new PercolateQueryBuilder( - "query", - Arrays.asList( - BytesReference.bytes(jsonBuilder().startObject().field("field1", "value").endObject()), - BytesReference.bytes(jsonBuilder().startObject().field("field1", "value").field("field2", "value").endObject()) - ), - XContentType.JSON - ) - ).addSort("id", SortOrder.ASC).get(); - assertHitCount(response, 3); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValues(), equalTo(Arrays.asList(0, 1))); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); - assertThat(response.getHits().getAt(1).getFields().get("_percolator_document_slot").getValues(), equalTo(Arrays.asList(0, 1))); - assertThat(response.getHits().getAt(2).getId(), equalTo("3")); - assertThat(response.getHits().getAt(2).getFields().get("_percolator_document_slot").getValues(), equalTo(Arrays.asList(1))); + assertResponse( + prepareSearch().setQuery( + new PercolateQueryBuilder( + "query", + Arrays.asList( + BytesReference.bytes(jsonBuilder().startObject().field("field1", "value").endObject()), + BytesReference.bytes(jsonBuilder().startObject().field("field1", "value").field("field2", "value").endObject()) + ), + XContentType.JSON + ) + ).addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getAt(0).getId(), 
equalTo("1")); + assertThat( + response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValues(), + equalTo(Arrays.asList(0, 1)) + ); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat( + response.getHits().getAt(1).getFields().get("_percolator_document_slot").getValues(), + equalTo(Arrays.asList(0, 1)) + ); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + assertThat(response.getHits().getAt(2).getFields().get("_percolator_document_slot").getValues(), equalTo(Arrays.asList(1))); + } + ); } public void testPercolatorRangeQueries() throws Exception { @@ -236,46 +250,52 @@ public void testPercolatorRangeQueries() throws Exception { // Test long range: BytesReference source = BytesReference.bytes(jsonBuilder().startObject().field("field1", 12).endObject()); - SearchResponse response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); - logger.info("response={}", response); - assertHitCount(response, 2); - assertThat(response.getHits().getAt(0).getId(), equalTo("3")); - assertThat(response.getHits().getAt(1).getId(), equalTo("1")); + assertResponse(prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)), response -> { + logger.info("response={}", response); + assertHitCount(response, 2); + assertThat(response.getHits().getAt(0).getId(), equalTo("3")); + assertThat(response.getHits().getAt(1).getId(), equalTo("1")); + }); source = BytesReference.bytes(jsonBuilder().startObject().field("field1", 11).endObject()); - response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - + assertResponse(prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)), response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + }); // Test double range: source = BytesReference.bytes(jsonBuilder().startObject().field("field2", 12).endObject()); - response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); - assertHitCount(response, 2); - assertThat(response.getHits().getAt(0).getId(), equalTo("6")); - assertThat(response.getHits().getAt(1).getId(), equalTo("4")); + assertResponse(prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)), response -> { + assertHitCount(response, 2); + assertThat(response.getHits().getAt(0).getId(), equalTo("6")); + assertThat(response.getHits().getAt(1).getId(), equalTo("4")); + }); source = BytesReference.bytes(jsonBuilder().startObject().field("field2", 11).endObject()); - response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("4")); + assertResponse(prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)), response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("4")); + }); // Test IP range: source = BytesReference.bytes(jsonBuilder().startObject().field("field3", "192.168.1.5").endObject()); - response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); - assertHitCount(response, 2); - assertThat(response.getHits().getAt(0).getId(), equalTo("9")); - 
assertThat(response.getHits().getAt(1).getId(), equalTo("7")); + assertResponse(prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)), response -> { + assertHitCount(response, 2); + assertThat(response.getHits().getAt(0).getId(), equalTo("9")); + assertThat(response.getHits().getAt(1).getId(), equalTo("7")); + }); source = BytesReference.bytes(jsonBuilder().startObject().field("field3", "192.168.1.4").endObject()); - response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("7")); + assertResponse(prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)), response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("7")); + }); // Test date range: source = BytesReference.bytes(jsonBuilder().startObject().field("field4", "2016-05-15").endObject()); - response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("10")); + assertResponse(prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)), response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("10")); + }); } public void testPercolatorGeoQueries() throws Exception { @@ -323,13 +343,15 @@ public void testPercolatorGeoQueries() throws Exception { BytesReference source = BytesReference.bytes( jsonBuilder().startObject().startObject("field1").field("lat", 52.20).field("lon", 4.51).endObject().endObject() ); - SearchResponse response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)) - .addSort("id", SortOrder.ASC) - .get(); - assertHitCount(response, 3); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); - assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + assertResponse( + prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + } + ); } public void testPercolatorQueryExistingDocument() throws Exception { @@ -362,26 +384,31 @@ public void testPercolatorQueryExistingDocument() throws Exception { indicesAdmin().prepareRefresh().get(); logger.info("percolating empty doc"); - SearchResponse response = prepareSearch().setQuery(new PercolateQueryBuilder("query", "test", "1", null, null, null)).get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertResponse(prepareSearch().setQuery(new PercolateQueryBuilder("query", "test", "1", null, null, null)), response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + }); logger.info("percolating doc with 1 field"); - response = prepareSearch().setQuery(new PercolateQueryBuilder("query", "test", "5", null, null, null)) - .addSort("id", SortOrder.ASC) - .get(); - assertHitCount(response, 2); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + 
assertResponse( + prepareSearch().setQuery(new PercolateQueryBuilder("query", "test", "5", null, null, null)).addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 2); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + } + ); logger.info("percolating doc with 2 fields"); - response = prepareSearch().setQuery(new PercolateQueryBuilder("query", "test", "6", null, null, null)) - .addSort("id", SortOrder.ASC) - .get(); - assertHitCount(response, 3); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); - assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + assertResponse( + prepareSearch().setQuery(new PercolateQueryBuilder("query", "test", "6", null, null, null)).addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + } + ); } public void testPercolatorQueryExistingDocumentSourceDisabled() throws Exception { @@ -481,16 +508,18 @@ public void testPercolatorSpecificQueries() throws Exception { .field("field2", "the quick brown fox falls down into the well") .endObject() ); - SearchResponse response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)) - .addSort("id", SortOrder.ASC) - .get(); - assertHitCount(response, 3); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(0).getScore(), equalTo(Float.NaN)); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); - assertThat(response.getHits().getAt(1).getScore(), equalTo(Float.NaN)); - assertThat(response.getHits().getAt(2).getId(), equalTo("3")); - assertThat(response.getHits().getAt(2).getScore(), equalTo(Float.NaN)); + assertResponse( + prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(0).getScore(), equalTo(Float.NaN)); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(1).getScore(), equalTo(Float.NaN)); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + assertThat(response.getHits().getAt(2).getScore(), equalTo(Float.NaN)); + } + ); } public void testPercolatorQueryWithHighlighting() throws Exception { @@ -534,198 +563,225 @@ public void testPercolatorQueryWithHighlighting() throws Exception { BytesReference document = BytesReference.bytes( jsonBuilder().startObject().field("field1", "The quick brown fox jumps over the lazy dog").endObject() ); - SearchResponse searchResponse = prepareSearch().setQuery(new PercolateQueryBuilder("query", document, XContentType.JSON)) - .highlighter(new HighlightBuilder().field("field1")) - .addSort("id", SortOrder.ASC) - .get(); - assertHitCount(searchResponse, 5); - - assertThat( - searchResponse.getHits().getAt(0).getHighlightFields().get("field1").fragments()[0].string(), - equalTo("The quick brown fox jumps over the lazy dog") - ); - assertThat( - searchResponse.getHits().getAt(1).getHighlightFields().get("field1").fragments()[0].string(), - equalTo("The quick brown fox jumps over the lazy dog") - ); - assertThat( - 
searchResponse.getHits().getAt(2).getHighlightFields().get("field1").fragments()[0].string(), - equalTo("The quick brown fox jumps over the lazy dog") - ); - assertThat( - searchResponse.getHits().getAt(3).getHighlightFields().get("field1").fragments()[0].string(), - equalTo("The quick brown fox jumps over the lazy dog") - ); - assertThat( - searchResponse.getHits().getAt(4).getHighlightFields().get("field1").fragments()[0].string(), - equalTo("The quick brown fox jumps over the lazy dog") + assertResponse( + prepareSearch().setQuery(new PercolateQueryBuilder("query", document, XContentType.JSON)) + .highlighter(new HighlightBuilder().field("field1")) + .addSort("id", SortOrder.ASC), + searchResponse -> { + assertHitCount(searchResponse, 5); + + assertThat( + searchResponse.getHits().getAt(0).getHighlightFields().get("field1").fragments()[0].string(), + equalTo("The quick brown fox jumps over the lazy dog") + ); + assertThat( + searchResponse.getHits().getAt(1).getHighlightFields().get("field1").fragments()[0].string(), + equalTo("The quick brown fox jumps over the lazy dog") + ); + assertThat( + searchResponse.getHits().getAt(2).getHighlightFields().get("field1").fragments()[0].string(), + equalTo("The quick brown fox jumps over the lazy dog") + ); + assertThat( + searchResponse.getHits().getAt(3).getHighlightFields().get("field1").fragments()[0].string(), + equalTo("The quick brown fox jumps over the lazy dog") + ); + assertThat( + searchResponse.getHits().getAt(4).getHighlightFields().get("field1").fragments()[0].string(), + equalTo("The quick brown fox jumps over the lazy dog") + ); + } ); BytesReference document1 = BytesReference.bytes( jsonBuilder().startObject().field("field1", "The quick brown fox jumps").endObject() ); BytesReference document2 = BytesReference.bytes(jsonBuilder().startObject().field("field1", "over the lazy dog").endObject()); - searchResponse = prepareSearch().setQuery( - boolQuery().should(new PercolateQueryBuilder("query", document1, XContentType.JSON).setName("query1")) - .should(new PercolateQueryBuilder("query", document2, XContentType.JSON).setName("query2")) - ).highlighter(new HighlightBuilder().field("field1")).addSort("id", SortOrder.ASC).get(); - logger.info("searchResponse={}", searchResponse); - assertHitCount(searchResponse, 5); - - assertThat( - searchResponse.getHits().getAt(0).getHighlightFields().get("query1_field1").fragments()[0].string(), - equalTo("The quick brown fox jumps") - ); - assertThat( - searchResponse.getHits().getAt(1).getHighlightFields().get("query2_field1").fragments()[0].string(), - equalTo("over the lazy dog") - ); - assertThat( - searchResponse.getHits().getAt(2).getHighlightFields().get("query1_field1").fragments()[0].string(), - equalTo("The quick brown fox jumps") - ); - assertThat( - searchResponse.getHits().getAt(3).getHighlightFields().get("query2_field1").fragments()[0].string(), - equalTo("over the lazy dog") - ); - assertThat( - searchResponse.getHits().getAt(4).getHighlightFields().get("query1_field1").fragments()[0].string(), - equalTo("The quick brown fox jumps") - ); - - searchResponse = prepareSearch().setQuery( - new PercolateQueryBuilder( - "query", - Arrays.asList( - BytesReference.bytes(jsonBuilder().startObject().field("field1", "dog").endObject()), - BytesReference.bytes(jsonBuilder().startObject().field("field1", "fox").endObject()), - BytesReference.bytes(jsonBuilder().startObject().field("field1", "jumps").endObject()), - BytesReference.bytes(jsonBuilder().startObject().field("field1", "brown 
fox").endObject()) - ), - XContentType.JSON - ) - ).highlighter(new HighlightBuilder().field("field1")).addSort("id", SortOrder.ASC).get(); - assertHitCount(searchResponse, 5); - assertThat( - searchResponse.getHits().getAt(0).getFields().get("_percolator_document_slot").getValues(), - equalTo(Arrays.asList(1, 3)) - ); - assertThat(searchResponse.getHits().getAt(0).getHighlightFields().get("1_field1").fragments()[0].string(), equalTo("fox")); - assertThat( - searchResponse.getHits().getAt(0).getHighlightFields().get("3_field1").fragments()[0].string(), - equalTo("brown fox") - ); - assertThat( - searchResponse.getHits().getAt(1).getFields().get("_percolator_document_slot").getValues(), - equalTo(Collections.singletonList(0)) - ); - assertThat(searchResponse.getHits().getAt(1).getHighlightFields().get("0_field1").fragments()[0].string(), equalTo("dog")); - assertThat( - searchResponse.getHits().getAt(2).getFields().get("_percolator_document_slot").getValues(), - equalTo(Collections.singletonList(2)) - ); - assertThat( - searchResponse.getHits().getAt(2).getHighlightFields().get("2_field1").fragments()[0].string(), - equalTo("jumps") - ); - assertThat( - searchResponse.getHits().getAt(3).getFields().get("_percolator_document_slot").getValues(), - equalTo(Collections.singletonList(0)) - ); - assertThat(searchResponse.getHits().getAt(3).getHighlightFields().get("0_field1").fragments()[0].string(), equalTo("dog")); - assertThat( - searchResponse.getHits().getAt(4).getFields().get("_percolator_document_slot").getValues(), - equalTo(Arrays.asList(1, 3)) - ); - assertThat(searchResponse.getHits().getAt(4).getHighlightFields().get("1_field1").fragments()[0].string(), equalTo("fox")); - assertThat( - searchResponse.getHits().getAt(4).getHighlightFields().get("3_field1").fragments()[0].string(), - equalTo("brown fox") + assertResponse( + prepareSearch().setQuery( + boolQuery().should(new PercolateQueryBuilder("query", document1, XContentType.JSON).setName("query1")) + .should(new PercolateQueryBuilder("query", document2, XContentType.JSON).setName("query2")) + ).highlighter(new HighlightBuilder().field("field1")).addSort("id", SortOrder.ASC), + searchResponse -> { + logger.info("searchResponse={}", searchResponse); + assertHitCount(searchResponse, 5); + + assertThat( + searchResponse.getHits().getAt(0).getHighlightFields().get("query1_field1").fragments()[0].string(), + equalTo("The quick brown fox jumps") + ); + assertThat( + searchResponse.getHits().getAt(1).getHighlightFields().get("query2_field1").fragments()[0].string(), + equalTo("over the lazy dog") + ); + assertThat( + searchResponse.getHits().getAt(2).getHighlightFields().get("query1_field1").fragments()[0].string(), + equalTo("The quick brown fox jumps") + ); + assertThat( + searchResponse.getHits().getAt(3).getHighlightFields().get("query2_field1").fragments()[0].string(), + equalTo("over the lazy dog") + ); + assertThat( + searchResponse.getHits().getAt(4).getHighlightFields().get("query1_field1").fragments()[0].string(), + equalTo("The quick brown fox jumps") + ); + } ); - searchResponse = prepareSearch().setQuery( - boolQuery().should( + assertResponse( + prepareSearch().setQuery( new PercolateQueryBuilder( "query", Arrays.asList( BytesReference.bytes(jsonBuilder().startObject().field("field1", "dog").endObject()), - BytesReference.bytes(jsonBuilder().startObject().field("field1", "fox").endObject()) + BytesReference.bytes(jsonBuilder().startObject().field("field1", "fox").endObject()), + 
BytesReference.bytes(jsonBuilder().startObject().field("field1", "jumps").endObject()), + BytesReference.bytes(jsonBuilder().startObject().field("field1", "brown fox").endObject()) ), XContentType.JSON - ).setName("query1") - ) - .should( + ) + ).highlighter(new HighlightBuilder().field("field1")).addSort("id", SortOrder.ASC), + searchResponse -> { + assertHitCount(searchResponse, 5); + assertThat( + searchResponse.getHits().getAt(0).getFields().get("_percolator_document_slot").getValues(), + equalTo(Arrays.asList(1, 3)) + ); + assertThat( + searchResponse.getHits().getAt(0).getHighlightFields().get("1_field1").fragments()[0].string(), + equalTo("fox") + ); + assertThat( + searchResponse.getHits().getAt(0).getHighlightFields().get("3_field1").fragments()[0].string(), + equalTo("brown fox") + ); + assertThat( + searchResponse.getHits().getAt(1).getFields().get("_percolator_document_slot").getValues(), + equalTo(Collections.singletonList(0)) + ); + assertThat( + searchResponse.getHits().getAt(1).getHighlightFields().get("0_field1").fragments()[0].string(), + equalTo("dog") + ); + assertThat( + searchResponse.getHits().getAt(2).getFields().get("_percolator_document_slot").getValues(), + equalTo(Collections.singletonList(2)) + ); + assertThat( + searchResponse.getHits().getAt(2).getHighlightFields().get("2_field1").fragments()[0].string(), + equalTo("jumps") + ); + assertThat( + searchResponse.getHits().getAt(3).getFields().get("_percolator_document_slot").getValues(), + equalTo(Collections.singletonList(0)) + ); + assertThat( + searchResponse.getHits().getAt(3).getHighlightFields().get("0_field1").fragments()[0].string(), + equalTo("dog") + ); + assertThat( + searchResponse.getHits().getAt(4).getFields().get("_percolator_document_slot").getValues(), + equalTo(Arrays.asList(1, 3)) + ); + assertThat( + searchResponse.getHits().getAt(4).getHighlightFields().get("1_field1").fragments()[0].string(), + equalTo("fox") + ); + assertThat( + searchResponse.getHits().getAt(4).getHighlightFields().get("3_field1").fragments()[0].string(), + equalTo("brown fox") + ); + } + ); + + assertResponse( + prepareSearch().setQuery( + boolQuery().should( new PercolateQueryBuilder( "query", Arrays.asList( - BytesReference.bytes(jsonBuilder().startObject().field("field1", "jumps").endObject()), - BytesReference.bytes(jsonBuilder().startObject().field("field1", "brown fox").endObject()) + BytesReference.bytes(jsonBuilder().startObject().field("field1", "dog").endObject()), + BytesReference.bytes(jsonBuilder().startObject().field("field1", "fox").endObject()) ), XContentType.JSON - ).setName("query2") + ).setName("query1") ) - ).highlighter(new HighlightBuilder().field("field1")).addSort("id", SortOrder.ASC).get(); - logger.info("searchResponse={}", searchResponse); - assertHitCount(searchResponse, 5); - assertThat( - searchResponse.getHits().getAt(0).getFields().get("_percolator_document_slot_query1").getValues(), - equalTo(Collections.singletonList(1)) - ); - assertThat( - searchResponse.getHits().getAt(0).getFields().get("_percolator_document_slot_query2").getValues(), - equalTo(Collections.singletonList(1)) - ); - assertThat( - searchResponse.getHits().getAt(0).getHighlightFields().get("query1_1_field1").fragments()[0].string(), - equalTo("fox") - ); - assertThat( - searchResponse.getHits().getAt(0).getHighlightFields().get("query2_1_field1").fragments()[0].string(), - equalTo("brown fox") - ); - - assertThat( - searchResponse.getHits().getAt(1).getFields().get("_percolator_document_slot_query1").getValues(), - 
equalTo(Collections.singletonList(0)) - ); - assertThat( - searchResponse.getHits().getAt(1).getHighlightFields().get("query1_0_field1").fragments()[0].string(), - equalTo("dog") - ); - - assertThat( - searchResponse.getHits().getAt(2).getFields().get("_percolator_document_slot_query2").getValues(), - equalTo(Collections.singletonList(0)) - ); - assertThat( - searchResponse.getHits().getAt(2).getHighlightFields().get("query2_0_field1").fragments()[0].string(), - equalTo("jumps") - ); - - assertThat( - searchResponse.getHits().getAt(3).getFields().get("_percolator_document_slot_query1").getValues(), - equalTo(Collections.singletonList(0)) - ); - assertThat( - searchResponse.getHits().getAt(3).getHighlightFields().get("query1_0_field1").fragments()[0].string(), - equalTo("dog") - ); - - assertThat( - searchResponse.getHits().getAt(4).getFields().get("_percolator_document_slot_query1").getValues(), - equalTo(Collections.singletonList(1)) - ); - assertThat( - searchResponse.getHits().getAt(4).getFields().get("_percolator_document_slot_query2").getValues(), - equalTo(Collections.singletonList(1)) - ); - assertThat( - searchResponse.getHits().getAt(4).getHighlightFields().get("query1_1_field1").fragments()[0].string(), - equalTo("fox") - ); - assertThat( - searchResponse.getHits().getAt(4).getHighlightFields().get("query2_1_field1").fragments()[0].string(), - equalTo("brown fox") + .should( + new PercolateQueryBuilder( + "query", + Arrays.asList( + BytesReference.bytes(jsonBuilder().startObject().field("field1", "jumps").endObject()), + BytesReference.bytes(jsonBuilder().startObject().field("field1", "brown fox").endObject()) + ), + XContentType.JSON + ).setName("query2") + ) + ).highlighter(new HighlightBuilder().field("field1")).addSort("id", SortOrder.ASC), + searchResponse -> { + logger.info("searchResponse={}", searchResponse); + assertHitCount(searchResponse, 5); + assertThat( + searchResponse.getHits().getAt(0).getFields().get("_percolator_document_slot_query1").getValues(), + equalTo(Collections.singletonList(1)) + ); + assertThat( + searchResponse.getHits().getAt(0).getFields().get("_percolator_document_slot_query2").getValues(), + equalTo(Collections.singletonList(1)) + ); + assertThat( + searchResponse.getHits().getAt(0).getHighlightFields().get("query1_1_field1").fragments()[0].string(), + equalTo("fox") + ); + assertThat( + searchResponse.getHits().getAt(0).getHighlightFields().get("query2_1_field1").fragments()[0].string(), + equalTo("brown fox") + ); + + assertThat( + searchResponse.getHits().getAt(1).getFields().get("_percolator_document_slot_query1").getValues(), + equalTo(Collections.singletonList(0)) + ); + assertThat( + searchResponse.getHits().getAt(1).getHighlightFields().get("query1_0_field1").fragments()[0].string(), + equalTo("dog") + ); + + assertThat( + searchResponse.getHits().getAt(2).getFields().get("_percolator_document_slot_query2").getValues(), + equalTo(Collections.singletonList(0)) + ); + assertThat( + searchResponse.getHits().getAt(2).getHighlightFields().get("query2_0_field1").fragments()[0].string(), + equalTo("jumps") + ); + + assertThat( + searchResponse.getHits().getAt(3).getFields().get("_percolator_document_slot_query1").getValues(), + equalTo(Collections.singletonList(0)) + ); + assertThat( + searchResponse.getHits().getAt(3).getHighlightFields().get("query1_0_field1").fragments()[0].string(), + equalTo("dog") + ); + + assertThat( + searchResponse.getHits().getAt(4).getFields().get("_percolator_document_slot_query1").getValues(), + 
equalTo(Collections.singletonList(1)) + ); + assertThat( + searchResponse.getHits().getAt(4).getFields().get("_percolator_document_slot_query2").getValues(), + equalTo(Collections.singletonList(1)) + ); + assertThat( + searchResponse.getHits().getAt(4).getHighlightFields().get("query1_1_field1").fragments()[0].string(), + equalTo("fox") + ); + assertThat( + searchResponse.getHits().getAt(4).getHighlightFields().get("query2_1_field1").fragments()[0].string(), + equalTo("brown fox") + ); + } ); } @@ -743,11 +799,15 @@ public void testTakePositionOffsetGapIntoAccount() throws Exception { .get(); indicesAdmin().prepareRefresh().get(); - SearchResponse response = prepareSearch().setQuery( - new PercolateQueryBuilder("query", new BytesArray("{\"field\" : [\"brown\", \"fox\"]}"), XContentType.JSON) - ).get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("2")); + assertResponse( + prepareSearch().setQuery( + new PercolateQueryBuilder("query", new BytesArray("{\"field\" : [\"brown\", \"fox\"]}"), XContentType.JSON) + ), + response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("2")); + } + ); } public void testManyPercolatorFields() throws Exception { @@ -825,19 +885,24 @@ public void testWithMultiplePercolatorFields() throws Exception { indicesAdmin().prepareRefresh().get(); BytesReference source = BytesReference.bytes(jsonBuilder().startObject().field("field", "value").endObject()); - SearchResponse response = prepareSearch().setQuery(new PercolateQueryBuilder(queryFieldName, source, XContentType.JSON)) - .setIndices("test1") - .get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(0).getIndex(), equalTo("test1")); + assertResponse( + prepareSearch().setQuery(new PercolateQueryBuilder(queryFieldName, source, XContentType.JSON)).setIndices("test1"), + response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(0).getIndex(), equalTo("test1")); + } + ); - response = prepareSearch().setQuery(new PercolateQueryBuilder("object_field." + queryFieldName, source, XContentType.JSON)) - .setIndices("test2") - .get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(0).getIndex(), equalTo("test2")); + assertResponse( + prepareSearch().setQuery(new PercolateQueryBuilder("object_field." 
+ queryFieldName, source, XContentType.JSON)) + .setIndices("test2"), + response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(0).getIndex(), equalTo("test2")); + } + ); // Unacceptable: DocumentParsingException e = expectThrows(DocumentParsingException.class, () -> { @@ -919,67 +984,10 @@ public void testPercolateQueryWithNestedDocuments() throws Exception { .get(); indicesAdmin().prepareRefresh().get(); - SearchResponse response = prepareSearch().setQuery( - new PercolateQueryBuilder( - "query", - BytesReference.bytes( - XContentFactory.jsonBuilder() - .startObject() - .field("companyname", "stark") - .startArray("employee") - .startObject() - .field("name", "virginia potts") - .endObject() - .startObject() - .field("name", "tony stark") - .endObject() - .endArray() - .endObject() - ), - XContentType.JSON - ) - ).addSort("id", SortOrder.ASC).get(); - assertHitCount(response, 2); - assertThat(response.getHits().getAt(0).getId(), equalTo("q1")); - assertThat(response.getHits().getAt(1).getId(), equalTo("q3")); - - response = prepareSearch().setQuery( - new PercolateQueryBuilder( - "query", - BytesReference.bytes( - XContentFactory.jsonBuilder() - .startObject() - .field("companyname", "notstark") - .startArray("employee") - .startObject() - .field("name", "virginia stark") - .endObject() - .startObject() - .field("name", "tony stark") - .endObject() - .endArray() - .endObject() - ), - XContentType.JSON - ) - ).addSort("id", SortOrder.ASC).get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("q3")); - - response = prepareSearch().setQuery( - new PercolateQueryBuilder( - "query", - BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("companyname", "notstark").endObject()), - XContentType.JSON - ) - ).addSort("id", SortOrder.ASC).get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("q3")); - - response = prepareSearch().setQuery( - new PercolateQueryBuilder( - "query", - Arrays.asList( + assertResponse( + prepareSearch().setQuery( + new PercolateQueryBuilder( + "query", BytesReference.bytes( XContentFactory.jsonBuilder() .startObject() @@ -994,40 +1002,119 @@ public void testPercolateQueryWithNestedDocuments() throws Exception { .endArray() .endObject() ), + XContentType.JSON + ) + ).addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 2); + assertThat(response.getHits().getAt(0).getId(), equalTo("q1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("q3")); + } + ); + + assertResponse( + prepareSearch().setQuery( + new PercolateQueryBuilder( + "query", BytesReference.bytes( XContentFactory.jsonBuilder() .startObject() - .field("companyname", "stark") + .field("companyname", "notstark") .startArray("employee") .startObject() - .field("name", "peter parker") + .field("name", "virginia stark") .endObject() .startObject() - .field("name", "virginia potts") + .field("name", "tony stark") .endObject() .endArray() .endObject() ), - BytesReference.bytes( - XContentFactory.jsonBuilder() - .startObject() - .field("companyname", "stark") - .startArray("employee") - .startObject() - .field("name", "peter parker") - .endObject() - .endArray() - .endObject() - ) - ), - XContentType.JSON - ) - ).addSort("id", SortOrder.ASC).get(); - assertHitCount(response, 2); - assertThat(response.getHits().getAt(0).getId(), equalTo("q1")); - 
assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValues(), equalTo(Arrays.asList(0, 1))); - assertThat(response.getHits().getAt(1).getId(), equalTo("q3")); - assertThat(response.getHits().getAt(1).getFields().get("_percolator_document_slot").getValues(), equalTo(Arrays.asList(0, 1, 2))); + XContentType.JSON + ) + ).addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("q3")); + } + ); + + assertResponse( + prepareSearch().setQuery( + new PercolateQueryBuilder( + "query", + BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("companyname", "notstark").endObject()), + XContentType.JSON + ) + ).addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("q3")); + } + ); + + assertResponse( + prepareSearch().setQuery( + new PercolateQueryBuilder( + "query", + Arrays.asList( + BytesReference.bytes( + XContentFactory.jsonBuilder() + .startObject() + .field("companyname", "stark") + .startArray("employee") + .startObject() + .field("name", "virginia potts") + .endObject() + .startObject() + .field("name", "tony stark") + .endObject() + .endArray() + .endObject() + ), + BytesReference.bytes( + XContentFactory.jsonBuilder() + .startObject() + .field("companyname", "stark") + .startArray("employee") + .startObject() + .field("name", "peter parker") + .endObject() + .startObject() + .field("name", "virginia potts") + .endObject() + .endArray() + .endObject() + ), + BytesReference.bytes( + XContentFactory.jsonBuilder() + .startObject() + .field("companyname", "stark") + .startArray("employee") + .startObject() + .field("name", "peter parker") + .endObject() + .endArray() + .endObject() + ) + ), + XContentType.JSON + ) + ).addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 2); + assertThat(response.getHits().getAt(0).getId(), equalTo("q1")); + assertThat( + response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValues(), + equalTo(Arrays.asList(0, 1)) + ); + assertThat(response.getHits().getAt(1).getId(), equalTo("q3")); + assertThat( + response.getHits().getAt(1).getFields().get("_percolator_document_slot").getValues(), + equalTo(Arrays.asList(0, 1, 2)) + ); + } + ); } public void testPercolatorQueryViaMultiSearch() throws Exception { @@ -1153,10 +1240,11 @@ public void testDisallowExpensiveQueries() throws IOException { // Execute with search.allow_expensive_queries = null => default value = false => success BytesReference source = BytesReference.bytes(jsonBuilder().startObject().field("field1", "value").endObject()); - SearchResponse response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); + assertResponse(prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)), response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); + }); // Set search.allow_expensive_queries to "false" => assert failure updateClusterSettings(Settings.builder().put("search.allow_expensive_queries", false)); @@ -1173,10 +1261,11 @@ 
public void testDisallowExpensiveQueries() throws IOException { // Set search.allow_expensive_queries setting to "true" ==> success updateClusterSettings(Settings.builder().put("search.allow_expensive_queries", true)); - response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); + assertResponse(prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)), response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); + }); } finally { updateClusterSettings(Settings.builder().putNull("search.allow_expensive_queries")); } @@ -1200,35 +1289,40 @@ public void testWrappedWithConstantScore() throws Exception { indicesAdmin().prepareRefresh().get(); - SearchResponse response = prepareSearch("test").setQuery( - new PercolateQueryBuilder( - "q", - BytesReference.bytes(jsonBuilder().startObject().field("d", "2020-02-01T15:00:00.000+11:00").endObject()), - XContentType.JSON - ) - ).get(); - assertEquals(1, response.getHits().getTotalHits().value); - - response = prepareSearch("test").setQuery( - new PercolateQueryBuilder( - "q", - BytesReference.bytes(jsonBuilder().startObject().field("d", "2020-02-01T15:00:00.000+11:00").endObject()), - XContentType.JSON - ) - ).addSort("_doc", SortOrder.ASC).get(); - assertEquals(1, response.getHits().getTotalHits().value); + assertHitCount( + prepareSearch("test").setQuery( + new PercolateQueryBuilder( + "q", + BytesReference.bytes(jsonBuilder().startObject().field("d", "2020-02-01T15:00:00.000+11:00").endObject()), + XContentType.JSON + ) + ), + 1 + ); - response = prepareSearch("test").setQuery( - constantScoreQuery( + assertHitCount( + prepareSearch("test").setQuery( new PercolateQueryBuilder( "q", BytesReference.bytes(jsonBuilder().startObject().field("d", "2020-02-01T15:00:00.000+11:00").endObject()), XContentType.JSON ) - ) - ).get(); - assertEquals(1, response.getHits().getTotalHits().value); + ).addSort("_doc", SortOrder.ASC), + 1 + ); + assertHitCount( + prepareSearch("test").setQuery( + constantScoreQuery( + new PercolateQueryBuilder( + "q", + BytesReference.bytes(jsonBuilder().startObject().field("d", "2020-02-01T15:00:00.000+11:00").endObject()), + XContentType.JSON + ) + ) + ), + 1 + ); } public void testWithWildcardFieldNames() throws Exception { @@ -1262,41 +1356,49 @@ public void testWithWildcardFieldNames() throws Exception { .execute() .actionGet(); - SearchResponse response = prepareSearch("test").setQuery( - new PercolateQueryBuilder( - "q_simple", - BytesReference.bytes(jsonBuilder().startObject().field("text_1", "yada").endObject()), - XContentType.JSON - ) - ).get(); - assertEquals(1, response.getHits().getTotalHits().value); - - response = prepareSearch("test").setQuery( - new PercolateQueryBuilder( - "q_string", - BytesReference.bytes(jsonBuilder().startObject().field("text_1", "yada").endObject()), - XContentType.JSON - ) - ).get(); - assertEquals(1, response.getHits().getTotalHits().value); - - response = prepareSearch("test").setQuery( - new PercolateQueryBuilder( - "q_match", - BytesReference.bytes(jsonBuilder().startObject().field("text_1", "yada").endObject()), - XContentType.JSON - ) - ).get(); - 
assertEquals(1, response.getHits().getTotalHits().value); - - response = prepareSearch("test").setQuery( - new PercolateQueryBuilder( - "q_combo", - BytesReference.bytes(jsonBuilder().startObject().field("text_1", "yada").endObject()), - XContentType.JSON - ) - ).get(); - assertEquals(1, response.getHits().getTotalHits().value); + assertHitCount( + prepareSearch("test").setQuery( + new PercolateQueryBuilder( + "q_simple", + BytesReference.bytes(jsonBuilder().startObject().field("text_1", "yada").endObject()), + XContentType.JSON + ) + ), + 1 + ); + + assertHitCount( + prepareSearch("test").setQuery( + new PercolateQueryBuilder( + "q_string", + BytesReference.bytes(jsonBuilder().startObject().field("text_1", "yada").endObject()), + XContentType.JSON + ) + ), + 1 + ); + + assertHitCount( + prepareSearch("test").setQuery( + new PercolateQueryBuilder( + "q_match", + BytesReference.bytes(jsonBuilder().startObject().field("text_1", "yada").endObject()), + XContentType.JSON + ) + ), + 1 + ); + + assertHitCount( + prepareSearch("test").setQuery( + new PercolateQueryBuilder( + "q_combo", + BytesReference.bytes(jsonBuilder().startObject().field("text_1", "yada").endObject()), + XContentType.JSON + ) + ), + 1 + ); } public void testKnnQueryNotSupportedInPercolator() throws IOException { diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java index 9947d8a727d28..50284008eef48 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java @@ -56,11 +56,6 @@ public class ReindexDocumentationIT extends ESIntegTestCase { private static final Semaphore ALLOWED_OPERATIONS = new Semaphore(0); private static final String INDEX_NAME = "source_index"; - @Override - protected boolean ignoreExternalCluster() { - return true; - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(ReindexPlugin.class, ReindexCancellationPlugin.class);
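The conversions in this file and in the reindex tests below all follow one shape: the old code called .get() on a search request and asserted on the returned SearchResponse, while the new code hands those assertions to ElasticsearchAssertions.assertResponse, which releases the now ref-counted response afterwards. A minimal sketch of what such a helper has to do is shown here; the signature, generics, and body are assumptions for illustration, not a copy of the actual implementation:

    // Hypothetical sketch of an assertResponse-style helper (assumed shape, for illustration only).
    static void assertResponse(ActionFuture<SearchResponse> future, Consumer<SearchResponse> consumer) {
        SearchResponse response = future.actionGet(); // resolve the future, like the old .get() calls
        try {
            consumer.accept(response); // run the caller's assertions against the live response
        } finally {
            response.decRef(); // always release the ref-counted response, even if an assertion fails
        }
    }

An overload taking a request builder, as used with prepareSearch() in these hunks, would call builder.get() and apply the same try/finally discipline.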
diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/BulkByScrollUsesAllScrollDocumentsAfterConflictsIntegTests.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/BulkByScrollUsesAllScrollDocumentsAfterConflictsIntegTests.java index d7f71fcc510ab..8cb7c14671845 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/BulkByScrollUsesAllScrollDocumentsAfterConflictsIntegTests.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/BulkByScrollUsesAllScrollDocumentsAfterConflictsIntegTests.java @@ -15,7 +15,6 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; @@ -53,6 +52,7 @@ import static org.elasticsearch.common.lucene.uid.Versions.MATCH_DELETED; import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -201,16 +201,18 @@ public void testDeleteByQuery() throws Exception { // Ensure that the write thread blocking task is currently executing barrier.await(); - final SearchResponse searchResponse = prepareSearch(sourceIndex).setSize(numDocs) // Get all indexed docs - .addSort(SORTING_FIELD, SortOrder.DESC) - .execute() - .actionGet(); - - // Modify a subset of the target documents concurrently - final List<SearchHit> originalDocs = Arrays.asList(searchResponse.getHits().getHits()); int conflictingOps = randomIntBetween(maxDocs, numDocs); - final List<SearchHit> docsModifiedConcurrently = randomSubsetOf(conflictingOps, originalDocs); - + final int finalConflictingOps = conflictingOps; + final List<SearchHit> docsModifiedConcurrently = new ArrayList<>(); + assertResponse( + prepareSearch(sourceIndex).setSize(numDocs) // Get all indexed docs + .addSort(SORTING_FIELD, SortOrder.DESC), + response -> { + // Modify a subset of the target documents concurrently + final List<SearchHit> originalDocs = Arrays.asList(response.getHits().getHits()); + docsModifiedConcurrently.addAll(randomSubsetOf(finalConflictingOps, originalDocs)); + } + ); BulkRequest conflictingUpdatesBulkRequest = new BulkRequest(); for (SearchHit searchHit : docsModifiedConcurrently) { if (scriptEnabled && searchHit.getSourceAsMap().containsKey(RETURN_NOOP_FIELD)) { diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java index 14647820e71f6..0c1a0e41206c7 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java @@ -396,27 +396,29 @@ private void migrateWithTemplatesV2(String prefix, SystemIndexDescriptor... 
desc ); client().execute(PutComponentTemplateAction.INSTANCE, new PutComponentTemplateAction.Request("a-ct").componentTemplate(ct)).get(); - ComposableIndexTemplate cit = new ComposableIndexTemplate( - Collections.singletonList(prefix + "*"), - new Template( - null, - new CompressedXContent( - "{\n" - + " \"dynamic\": false,\n" - + " \"properties\": {\n" - + " \"field2\": {\n" - + " \"type\": \"keyword\"\n" - + " }\n" - + " }\n" - + " }" - ), - null - ), - Collections.singletonList("a-ct"), - 4L, - 5L, - Collections.singletonMap("baz", "thud") - ); + ComposableIndexTemplate cit = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList(prefix + "*")) + .template( + new Template( + null, + new CompressedXContent( + "{\n" + + " \"dynamic\": false,\n" + + " \"properties\": {\n" + + " \"field2\": {\n" + + " \"type\": \"keyword\"\n" + + " }\n" + + " }\n" + + " }" + ), + null + ) + ) + .componentTemplates(Collections.singletonList("a-ct")) + .priority(4L) + .version(5L) + .metadata(Collections.singletonMap("baz", "thud")) + .build(); client().execute(PutComposableIndexTemplateAction.INSTANCE, new PutComposableIndexTemplateAction.Request("a-it").indexTemplate(cit)) .get(); diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java index 8878e988eb4fb..3c5a3eb2e40f9 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java @@ -126,6 +126,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { private PlainActionFuture listener; private String scrollId; private ThreadPool threadPool; + private ThreadPool clientThreadPool; private TaskManager taskManager; private BulkByScrollTask testTask; private WorkerBulkByScrollTaskState worker; @@ -154,16 +155,18 @@ public void setupForTest() { } private void setupClient(ThreadPool threadPool) { - if (client != null) { - client.close(); + if (clientThreadPool != null) { + terminate(clientThreadPool); } + clientThreadPool = threadPool; client = new MyMockClient(new NoOpClient(threadPool)); client.threadPool().getThreadContext().putHeader(expectedHeaders); } @After public void tearDownAndVerifyCommonStuff() throws Exception { - client.close(); + terminate(clientThreadPool); + clientThreadPool = null; terminate(threadPool); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java index 58bda3229cb42..406b815bdd44d 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java @@ -111,26 +111,30 @@ private void dotestBasicsWithRetry(int retries, int minFailures, int maxFailures } client.validateRequest(SearchAction.INSTANCE, (SearchRequest r) -> assertTrue(r.allowPartialSearchResults() == Boolean.FALSE)); SearchResponse searchResponse = createSearchResponse(); - client.respond(SearchAction.INSTANCE, searchResponse); - - for (int i = 0; i < randomIntBetween(1, 10); ++i) { - ScrollableHitSource.AsyncResponse asyncResponse = responses.poll(10, TimeUnit.SECONDS); - assertNotNull(asyncResponse); - assertEquals(responses.size(), 0); - assertSameHits(asyncResponse.response().getHits(), 
searchResponse.getHits().getHits()); - asyncResponse.done(TimeValue.ZERO); - - for (int retry = 0; retry < randomIntBetween(minFailures, maxFailures); ++retry) { - client.fail(SearchScrollAction.INSTANCE, new EsRejectedExecutionException()); - client.awaitOperation(); - ++expectedSearchRetries; + try { + client.respond(SearchAction.INSTANCE, searchResponse); + + for (int i = 0; i < randomIntBetween(1, 10); ++i) { + ScrollableHitSource.AsyncResponse asyncResponse = responses.poll(10, TimeUnit.SECONDS); + assertNotNull(asyncResponse); + assertEquals(responses.size(), 0); + assertSameHits(asyncResponse.response().getHits(), searchResponse.getHits().getHits()); + asyncResponse.done(TimeValue.ZERO); + + for (int retry = 0; retry < randomIntBetween(minFailures, maxFailures); ++retry) { + client.fail(SearchScrollAction.INSTANCE, new EsRejectedExecutionException()); + client.awaitOperation(); + ++expectedSearchRetries; + } + + searchResponse = createSearchResponse(); + client.respond(SearchScrollAction.INSTANCE, searchResponse); } - searchResponse = createSearchResponse(); - client.respond(SearchScrollAction.INSTANCE, searchResponse); + assertEquals(actualSearchRetries.get(), expectedSearchRetries); + } finally { + searchResponse.decRef(); } - - assertEquals(actualSearchRetries.get(), expectedSearchRetries); } public void testScrollKeepAlive() { @@ -267,9 +271,6 @@ public void val ((ExecuteRequest) executeRequest).validateRequest(action, validator); } - @Override - public void close() {} - public synchronized void awaitOperation() throws InterruptedException { if (executeRequest == null) { wait(10000); diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexIdTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexIdTests.java index 34db459539323..644787446547e 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexIdTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexIdTests.java @@ -59,9 +59,12 @@ private ClusterState stateWithTemplate(Settings.Builder settings) { Template template = new Template(settings.build(), null, null); if (randomBoolean()) { metadata.put("c", new ComponentTemplate(template, null, null)); - metadata.put("c", new ComposableIndexTemplate(List.of("dest_index"), null, List.of("c"), null, null, null)); + metadata.put( + "c", + ComposableIndexTemplate.builder().indexPatterns(List.of("dest_index")).componentTemplates(List.of("c")).build() + ); } else { - metadata.put("c", new ComposableIndexTemplate(List.of("dest_index"), template, null, null, null, null)); + metadata.put("c", ComposableIndexTemplate.builder().indexPatterns(List.of("dest_index")).template(template).build()); } return ClusterState.builder(ClusterState.EMPTY_STATE).metadata(metadata).build(); } diff --git a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java index 2cb4476f528b9..64f20453e1cee 100644 --- a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java +++ b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java @@ -105,7 +105,7 @@ protected void createRepository(String repoName) { private void ensureSasTokenPermissions() { final BlobStoreRepository repository = 
getRepository(); - final PlainActionFuture<Void> future = PlainActionFuture.newFuture(); + final PlainActionFuture<Void> future = new PlainActionFuture<>(); repository.threadPool().generic().execute(ActionRunnable.wrap(future, l -> { final AzureBlobStore blobStore = (AzureBlobStore) repository.blobStore(); final AzureBlobServiceClient azureBlobServiceClient = blobStore.getService().client("default", LocationMode.PRIMARY_ONLY); @@ -136,7 +136,7 @@ public void testMultiBlockUpload() throws Exception { final BlobStoreRepository repo = getRepository(); // The configured threshold for this test suite is 1mb final int blobSize = ByteSizeUnit.MB.toIntBytes(2); - PlainActionFuture<Void> future = PlainActionFuture.newFuture(); + PlainActionFuture<Void> future = new PlainActionFuture<>(); repo.threadPool().generic().execute(ActionRunnable.run(future, () -> { final BlobContainer blobContainer = repo.blobStore().blobContainer(repo.basePath().add("large_write")); blobContainer.writeBlob( diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RequestRetryStats.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RequestRetryStats.java index ae2441c2e705d..b7c37c6d95fde 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RequestRetryStats.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RequestRetryStats.java @@ -24,6 +24,7 @@ * This class emits aws s3 metrics as logs until we have a proper apm integration */ public class S3RequestRetryStats { + public static final String MESSAGE_FIELD = "message"; private static final Logger logger = LogManager.getLogger(S3RequestRetryStats.class); @@ -65,7 +66,8 @@ private static long getCounter(TimingInfo info, AWSRequestMetrics.Field field) { public void emitMetrics() { if (logger.isDebugEnabled()) { - var metrics = Maps.newMapWithExpectedSize(3); + var metrics = Maps.newMapWithExpectedSize(4); + metrics.put(MESSAGE_FIELD, "S3 Request Retry Stats"); metrics.put("elasticsearch.metrics.s3.requests", requests.get()); metrics.put("elasticsearch.metrics.s3.exceptions", exceptions.get()); metrics.put("elasticsearch.metrics.s3.throttles", throttles.get()); diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java index 291cf84019cd1..25bba12db6952 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java @@ -9,6 +9,7 @@ package org.elasticsearch.repositories.s3; import com.amazonaws.ClientConfiguration; +import com.amazonaws.SDKGlobalConfiguration; import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.AWSCredentialsProviderChain; @@ -320,6 +321,7 @@ static class CustomWebIdentityTokenCredentialsProvider implements AWSCredentials private STSAssumeRoleWithWebIdentitySessionCredentialsProvider credentialsProvider; private AWSSecurityTokenService stsClient; + private String stsRegion; CustomWebIdentityTokenCredentialsProvider( Environment environment, @@ -361,10 +363,24 @@ static class CustomWebIdentityTokenCredentialsProvider implements AWSCredentials ); AWSSecurityTokenServiceClientBuilder stsClientBuilder = AWSSecurityTokenServiceClient.builder(); - // Custom system property used for specifying a mocked version of the STS for testing - String customStsEndpoint = 
jvmEnvironment.getProperty("com.amazonaws.sdk.stsMetadataServiceEndpointOverride", STS_HOSTNAME); - // Set the region explicitly via the endpoint URL, so the AWS SDK doesn't make any guesses internally. - stsClientBuilder.withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(customStsEndpoint, null)); + // Check if we need to use regional STS endpoints + // https://docs.aws.amazon.com/sdkref/latest/guide/feature-sts-regionalized-endpoints.html + if ("regional".equalsIgnoreCase(systemEnvironment.getEnv("AWS_STS_REGIONAL_ENDPOINTS"))) { + // AWS_REGION should be injected by the EKS pod identity webhook: + // https://github.com/aws/amazon-eks-pod-identity-webhook/pull/41 + stsRegion = systemEnvironment.getEnv(SDKGlobalConfiguration.AWS_REGION_ENV_VAR); + if (stsRegion != null) { + stsClientBuilder.withRegion(stsRegion); + } else { + LOGGER.warn("Unable to use regional STS endpoints because the AWS_REGION environment variable is not set"); + } + } + if (stsRegion == null) { + // Custom system property used for specifying a mocked version of the STS for testing + String customStsEndpoint = jvmEnvironment.getProperty("com.amazonaws.sdk.stsMetadataServiceEndpointOverride", STS_HOSTNAME); + // Set the region explicitly via the endpoint URL, so the AWS SDK doesn't make any guesses internally. + stsClientBuilder.withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(customStsEndpoint, null)); + } stsClientBuilder.withCredentials(new AWSStaticCredentialsProvider(new AnonymousAWSCredentials())); stsClient = SocketAccess.doPrivileged(stsClientBuilder::build); try { @@ -383,6 +399,10 @@ boolean isActive() { return credentialsProvider != null; } + String getStsRegion() { + return stsRegion; + } + @Override public AWSCredentials getCredentials() { Objects.requireNonNull(credentialsProvider, "credentialsProvider is not set"); diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java index 04c47bb9b55e6..cecb0cd147897 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java @@ -22,6 +22,7 @@ import org.junit.Assert; import org.mockito.Mockito; +import java.io.IOException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.URLDecoder; @@ -42,6 +43,15 @@ public class CustomWebIdentityTokenCredentialsProviderTests extends ESTestCase { private static final String ROLE_ARN = "arn:aws:iam::123456789012:role/FederatedWebIdentityRole"; private static final String ROLE_NAME = "aws-sdk-java-1651084775908"; + private static Environment getEnvironment() throws IOException { + Path configDirectory = createTempDir("web-identity-token-test"); + Files.createDirectory(configDirectory.resolve("repository-s3")); + Files.writeString(configDirectory.resolve("repository-s3/aws-web-identity-token-file"), "YXdzLXdlYi1pZGVudGl0eS10b2tlbi1maWxl"); + Environment environment = Mockito.mock(Environment.class); + Mockito.when(environment.configFile()).thenReturn(configDirectory); + return environment; + } + @SuppressForbidden(reason = "HTTP server is used for testing") public void testCreateWebIdentityTokenCredentialsProvider() throws Exception { HttpServer httpServer = 
MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress().getHostAddress(), 0), 0); @@ -88,11 +98,7 @@ public void testCreateWebIdentityTokenCredentialsProvider() throws Exception { }); httpServer.start(); - Path configDirectory = Files.createTempDirectory("web-identity-token-test"); - Files.createDirectory(configDirectory.resolve("repository-s3")); - Files.writeString(configDirectory.resolve("repository-s3/aws-web-identity-token-file"), "YXdzLXdlYi1pZGVudGl0eS10b2tlbi1maWxl"); - Environment environment = Mockito.mock(Environment.class); - Mockito.when(environment.configFile()).thenReturn(configDirectory); + Environment environment = getEnvironment(); // No region is set, but the SDK shouldn't fail because of that Map<String, String> environmentVariables = Map.of( @@ -125,4 +131,32 @@ public void testCreateWebIdentityTokenCredentialsProvider() throws Exception { httpServer.stop(0); } } + + public void testSupportRegionalizedEndpoints() throws Exception { + Map<String, String> environmentVariables = Map.of( + "AWS_WEB_IDENTITY_TOKEN_FILE", + "/var/run/secrets/eks.amazonaws.com/serviceaccount/token", + "AWS_ROLE_ARN", + ROLE_ARN, + "AWS_STS_REGIONAL_ENDPOINTS", + "regional", + "AWS_REGION", + "us-west-2" + ); + Map<String, String> systemProperties = Map.of(); + + var webIdentityTokenCredentialsProvider = new S3Service.CustomWebIdentityTokenCredentialsProvider( + getEnvironment(), + environmentVariables::get, + systemProperties::getOrDefault, + Clock.systemUTC() + ); + // We can't verify that webIdentityTokenCredentialsProvider's STS client uses the "https://sts.us-west-2.amazonaws.com" + // endpoint in a unit test. The client depends on a hardcoded RegionalEndpointsOptionResolver that in turn depends + // on the system environment that we can't change in the test. So we just verify that we called `withRegion` + // on stsClientBuilder, which should internally configure the endpoint correctly when the STS client is built. 
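+        // A condensed sketch of that resolution, mirroring the S3Service change above (`env` stands in for the
+        // injected systemEnvironment lookup, so the names here are illustrative only):
+        //   if ("regional".equalsIgnoreCase(env.getEnv("AWS_STS_REGIONAL_ENDPOINTS")) && env.getEnv("AWS_REGION") != null) {
+        //       stsClientBuilder.withRegion(env.getEnv("AWS_REGION")); // e.g. resolves to sts.us-west-2.amazonaws.com
+        //   }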
+ assertEquals("us-west-2", webIdentityTokenCredentialsProvider.getStsRegion()); + + webIdentityTokenCredentialsProvider.shutdown(); + } } diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/ESNetty4IntegTestCase.java b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/ESNetty4IntegTestCase.java index 09c6b3d50a380..c996f55198bf6 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/ESNetty4IntegTestCase.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/ESNetty4IntegTestCase.java @@ -19,11 +19,6 @@ public abstract class ESNetty4IntegTestCase extends ESIntegTestCase { - @Override - protected boolean ignoreExternalCluster() { - return true; - } - @Override protected boolean addMockTransportService() { return false; diff --git a/plugins/analysis-icu/src/internalClusterTest/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperIT.java b/plugins/analysis-icu/src/internalClusterTest/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperIT.java index e6f91efad0162..ae6a0cc71789f 100644 --- a/plugins/analysis-icu/src/internalClusterTest/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperIT.java +++ b/plugins/analysis-icu/src/internalClusterTest/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperIT.java @@ -12,7 +12,6 @@ import com.ibm.icu.util.ULocale; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugin.analysis.icu.AnalysisICUPlugin; import org.elasticsearch.plugins.Plugin; @@ -31,6 +30,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; public class ICUCollationKeywordFieldMapperIT extends ESIntegTestCase { @@ -82,10 +82,11 @@ public void testBasicUsage() throws Exception { .sort("id", SortOrder.DESC) // secondary sort should kick in because both will collate to same value ); - SearchResponse response = client().search(request).actionGet(); - assertNoFailures(response); - assertHitCount(response, 2L); - assertOrderedSearchHits(response, "2", "1"); + assertResponse(client().search(request), response -> { + assertNoFailures(response); + assertHitCount(response, 2L); + assertOrderedSearchHits(response, "2", "1"); + }); } public void testMultipleValues() throws Exception { @@ -126,10 +127,11 @@ public void testMultipleValues() throws Exception { .sort("id", SortOrder.DESC) // will be ignored ); - SearchResponse response = client().search(request).actionGet(); - assertNoFailures(response); - assertHitCount(response, 2L); - assertOrderedSearchHits(response, "1", "2"); + assertResponse(client().search(request), response -> { + assertNoFailures(response); + assertHitCount(response, 2L); + assertOrderedSearchHits(response, "1", "2"); + }); // same thing, using different sort mode that will use a for both docs request = new SearchRequest().indices(index) @@ -141,10 +143,11 @@ public void testMultipleValues() throws Exception { .sort("id", SortOrder.DESC) // will NOT be ignored and will determine order ); - response = 
client().search(request).actionGet(); - assertNoFailures(response); - assertHitCount(response, 2L); - assertOrderedSearchHits(response, "2", "1"); + assertResponse(client().search(request), response -> { + assertNoFailures(response); + assertHitCount(response, 2L); + assertOrderedSearchHits(response, "2", "1"); + }); } /* @@ -186,10 +189,11 @@ public void testNormalization() throws Exception { .sort("id", SortOrder.DESC) // secondary sort should kick in because both will collate to same value ); - SearchResponse response = client().search(request).actionGet(); - assertNoFailures(response); - assertHitCount(response, 2L); - assertOrderedSearchHits(response, "2", "1"); + assertResponse(client().search(request), response -> { + assertNoFailures(response); + assertHitCount(response, 2L); + assertOrderedSearchHits(response, "2", "1"); + }); } /* @@ -230,10 +234,11 @@ public void testSecondaryStrength() throws Exception { .sort("id", SortOrder.DESC) // secondary sort should kick in because both will collate to same value ); - SearchResponse response = client().search(request).actionGet(); - assertNoFailures(response); - assertHitCount(response, 2L); - assertOrderedSearchHits(response, "2", "1"); + assertResponse(client().search(request), response -> { + assertNoFailures(response); + assertHitCount(response, 2L); + assertOrderedSearchHits(response, "2", "1"); + }); } /* @@ -275,10 +280,11 @@ public void testIgnorePunctuation() throws Exception { .sort("id", SortOrder.DESC) // secondary sort should kick in because both will collate to same value ); - SearchResponse response = client().search(request).actionGet(); - assertNoFailures(response); - assertHitCount(response, 2L); - assertOrderedSearchHits(response, "2", "1"); + assertResponse(client().search(request), response -> { + assertNoFailures(response); + assertHitCount(response, 2L); + assertOrderedSearchHits(response, "2", "1"); + }); } /* @@ -321,10 +327,11 @@ public void testIgnoreWhitespace() throws Exception { .sort("id", SortOrder.ASC) ); - SearchResponse response = client().search(request).actionGet(); - assertNoFailures(response); - assertHitCount(response, 3L); - assertOrderedSearchHits(response, "3", "1", "2"); + assertResponse(client().search(request), response -> { + assertNoFailures(response); + assertHitCount(response, 3L); + assertOrderedSearchHits(response, "3", "1", "2"); + }); } /* @@ -354,10 +361,11 @@ public void testNumerics() throws Exception { SearchRequest request = new SearchRequest().indices(index) .source(new SearchSourceBuilder().fetchSource(false).sort("collate", SortOrder.ASC)); - SearchResponse response = client().search(request).actionGet(); - assertNoFailures(response); - assertHitCount(response, 2L); - assertOrderedSearchHits(response, "2", "1"); + assertResponse(client().search(request), response -> { + assertNoFailures(response); + assertHitCount(response, 2L); + assertOrderedSearchHits(response, "2", "1"); + }); } /* @@ -393,10 +401,11 @@ public void testIgnoreAccentsButNotCase() throws Exception { SearchRequest request = new SearchRequest().indices(index) .source(new SearchSourceBuilder().fetchSource(false).sort("collate", SortOrder.ASC).sort("id", SortOrder.DESC)); - SearchResponse response = client().search(request).actionGet(); - assertNoFailures(response); - assertHitCount(response, 4L); - assertOrderedSearchHits(response, "3", "1", "4", "2"); + assertResponse(client().search(request), response -> { + assertNoFailures(response); + assertHitCount(response, 4L); + assertOrderedSearchHits(response, "3", 
"1", "4", "2"); + }); } /* @@ -429,10 +438,11 @@ public void testUpperCaseFirst() throws Exception { SearchRequest request = new SearchRequest().indices(index) .source(new SearchSourceBuilder().fetchSource(false).sort("collate", SortOrder.ASC)); - SearchResponse response = client().search(request).actionGet(); - assertNoFailures(response); - assertHitCount(response, 2L); - assertOrderedSearchHits(response, "2", "1"); + assertResponse(client().search(request), response -> { + assertNoFailures(response); + assertHitCount(response, 2L); + assertOrderedSearchHits(response, "2", "1"); + }); } /* @@ -482,9 +492,10 @@ public void testCustomRules() throws Exception { .sort("id", SortOrder.DESC) // secondary sort should kick in because both will collate to same value ); - SearchResponse response = client().search(request).actionGet(); - assertNoFailures(response); - assertHitCount(response, 2L); - assertOrderedSearchHits(response, "2", "1"); + assertResponse(client().search(request), response -> { + assertNoFailures(response); + assertHitCount(response, 2L); + assertOrderedSearchHits(response, "2", "1"); + }); } } diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextHighlighter.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextHighlighter.java index 45c2a9208b8d6..9b7c6afbb9f10 100644 --- a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextHighlighter.java +++ b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextHighlighter.java @@ -56,7 +56,7 @@ protected Analyzer wrapAnalyzer(Analyzer analyzer, Integer maxAnalyzedOffset) { } @Override - protected PassageFormatter getPassageFormatter(HitContext hitContext, SearchHighlightContext.Field field, Encoder encoder) { + protected PassageFormatter getPassageFormatter(SearchHighlightContext.Field field, Encoder encoder) { return new AnnotatedPassageFormatter(encoder); } diff --git a/plugins/mapper-size/src/internalClusterTest/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java b/plugins/mapper-size/src/internalClusterTest/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java index e92c7ca4bdebb..026dabd64eb0b 100644 --- a/plugins/mapper-size/src/internalClusterTest/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java +++ b/plugins/mapper-size/src/internalClusterTest/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java @@ -9,7 +9,6 @@ import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.plugin.mapper.MapperSizePlugin; import org.elasticsearch.plugins.Plugin; @@ -24,6 +23,7 @@ import java.util.Map; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.is; @@ -110,43 +110,64 @@ public void testGetWithFields() throws Exception { assertAcked(prepareCreate("test").setMapping("_size", "enabled=true")); final String source = "{\"f\":\"" + randomAlphaOfLengthBetween(1, 100) + "\"}"; indexRandom(true, 
client().prepareIndex("test").setId("1").setSource(source, XContentType.JSON)); - SearchResponse searchResponse = prepareSearch("test").addFetchField("_size").get(); - assertEquals(source.length(), ((Long) searchResponse.getHits().getHits()[0].getFields().get("_size").getValue()).intValue()); + assertResponse( + prepareSearch("test").addFetchField("_size"), + response -> assertEquals( + source.length(), + ((Long) response.getHits().getHits()[0].getFields().get("_size").getValue()).intValue() + ) + ); // this should not work when requesting fields via wildcard expression - searchResponse = prepareSearch("test").addFetchField("*").get(); - assertNull(searchResponse.getHits().getHits()[0].getFields().get("_size")); + assertResponse( + prepareSearch("test").addFetchField("*"), + response -> assertNull(response.getHits().getHits()[0].getFields().get("_size")) + ); // This should STILL work - searchResponse = prepareSearch("test").addStoredField("*").get(); - assertNotNull(searchResponse.getHits().getHits()[0].getFields().get("_size")); + assertResponse( + prepareSearch("test").addStoredField("*"), + response -> assertNotNull(response.getHits().getHits()[0].getFields().get("_size")) + ); } public void testWildCardWithFieldsWhenDisabled() throws Exception { assertAcked(prepareCreate("test").setMapping("_size", "enabled=false")); final String source = "{\"f\":\"" + randomAlphaOfLengthBetween(1, 100) + "\"}"; indexRandom(true, client().prepareIndex("test").setId("1").setSource(source, XContentType.JSON)); - SearchResponse searchResponse = prepareSearch("test").addFetchField("_size").get(); - assertNull(searchResponse.getHits().getHits()[0].getFields().get("_size")); + assertResponse( + prepareSearch("test").addFetchField("_size"), + response -> assertNull(response.getHits().getHits()[0].getFields().get("_size")) + ); - searchResponse = prepareSearch("test").addFetchField("*").get(); - assertNull(searchResponse.getHits().getHits()[0].getFields().get("_size")); + assertResponse( + prepareSearch("test").addFetchField("*"), + response -> assertNull(response.getHits().getHits()[0].getFields().get("_size")) + ); - searchResponse = prepareSearch("test").addStoredField("*").get(); - assertNull(searchResponse.getHits().getHits()[0].getFields().get("_size")); + assertResponse( + prepareSearch("test").addStoredField("*"), + response -> assertNull(response.getHits().getHits()[0].getFields().get("_size")) + ); } public void testWildCardWithFieldsWhenNotProvided() throws Exception { assertAcked(prepareCreate("test")); final String source = "{\"f\":\"" + randomAlphaOfLengthBetween(1, 100) + "\"}"; indexRandom(true, client().prepareIndex("test").setId("1").setSource(source, XContentType.JSON)); - SearchResponse searchResponse = prepareSearch("test").addFetchField("_size").get(); - assertNull(searchResponse.getHits().getHits()[0].getFields().get("_size")); + assertResponse( + prepareSearch("test").addFetchField("_size"), + response -> assertNull(response.getHits().getHits()[0].getFields().get("_size")) + ); - searchResponse = prepareSearch("test").addFetchField("*").get(); - assertNull(searchResponse.getHits().getHits()[0].getFields().get("_size")); + assertResponse( + prepareSearch("test").addFetchField("*"), + response -> assertNull(response.getHits().getHits()[0].getFields().get("_size")) + ); - searchResponse = prepareSearch("test").addStoredField("*").get(); - assertNull(searchResponse.getHits().getHits()[0].getFields().get("_size")); + assertResponse( + prepareSearch("test").addStoredField("*"), + response -> 
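+ // (Aside, a sketch of assertResponse's contract as relied on throughout this file, assuming the
+ // ElasticsearchAssertions helper releases the ref-counted response once the assertions have run:
+ //
+ //     SearchResponse r = requestBuilderOrFuture.get();
+ //     try {
+ //         consumer.accept(r); // the assertion lambda
+ //     } finally {
+ //         r.decRef(); // release the ref-counted SearchResponse
+ //     }
+ //
+ // which is why the explicit SearchResponse locals and .get() calls could be dropped.)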
assertNull(response.getHits().getHits()[0].getFields().get("_size")) + ); } } diff --git a/plugins/store-smb/src/internalClusterTest/java/org/elasticsearch/index/store/smb/AbstractAzureFsTestCase.java b/plugins/store-smb/src/internalClusterTest/java/org/elasticsearch/index/store/smb/AbstractAzureFsTestCase.java index 4d1f6426821c4..4a35779a42166 100644 --- a/plugins/store-smb/src/internalClusterTest/java/org/elasticsearch/index/store/smb/AbstractAzureFsTestCase.java +++ b/plugins/store-smb/src/internalClusterTest/java/org/elasticsearch/index/store/smb/AbstractAzureFsTestCase.java @@ -8,7 +8,6 @@ package org.elasticsearch.index.store.smb; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.plugin.store.smb.SMBStorePlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; @@ -16,7 +15,7 @@ import java.util.Arrays; import java.util.Collection; -import static org.hamcrest.Matchers.is; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; public abstract class AbstractAzureFsTestCase extends ESIntegTestCase { @Override @@ -32,7 +31,6 @@ public void testAzureFs() { indexDoc("test", "" + i, "foo", "bar"); } refresh(); - SearchResponse response = prepareSearch("test").get(); - assertThat(response.getHits().getTotalHits().value, is(nbDocs)); + assertHitCount(prepareSearch("test"), nbDocs); } } diff --git a/qa/ccs-rolling-upgrade-remote-cluster/src/test/java/org/elasticsearch/upgrades/SearchStatesIT.java b/qa/ccs-rolling-upgrade-remote-cluster/src/test/java/org/elasticsearch/upgrades/SearchStatesIT.java index 1bb2116cc680a..63860c6355630 100644 --- a/qa/ccs-rolling-upgrade-remote-cluster/src/test/java/org/elasticsearch/upgrades/SearchStatesIT.java +++ b/qa/ccs-rolling-upgrade-remote-cluster/src/test/java/org/elasticsearch/upgrades/SearchStatesIT.java @@ -175,8 +175,12 @@ void verifySearch(String localIndex, int localNumDocs, String remoteIndex, int r ) ) { SearchResponse searchResponse = SearchResponse.fromXContent(parser); - ElasticsearchAssertions.assertNoFailures(searchResponse); - ElasticsearchAssertions.assertHitCount(searchResponse, expectedDocs); + try { + ElasticsearchAssertions.assertNoFailures(searchResponse); + ElasticsearchAssertions.assertHitCount(searchResponse, expectedDocs); + } finally { + searchResponse.decRef(); + } } } catch (IOException e) { throw new UncheckedIOException(e); diff --git a/qa/ccs-unavailable-clusters/src/javaRestTest/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java b/qa/ccs-unavailable-clusters/src/javaRestTest/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java index 3279777c793ba..c93ed4e39829e 100644 --- a/qa/ccs-unavailable-clusters/src/javaRestTest/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java +++ b/qa/ccs-unavailable-clusters/src/javaRestTest/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java @@ -12,26 +12,20 @@ import org.apache.http.entity.ContentType; import org.apache.http.nio.entity.NStringEntity; import org.apache.lucene.search.TotalHits; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.TransportVersion; import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchAction; import 
org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.action.search.SearchShardsAction; import org.elasticsearch.action.search.SearchShardsRequest; import org.elasticsearch.action.search.SearchShardsResponse; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.client.Request; -import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; -import org.elasticsearch.client.RestClient; -import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -45,13 +39,12 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.ObjectPath; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; -import org.junit.AfterClass; -import org.junit.Before; import java.io.IOException; import java.util.Collections; @@ -62,28 +55,13 @@ import java.util.concurrent.TimeUnit; import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.equalTo; @SuppressWarnings("removal") public class CrossClusterSearchUnavailableClusterIT extends ESRestTestCase { - private static RestHighLevelClient restHighLevelClient; - private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); - @Before - public void initHighLevelClient() throws IOException { - super.initClient(); - if (restHighLevelClient == null) { - restHighLevelClient = new HighLevelClient(client()); - } - } - - @AfterClass - public static void cleanupClient() throws IOException { - restHighLevelClient.close(); - restHighLevelClient = null; - } - @Override public void tearDown() throws Exception { super.tearDown(); @@ -176,57 +154,74 @@ public void testSearchSkipUnavailable() throws IOException { updateRemoteClusterSettings(Collections.singletonMap("seeds", remoteNode.getAddress().toString())); for (int i = 0; i < 10; i++) { - restHighLevelClient.index(new IndexRequest("index").id(String.valueOf(i)).source("field", "value"), RequestOptions.DEFAULT); + Request request = new Request("POST", "/index/_doc"); + request.setJsonEntity("{ \"field\" : \"value\" }"); + Response response = client().performRequest(request); + assertEquals(201, response.getStatusLine().getStatusCode()); } Response refreshResponse = client().performRequest(new Request("POST", "/index/_refresh")); assertEquals(200, refreshResponse.getStatusLine().getStatusCode()); { - SearchResponse response = restHighLevelClient.search(new SearchRequest("index"), RequestOptions.DEFAULT); - assertSame(SearchResponse.Clusters.EMPTY, response.getClusters()); - assertEquals(10, response.getHits().getTotalHits().value); - assertEquals(10, response.getHits().getHits().length); + Response response = client().performRequest(new Request("GET", "/index/_search")); + assertEquals(200, response.getStatusLine().getStatusCode()); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + 
assertNull(objectPath.evaluate("_clusters")); + assertThat(objectPath.evaluate("hits.total.value"), equalTo(10)); + assertThat(objectPath.evaluateArraySize("hits.hits"), equalTo(10)); } { - SearchResponse response = restHighLevelClient.search(new SearchRequest("index", "remote1:index"), RequestOptions.DEFAULT); - assertEquals(2, response.getClusters().getTotal()); - assertEquals(2, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.RUNNING)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.FAILED)); - assertEquals(10, response.getHits().getTotalHits().value); - assertEquals(10, response.getHits().getHits().length); + Response response = client().performRequest(new Request("GET", "/index,remote1:index/_search")); + assertEquals(200, response.getStatusLine().getStatusCode()); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + assertNotNull(objectPath.evaluate("_clusters")); + assertThat(objectPath.evaluate("_clusters.total"), equalTo(2)); + assertThat(objectPath.evaluate("_clusters.successful"), equalTo(2)); + assertThat(objectPath.evaluate("_clusters.skipped"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.running"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.partial"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.failed"), equalTo(0)); + assertThat(objectPath.evaluate("hits.total.value"), equalTo(10)); + assertThat(objectPath.evaluateArraySize("hits.hits"), equalTo(10)); } { - SearchResponse response = restHighLevelClient.search(new SearchRequest("remote1:index"), RequestOptions.DEFAULT); - assertEquals(1, response.getClusters().getTotal()); - assertEquals(1, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.RUNNING)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.FAILED)); - assertEquals(0, response.getHits().getTotalHits().value); + Response response = client().performRequest(new Request("GET", "/remote1:index/_search")); + assertEquals(200, response.getStatusLine().getStatusCode()); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + assertNotNull(objectPath.evaluate("_clusters")); + assertThat(objectPath.evaluate("_clusters.total"), equalTo(1)); + assertThat(objectPath.evaluate("_clusters.successful"), equalTo(1)); + assertThat(objectPath.evaluate("_clusters.skipped"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.running"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.partial"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.failed"), equalTo(0)); + assertThat(objectPath.evaluate("hits.total.value"), equalTo(0)); + assertThat(objectPath.evaluateArraySize("hits.hits"), equalTo(0)); } { - SearchResponse response = restHighLevelClient.search( - new SearchRequest("index", "remote1:index").scroll("1m"), - RequestOptions.DEFAULT - ); - 
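+ // (Aside: the migration pattern in this test replaces typed RestHighLevelClient assertions with raw REST
+ // calls inspected via ObjectPath; for example, assuming the local and remote indices are populated as above:
+ //
+ //     Response rsp = client().performRequest(new Request("GET", "/index,remote1:index/_search?scroll=1m"));
+ //     ObjectPath path = ObjectPath.createFromResponse(rsp);
+ //     assertThat(path.evaluate("_clusters.total"), equalTo(2)); // typed getters become JSON-path lookups
+ //     assertThat(path.evaluateArraySize("hits.hits"), equalTo(10)); // hit arrays are sized from the body
+ //
+ // using the same ObjectPath.evaluate/evaluateArraySize helpers the added lines below rely on.)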
assertEquals(2, response.getClusters().getTotal()); - assertEquals(2, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.RUNNING)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.FAILED)); - assertEquals(10, response.getHits().getTotalHits().value); - assertEquals(10, response.getHits().getHits().length); - String scrollId = response.getScrollId(); - SearchResponse scrollResponse = restHighLevelClient.scroll(new SearchScrollRequest(scrollId), RequestOptions.DEFAULT); - assertSame(SearchResponse.Clusters.EMPTY, scrollResponse.getClusters()); - assertEquals(10, scrollResponse.getHits().getTotalHits().value); - assertEquals(0, scrollResponse.getHits().getHits().length); + Response response = client().performRequest(new Request("GET", "/index,remote1:index/_search?scroll=1m")); + assertEquals(200, response.getStatusLine().getStatusCode()); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + assertNotNull(objectPath.evaluate("_clusters")); + assertThat(objectPath.evaluate("_clusters.total"), equalTo(2)); + assertThat(objectPath.evaluate("_clusters.successful"), equalTo(2)); + assertThat(objectPath.evaluate("_clusters.skipped"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.running"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.partial"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.failed"), equalTo(0)); + assertThat(objectPath.evaluate("hits.total.value"), equalTo(10)); + assertThat(objectPath.evaluateArraySize("hits.hits"), equalTo(10)); + String scrollId = objectPath.evaluate("_scroll_id"); + assertNotNull(scrollId); + Request scrollRequest = new Request("POST", "/_search/scroll"); + scrollRequest.setJsonEntity("{ \"scroll_id\" : \"" + scrollId + "\" }"); + Response scrollResponse = client().performRequest(scrollRequest); + assertEquals(200, scrollResponse.getStatusLine().getStatusCode()); + ObjectPath scrollObjectPath = ObjectPath.createFromResponse(scrollResponse); + assertNull(scrollObjectPath.evaluate("_clusters")); + assertThat(scrollObjectPath.evaluate("hits.total.value"), equalTo(10)); + assertThat(scrollObjectPath.evaluateArraySize("hits.hits"), equalTo(0)); } remoteTransport.close(); @@ -234,45 +229,57 @@ public void testSearchSkipUnavailable() throws IOException { updateRemoteClusterSettings(Collections.singletonMap("skip_unavailable", true)); { - SearchResponse response = restHighLevelClient.search(new SearchRequest("index", "remote1:index"), RequestOptions.DEFAULT); - assertEquals(2, response.getClusters().getTotal()); - assertEquals(1, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertEquals(1, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.RUNNING)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.FAILED)); - assertEquals(10, response.getHits().getTotalHits().value); - assertEquals(10, response.getHits().getHits().length); + Response 
response = client().performRequest(new Request("GET", "/index,remote1:index/_search")); + assertEquals(200, response.getStatusLine().getStatusCode()); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + assertNotNull(objectPath.evaluate("_clusters")); + assertThat(objectPath.evaluate("_clusters.total"), equalTo(2)); + assertThat(objectPath.evaluate("_clusters.successful"), equalTo(1)); + assertThat(objectPath.evaluate("_clusters.skipped"), equalTo(1)); + assertThat(objectPath.evaluate("_clusters.running"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.partial"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.failed"), equalTo(0)); + assertThat(objectPath.evaluate("hits.total.value"), equalTo(10)); + assertThat(objectPath.evaluateArraySize("hits.hits"), equalTo(10)); } { - SearchResponse response = restHighLevelClient.search(new SearchRequest("remote1:index"), RequestOptions.DEFAULT); - assertEquals(1, response.getClusters().getTotal()); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertEquals(1, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.RUNNING)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.FAILED)); - assertEquals(0, response.getHits().getTotalHits().value); + Response response = client().performRequest(new Request("GET", "/remote1:index/_search")); + assertEquals(200, response.getStatusLine().getStatusCode()); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + assertNotNull(objectPath.evaluate("_clusters")); + assertThat(objectPath.evaluate("_clusters.total"), equalTo(1)); + assertThat(objectPath.evaluate("_clusters.successful"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.skipped"), equalTo(1)); + assertThat(objectPath.evaluate("_clusters.running"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.partial"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.failed"), equalTo(0)); + assertThat(objectPath.evaluate("hits.total.value"), equalTo(0)); + assertThat(objectPath.evaluateArraySize("hits.hits"), equalTo(0)); } { - SearchResponse response = restHighLevelClient.search( - new SearchRequest("index", "remote1:index").scroll("1m"), - RequestOptions.DEFAULT - ); - assertEquals(2, response.getClusters().getTotal()); - assertEquals(1, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertEquals(1, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.RUNNING)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.FAILED)); - assertEquals(10, response.getHits().getTotalHits().value); - assertEquals(10, response.getHits().getHits().length); - String scrollId = response.getScrollId(); - SearchResponse scrollResponse = restHighLevelClient.scroll(new SearchScrollRequest(scrollId), RequestOptions.DEFAULT); - assertSame(SearchResponse.Clusters.EMPTY, scrollResponse.getClusters()); - assertEquals(10, scrollResponse.getHits().getTotalHits().value); - assertEquals(0, 
scrollResponse.getHits().getHits().length); + Response response = client().performRequest(new Request("GET", "/index,remote1:index/_search?scroll=1m")); + assertEquals(200, response.getStatusLine().getStatusCode()); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + assertNotNull(objectPath.evaluate("_clusters")); + assertThat(objectPath.evaluate("_clusters.total"), equalTo(2)); + assertThat(objectPath.evaluate("_clusters.successful"), equalTo(1)); + assertThat(objectPath.evaluate("_clusters.skipped"), equalTo(1)); + assertThat(objectPath.evaluate("_clusters.running"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.partial"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.failed"), equalTo(0)); + assertThat(objectPath.evaluate("hits.total.value"), equalTo(10)); + assertThat(objectPath.evaluateArraySize("hits.hits"), equalTo(10)); + String scrollId = objectPath.evaluate("_scroll_id"); + assertNotNull(scrollId); + Request scrollRequest = new Request("POST", "/_search/scroll"); + scrollRequest.setJsonEntity("{ \"scroll_id\" : \"" + scrollId + "\" }"); + Response scrollResponse = client().performRequest(scrollRequest); + assertEquals(200, scrollResponse.getStatusLine().getStatusCode()); + ObjectPath scrollObjectPath = ObjectPath.createFromResponse(scrollResponse); + assertNull(scrollObjectPath.evaluate("_clusters")); + assertThat(scrollObjectPath.evaluate("hits.total.value"), equalTo(10)); + assertThat(scrollObjectPath.evaluateArraySize("hits.hits"), equalTo(0)); } updateRemoteClusterSettings(Collections.singletonMap("skip_unavailable", false)); @@ -344,28 +351,25 @@ public void testSkipUnavailableDependsOnSeeds() throws IOException { private static void assertSearchConnectFailure() { { - ElasticsearchException exception = expectThrows( - ElasticsearchException.class, - () -> restHighLevelClient.search(new SearchRequest("index", "remote1:index"), RequestOptions.DEFAULT) + ResponseException exception = expectThrows( + ResponseException.class, + () -> client().performRequest(new Request("POST", "/index,remote1:index/_search")) ); - ElasticsearchException rootCause = (ElasticsearchException) exception.getRootCause(); - assertThat(rootCause.getMessage(), containsString("connect_exception")); + assertThat(exception.getMessage(), containsString("connect_exception")); } { - ElasticsearchException exception = expectThrows( - ElasticsearchException.class, - () -> restHighLevelClient.search(new SearchRequest("remote1:index"), RequestOptions.DEFAULT) + ResponseException exception = expectThrows( + ResponseException.class, + () -> client().performRequest(new Request("POST", "/remote1:index/_search")) ); - ElasticsearchException rootCause = (ElasticsearchException) exception.getRootCause(); - assertThat(rootCause.getMessage(), containsString("connect_exception")); + assertThat(exception.getMessage(), containsString("connect_exception")); } { - ElasticsearchException exception = expectThrows( - ElasticsearchException.class, - () -> restHighLevelClient.search(new SearchRequest("remote1:index").scroll("1m"), RequestOptions.DEFAULT) + ResponseException exception = expectThrows( + ResponseException.class, + () -> client().performRequest(new Request("POST", "/remote1:index/_search?scroll=1m")) ); - ElasticsearchException rootCause = (ElasticsearchException) exception.getRootCause(); - assertThat(rootCause.getMessage(), containsString("connect_exception")); + assertThat(exception.getMessage(), containsString("connect_exception")); } } @@ -399,12 +403,6 @@ private static 
HttpEntity buildUpdateSettingsRequestBody(Map set return new NStringEntity(requestBody, ContentType.APPLICATION_JSON); } - private static class HighLevelClient extends RestHighLevelClient { - private HighLevelClient(RestClient restClient) { - super(restClient, (client) -> {}, Collections.emptyList()); - } - } - @Override protected Settings restClientSettings() { String token = basicAuthHeaderValue("admin", new SecureString("admin-password".toCharArray())); diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 6af9bc9b11723..06b92b8138cf7 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -27,6 +27,7 @@ import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.rest.action.admin.indices.RestPutIndexTemplateAction; import org.elasticsearch.test.NotEqualMessageBuilder; @@ -889,7 +890,7 @@ public void testRecovery() throws Exception { if (isRunningAgainstOldCluster()) { count = between(200, 300); Settings.Builder settings = Settings.builder(); - if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) { + if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); } final String mappings = randomBoolean() ? "\"_source\": { \"enabled\": false}" : null; @@ -941,7 +942,7 @@ public void testSnapshotRestore() throws IOException { // Create the index count = between(200, 300); Settings.Builder settings = Settings.builder(); - if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) { + if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); } createIndex(index, settings.build()); @@ -1435,7 +1436,7 @@ public void testPeerRecoveryRetentionLeases() throws Exception { public void testOperationBasedRecovery() throws Exception { if (isRunningAgainstOldCluster()) { Settings.Builder settings = indexSettings(1, 1); - if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) { + if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); } final String mappings = randomBoolean() ? "\"_source\": { \"enabled\": false}" : null; @@ -1498,7 +1499,7 @@ public void testResize() throws Exception { final Settings.Builder settings = Settings.builder() .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 3) .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1); - if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) { + if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false); } final String mappings = randomBoolean() ? 
"\"_source\": { \"enabled\": false}" : null; diff --git a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java index 5255cbf401c9a..275a41849d353 100644 --- a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java +++ b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java @@ -1050,53 +1050,65 @@ private static Map duelSearchSync(SearchRequest searchRequest, C throw new AssertionError("one of the two requests returned an exception", exception2.get()); } SearchResponse minimizeRoundtripsSearchResponse = minimizeRoundtripsResponse.get(); + SearchResponse fanOutSearchResponse = null; + try { + responseChecker.accept(minimizeRoundtripsSearchResponse); + + // if only the remote cluster was searched, then only one reduce phase is expected + int expectedReducePhasesMinRoundTrip = 1; + if (searchRequest.indices().length > 1) { + expectedReducePhasesMinRoundTrip = searchRequest.indices().length + 1; + } - responseChecker.accept(minimizeRoundtripsSearchResponse); - - // if only the remote cluster was searched, then only one reduce phase is expected - int expectedReducePhasesMinRoundTrip = 1; - if (searchRequest.indices().length > 1) { - expectedReducePhasesMinRoundTrip = searchRequest.indices().length + 1; - } - - assertEquals(expectedReducePhasesMinRoundTrip, minimizeRoundtripsSearchResponse.getNumReducePhases()); - SearchResponse fanOutSearchResponse = fanOutResponse.get(); - responseChecker.accept(fanOutSearchResponse); - assertEquals(1, fanOutSearchResponse.getNumReducePhases()); + assertEquals(expectedReducePhasesMinRoundTrip, minimizeRoundtripsSearchResponse.getNumReducePhases()); + fanOutSearchResponse = fanOutResponse.get(); + responseChecker.accept(fanOutSearchResponse); + assertEquals(1, fanOutSearchResponse.getNumReducePhases()); - // compare Clusters objects - SearchResponse.Clusters clustersMRT = minimizeRoundtripsSearchResponse.getClusters(); - SearchResponse.Clusters clustersMRTFalse = fanOutSearchResponse.getClusters(); + // compare Clusters objects + SearchResponse.Clusters clustersMRT = minimizeRoundtripsSearchResponse.getClusters(); + SearchResponse.Clusters clustersMRTFalse = fanOutSearchResponse.getClusters(); - assertEquals(clustersMRT.getTotal(), clustersMRTFalse.getTotal()); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL) - ); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED) - ); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING) - ); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL) - ); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.FAILED) - ); + assertEquals(clustersMRT.getTotal(), clustersMRTFalse.getTotal()); + assertEquals( + clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), + clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL) + ); + assertEquals( + 
clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), + clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED) + ); + assertEquals( + clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), + clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING) + ); + assertEquals( + clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), + clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL) + ); + assertEquals( + clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), + clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.FAILED) + ); - Map minimizeRoundtripsResponseMap = responseToMap(minimizeRoundtripsSearchResponse); - if (clustersMRT.hasClusterObjects() && clustersMRTFalse.hasClusterObjects()) { - Map fanOutResponseMap = responseToMap(fanOutSearchResponse); - compareResponseMaps(minimizeRoundtripsResponseMap, fanOutResponseMap, "Comparing sync_search minimizeRoundTrip vs. fanOut"); - assertThat(minimizeRoundtripsSearchResponse.getSkippedShards(), lessThanOrEqualTo(fanOutSearchResponse.getSkippedShards())); + Map minimizeRoundtripsResponseMap = responseToMap(minimizeRoundtripsSearchResponse); + if (clustersMRT.hasClusterObjects() && clustersMRTFalse.hasClusterObjects()) { + Map fanOutResponseMap = responseToMap(fanOutSearchResponse); + compareResponseMaps( + minimizeRoundtripsResponseMap, + fanOutResponseMap, + "Comparing sync_search minimizeRoundTrip vs. fanOut" + ); + assertThat( + minimizeRoundtripsSearchResponse.getSkippedShards(), + lessThanOrEqualTo(fanOutSearchResponse.getSkippedShards()) + ); + } + return minimizeRoundtripsResponseMap; + } finally { + if (fanOutSearchResponse != null) fanOutSearchResponse.decRef(); + if (minimizeRoundtripsSearchResponse != null) minimizeRoundtripsSearchResponse.decRef(); } - return minimizeRoundtripsResponseMap; } } @@ -1139,54 +1151,65 @@ private static Map duelSearchAsync(SearchRequest searchRequest, } finally { deleteAsyncSearch(fanOutResponse.getId()); } - SearchResponse minimizeRoundtripsSearchResponse = minimizeRoundtripsResponse.getSearchResponse(); - SearchResponse fanOutSearchResponse = fanOutResponse.getSearchResponse(); + SearchResponse minimizeRoundtripsSearchResponse = null; + SearchResponse fanOutSearchResponse = null; + try { + fanOutSearchResponse = fanOutResponse.getSearchResponse(); + minimizeRoundtripsSearchResponse = minimizeRoundtripsResponse.getSearchResponse(); - responseChecker.accept(minimizeRoundtripsSearchResponse); + responseChecker.accept(minimizeRoundtripsSearchResponse); - // if only the remote cluster was searched, then only one reduce phase is expected - int expectedReducePhasesMinRoundTrip = 1; - if (searchRequest.indices().length > 1) { - expectedReducePhasesMinRoundTrip = searchRequest.indices().length + 1; - } - assertEquals(expectedReducePhasesMinRoundTrip, minimizeRoundtripsSearchResponse.getNumReducePhases()); + // if only the remote cluster was searched, then only one reduce phase is expected + int expectedReducePhasesMinRoundTrip = 1; + if (searchRequest.indices().length > 1) { + expectedReducePhasesMinRoundTrip = searchRequest.indices().length + 1; + } + assertEquals(expectedReducePhasesMinRoundTrip, minimizeRoundtripsSearchResponse.getNumReducePhases()); - responseChecker.accept(fanOutSearchResponse); - assertEquals(1, fanOutSearchResponse.getNumReducePhases()); + responseChecker.accept(fanOutSearchResponse); + assertEquals(1, 
fanOutSearchResponse.getNumReducePhases()); - // compare Clusters objects - SearchResponse.Clusters clustersMRT = minimizeRoundtripsSearchResponse.getClusters(); - SearchResponse.Clusters clustersMRTFalse = fanOutSearchResponse.getClusters(); + // compare Clusters objects + SearchResponse.Clusters clustersMRT = minimizeRoundtripsSearchResponse.getClusters(); + SearchResponse.Clusters clustersMRTFalse = fanOutSearchResponse.getClusters(); - assertEquals(clustersMRT.getTotal(), clustersMRTFalse.getTotal()); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL) - ); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED) - ); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING) - ); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL) - ); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.FAILED) - ); + assertEquals(clustersMRT.getTotal(), clustersMRTFalse.getTotal()); + assertEquals( + clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), + clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL) + ); + assertEquals( + clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), + clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED) + ); + assertEquals( + clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), + clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING) + ); + assertEquals( + clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), + clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL) + ); + assertEquals( + clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), + clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.FAILED) + ); - Map minimizeRoundtripsResponseMap = responseToMap(minimizeRoundtripsSearchResponse); - if (clustersMRT.hasClusterObjects() && clustersMRTFalse.hasClusterObjects()) { - Map fanOutResponseMap = responseToMap(fanOutSearchResponse); - compareResponseMaps(minimizeRoundtripsResponseMap, fanOutResponseMap, "Comparing async_search minimizeRoundTrip vs. fanOut"); - assertThat(minimizeRoundtripsSearchResponse.getSkippedShards(), lessThanOrEqualTo(fanOutSearchResponse.getSkippedShards())); + Map minimizeRoundtripsResponseMap = responseToMap(minimizeRoundtripsSearchResponse); + if (clustersMRT.hasClusterObjects() && clustersMRTFalse.hasClusterObjects()) { + Map fanOutResponseMap = responseToMap(fanOutSearchResponse); + compareResponseMaps( + minimizeRoundtripsResponseMap, + fanOutResponseMap, + "Comparing async_search minimizeRoundTrip vs. 
fanOut" + ); + assertThat(minimizeRoundtripsSearchResponse.getSkippedShards(), lessThanOrEqualTo(fanOutSearchResponse.getSkippedShards())); + } + return minimizeRoundtripsResponseMap; + } finally { + if (minimizeRoundtripsSearchResponse != null) minimizeRoundtripsSearchResponse.decRef(); + if (fanOutSearchResponse != null) fanOutSearchResponse.decRef(); } - return minimizeRoundtripsResponseMap; } private static void compareResponseMaps(Map responseMap1, Map responseMap2, String info) { diff --git a/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java index eb05d331af033..c3ee7307bf821 100644 --- a/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.upgrades; import org.apache.http.util.EntityUtils; -import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.Version; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.Request; @@ -54,7 +53,6 @@ /** * In depth testing of the recovery mechanism during a rolling restart. */ -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99778") public class RecoveryIT extends AbstractRollingTestCase { private static String CLUSTER_NAME = System.getProperty("tests.clustername"); @@ -307,7 +305,7 @@ public void testRecovery() throws Exception { // before timing out .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms") .put(SETTING_ALLOCATION_MAX_RETRY.getKey(), "0"); // fail faster - if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) { + if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); } createIndex(index, settings.build()); @@ -342,7 +340,7 @@ public void testRetentionLeasesEstablishedWhenPromotingPrimary() throws Exceptio .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), between(1, 2)) // triggers nontrivial promotion .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms") .put(SETTING_ALLOCATION_MAX_RETRY.getKey(), "0"); // fail faster - if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) { + if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); } createIndex(index, settings.build()); @@ -365,7 +363,7 @@ public void testRetentionLeasesEstablishedWhenRelocatingPrimary() throws Excepti .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), between(0, 1)) .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms") .put(SETTING_ALLOCATION_MAX_RETRY.getKey(), "0"); // fail faster - if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) { + if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); } createIndex(index, settings.build()); @@ -463,7 +461,7 @@ public void testCloseIndexDuringRollingUpgrade() throws Exception { closeIndex(indexName); } - if (minimumNodeVersion.onOrAfter(Version.V_7_2_0)) { + if (minimumIndexVersion().onOrAfter(IndexVersions.V_7_2_0)) { // index is created on a version that supports the replication of closed indices, // so we expect the index to be closed and replicated 
ensureGreen(indexName); @@ -503,7 +501,7 @@ public void testClosedIndexNoopRecovery() throws Exception { if (indexVersionCreated(indexName).onOrAfter(IndexVersions.V_7_2_0)) { // index was created on a version that supports the replication of closed indices, so we expect it to be closed and replicated - assertTrue(minimumNodeVersion().onOrAfter(Version.V_7_2_0)); + assertTrue(minimumIndexVersion().onOrAfter(IndexVersions.V_7_2_0)); ensureGreen(indexName); assertClosedIndex(indexName, true); if (CLUSTER_TYPE != ClusterType.OLD) { @@ -650,7 +648,7 @@ public void testOperationBasedRecovery() throws Exception { final Settings.Builder settings = Settings.builder() .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2); - if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) { + if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); } final String mappings = randomBoolean() ? "\"_source\": { \"enabled\": false}" : null; @@ -735,7 +733,7 @@ public void testSoftDeletesDisabledWarning() throws Exception { if (CLUSTER_TYPE == ClusterType.OLD) { boolean softDeletesEnabled = true; Settings.Builder settings = Settings.builder(); - if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) { + if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) { softDeletesEnabled = randomBoolean(); settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), softDeletesEnabled); } diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java index 15fc1e68196e1..230ab39610b1e 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java @@ -16,10 +16,10 @@ import org.elasticsearch.client.ResponseException; import org.elasticsearch.cluster.metadata.DesiredNode; import org.elasticsearch.cluster.metadata.DesiredNodeWithStatus; +import org.elasticsearch.cluster.metadata.MetadataFeatures; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.Processors; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.xcontent.json.JsonXContent; @@ -50,9 +50,11 @@ private enum ProcessorsPrecision { public void testUpgradeDesiredNodes() throws Exception { assumeTrue("Desired nodes was introduced in 8.1", getOldClusterVersion().onOrAfter(Version.V_8_1_0)); - if (getOldClusterVersion().onOrAfter(Processors.DOUBLE_PROCESSORS_SUPPORT_VERSION)) { + var featureVersions = new MetadataFeatures().getHistoricalFeatures(); + + if (getOldClusterVersion().onOrAfter(featureVersions.get(DesiredNode.DOUBLE_PROCESSORS_SUPPORTED))) { assertUpgradedNodesCanReadDesiredNodes(); - } else if (getOldClusterVersion().onOrAfter(DesiredNode.RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION)) { + } else if (getOldClusterVersion().onOrAfter(featureVersions.get(DesiredNode.RANGE_FLOAT_PROCESSORS_SUPPORTED))) { assertDesiredNodesUpdatedWithRoundedUpFloatsAreIdempotent(); } else { assertDesiredNodesWithFloatProcessorsAreRejectedInOlderVersions(); diff --git 
a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/ClusterHealthRestCancellationIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/ClusterHealthRestCancellationIT.java index cf76d86c9298f..755bbce93c95b 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/ClusterHealthRestCancellationIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/ClusterHealthRestCancellationIT.java @@ -18,9 +18,11 @@ import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; +import org.elasticsearch.test.junit.annotations.TestIssueLogging; import java.util.concurrent.CancellationException; import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.TimeUnit; import static org.elasticsearch.action.support.ActionTestUtils.wrapAsRestResponseListener; import static org.elasticsearch.test.TaskAssertions.assertAllCancellableTasksAreCancelled; @@ -28,6 +30,10 @@ public class ClusterHealthRestCancellationIT extends HttpSmokeTestCase { + @TestIssueLogging( + issueUrl = "https://github.com/elastic/elasticsearch/issues/100062", + value = "org.elasticsearch.test.TaskAssertions:TRACE" + ) public void testClusterHealthRestCancellation() throws Exception { final var barrier = new CyclicBarrier(2); @@ -37,7 +43,18 @@ public void testClusterHealthRestCancellation() throws Exception { @Override public ClusterState execute(ClusterState currentState) { safeAwait(barrier); - safeAwait(barrier); + // safeAwait(barrier); + + // temporarily lengthen timeout on safeAwait while investigating #100062 + try { + barrier.await(60, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new AssertionError("unexpected", e); + } catch (Exception e) { + throw new AssertionError("unexpected", e); + } + return currentState; } diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/HttpSmokeTestCase.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/HttpSmokeTestCase.java index 2533b213d469c..4536e2ee25fd6 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/HttpSmokeTestCase.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/HttpSmokeTestCase.java @@ -41,11 +41,6 @@ protected Collection> nodePlugins() { return List.of(getTestTransportPlugin(), MainRestPlugin.class); } - @Override - protected boolean ignoreExternalCluster() { - return true; - } - public static void assertOK(Response response) { assertThat(response.getStatusLine().getStatusCode(), oneOf(200, 201)); } diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/IndicesRecoveryRestCancellationIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/IndicesRecoveryRestCancellationIT.java deleted file mode 100644 index 55870bed5e851..0000000000000 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/IndicesRecoveryRestCancellationIT.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.http; - -import org.apache.http.client.methods.HttpGet; -import org.elasticsearch.action.admin.indices.recovery.RecoveryAction; -import org.elasticsearch.action.admin.indices.recovery.TransportRecoveryAction; -import org.elasticsearch.action.admin.indices.recovery.TransportRecoveryActionHelper; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.client.Cancellable; -import org.elasticsearch.client.Request; -import org.elasticsearch.client.Response; -import org.elasticsearch.core.Releasable; -import org.elasticsearch.core.Releasables; - -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CancellationException; -import java.util.concurrent.Semaphore; - -import static org.elasticsearch.action.support.ActionTestUtils.wrapAsRestResponseListener; -import static org.elasticsearch.test.TaskAssertions.assertAllCancellableTasksAreCancelled; -import static org.elasticsearch.test.TaskAssertions.assertAllTasksHaveFinished; -import static org.elasticsearch.test.TaskAssertions.awaitTaskWithPrefix; -import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.not; - -public class IndicesRecoveryRestCancellationIT extends HttpSmokeTestCase { - - public void testIndicesRecoveryRestCancellation() throws Exception { - runTest(new Request(HttpGet.METHOD_NAME, "/_recovery")); - } - - public void testCatRecoveryRestCancellation() throws Exception { - runTest(new Request(HttpGet.METHOD_NAME, "/_cat/recovery")); - } - - private void runTest(Request request) throws Exception { - - createIndex("test"); - ensureGreen("test"); - - final List operationBlocks = new ArrayList<>(); - for (final TransportRecoveryAction transportRecoveryAction : internalCluster().getInstances(TransportRecoveryAction.class)) { - final Semaphore operationBlock = new Semaphore(1); - operationBlocks.add(operationBlock); - TransportRecoveryActionHelper.setOnShardOperation(transportRecoveryAction, () -> { - try { - operationBlock.acquire(); - } catch (InterruptedException e) { - throw new AssertionError(e); - } - operationBlock.release(); - }); - } - assertThat(operationBlocks, not(empty())); - - final List releasables = new ArrayList<>(); - try { - for (final Semaphore operationBlock : operationBlocks) { - operationBlock.acquire(); - releasables.add(operationBlock::release); - } - - final PlainActionFuture future = new PlainActionFuture<>(); - logger.info("--> sending request"); - final Cancellable cancellable = getRestClient().performRequestAsync(request, wrapAsRestResponseListener(future)); - - awaitTaskWithPrefix(RecoveryAction.NAME); - - logger.info("--> waiting for at least one task to hit a block"); - assertBusy(() -> assertTrue(operationBlocks.stream().anyMatch(Semaphore::hasQueuedThreads))); - - logger.info("--> cancelling request"); - cancellable.cancel(); - expectThrows(CancellationException.class, future::actionGet); - - assertAllCancellableTasksAreCancelled(RecoveryAction.NAME); - } finally { - Releasables.close(releasables); - } - - assertAllTasksHaveFinished(RecoveryAction.NAME); - } - -} diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestActionCancellationIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestActionCancellationIT.java new file mode 100644 index 0000000000000..d46868094907d --- /dev/null +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestActionCancellationIT.java @@ -0,0 +1,144 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.http; + +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction; +import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesAction; +import org.elasticsearch.action.admin.indices.recovery.RecoveryAction; +import org.elasticsearch.action.support.CancellableActionTestPlugin; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.RefCountingListener; +import org.elasticsearch.action.support.SubscribableListener; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.rest.ObjectPath; + +import java.util.Collection; +import java.util.concurrent.CancellationException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.action.support.ActionTestUtils.wrapAsRestResponseListener; +import static org.elasticsearch.test.TaskAssertions.assertAllTasksHaveFinished; +import static org.hamcrest.Matchers.greaterThan; + +public class RestActionCancellationIT extends HttpSmokeTestCase { + + public void testIndicesRecoveryRestCancellation() { + createIndex("test"); + ensureGreen("test"); + runRestActionCancellationTest(new Request(HttpGet.METHOD_NAME, "/_recovery"), RecoveryAction.NAME); + } + + public void testCatRecoveryRestCancellation() { + createIndex("test"); + ensureGreen("test"); + runRestActionCancellationTest(new Request(HttpGet.METHOD_NAME, "/_cat/recovery"), RecoveryAction.NAME); + } + + public void testClusterHealthRestCancellation() { + runRestActionCancellationTest(new Request(HttpGet.METHOD_NAME, "/_cluster/health"), ClusterHealthAction.NAME); + } + + public void testClusterStateRestCancellation() { + runRestActionCancellationTest(new Request(HttpGet.METHOD_NAME, "/_cluster/state"), ClusterStateAction.NAME); + } + + public void testGetAliasesCancellation() { + runRestActionCancellationTest(new Request(HttpGet.METHOD_NAME, "/_alias"), GetAliasesAction.NAME); + } + + public void testCatAliasesCancellation() { + runRestActionCancellationTest(new Request(HttpGet.METHOD_NAME, "/_cat/aliases"), GetAliasesAction.NAME); + } + + private void runRestActionCancellationTest(Request request, String actionName) { + final var node = usually() ? 
internalCluster().getRandomNodeName() : internalCluster().startCoordinatingOnlyNode(Settings.EMPTY); + + try ( + var restClient = createRestClient(node); + var capturingAction = CancellableActionTestPlugin.capturingActionOnNode(actionName, node) + ) { + final var responseFuture = new PlainActionFuture(); + final var restInvocation = restClient.performRequestAsync(request, wrapAsRestResponseListener(responseFuture)); + + if (randomBoolean()) { + // cancel by aborting the REST request + capturingAction.captureAndCancel(restInvocation::cancel); + expectThrows(ExecutionException.class, CancellationException.class, () -> responseFuture.get(10, TimeUnit.SECONDS)); + } else { + // cancel via the task management API + final var cancelFuture = new PlainActionFuture(); + capturingAction.captureAndCancel( + () -> SubscribableListener + + .newForked( + l -> restClient.performRequestAsync( + getListTasksRequest(node, actionName), + wrapAsRestResponseListener(l.map(ObjectPath::createFromResponse)) + ) + ) + + .andThen((l, listTasksResponse) -> { + final var taskCount = listTasksResponse.evaluateArraySize("tasks"); + assertThat(taskCount, greaterThan(0)); + try (var listeners = new RefCountingListener(l)) { + for (int i = 0; i < taskCount; i++) { + final var taskPrefix = "tasks." + i + "."; + assertTrue(listTasksResponse.evaluate(taskPrefix + "cancellable")); + assertFalse(listTasksResponse.evaluate(taskPrefix + "cancelled")); + restClient.performRequestAsync( + getCancelTaskRequest( + listTasksResponse.evaluate(taskPrefix + "node"), + listTasksResponse.evaluate(taskPrefix + "id") + ), + wrapAsRestResponseListener(listeners.acquire(HttpSmokeTestCase::assertOK)) + ); + } + } + }) + + .addListener(cancelFuture) + ); + cancelFuture.get(10, TimeUnit.SECONDS); + expectThrows(Exception.class, () -> responseFuture.get(10, TimeUnit.SECONDS)); + } + + assertAllTasksHaveFinished(actionName); + } catch (Exception e) { + fail(e); + } + } + + private static Request getListTasksRequest(String taskNode, String actionName) { + final var listTasksRequest = new Request(HttpGet.METHOD_NAME, "/_tasks"); + listTasksRequest.addParameter("nodes", taskNode); + listTasksRequest.addParameter("actions", actionName); + listTasksRequest.addParameter("group_by", "none"); + return listTasksRequest; + } + + private static Request getCancelTaskRequest(String taskNode, int taskId) { + final var cancelTaskRequest = new Request(HttpPost.METHOD_NAME, Strings.format("/_tasks/%s:%d/_cancel", taskNode, taskId)); + cancelTaskRequest.addParameter("wait_for_completion", null); + return cancelTaskRequest; + } + + @Override + protected Collection> nodePlugins() { + return CollectionUtils.appendToCopy(super.nodePlugins(), CancellableActionTestPlugin.class); + } +} diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestClusterInfoActionCancellationIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestClusterInfoActionCancellationIT.java index 43d7630199bb2..896da65fa83dd 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestClusterInfoActionCancellationIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestClusterInfoActionCancellationIT.java @@ -114,7 +114,7 @@ public TimeValue masterNodeTimeout() { } }; - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); internalCluster().getAnyMasterNodeInstance(ClusterService.class) .submitUnbatchedStateUpdateTask("get_mappings_cancellation_test", 
new AckedClusterStateUpdateTask(ackedRequest, future) { @Override diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/SearchRestCancellationIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/SearchRestCancellationIT.java index a860b0855e158..b3fe22d09fa87 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/SearchRestCancellationIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/SearchRestCancellationIT.java @@ -98,7 +98,7 @@ void verifyCancellationDuringQueryPhase(String searchAction, Request searchReque List plugins = initBlockFactory(); indexTestData(); - PlainActionFuture<Response> future = PlainActionFuture.newFuture(); + PlainActionFuture<Response> future = new PlainActionFuture<>(); Cancellable cancellable = getRestClient().performRequestAsync(searchRequest, wrapAsRestResponseListener(future)); awaitForBlock(plugins); @@ -141,7 +141,7 @@ void verifyCancellationDuringFetchPhase(String searchAction, Request searchReque List plugins = initBlockFactory(); indexTestData(); - PlainActionFuture<Response> future = PlainActionFuture.newFuture(); + PlainActionFuture<Response> future = new PlainActionFuture<>(); Cancellable cancellable = getRestClient().performRequestAsync(searchRequest, wrapAsRestResponseListener(future)); awaitForBlock(plugins); diff --git a/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/30_desired_balance.yml b/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/30_desired_balance.yml index f8b1de5155527..afe66594a490b 100644 --- a/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/30_desired_balance.yml +++ b/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/30_desired_balance.yml @@ -186,3 +186,18 @@ setup: - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.max' - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.average' - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.std_dev' + +--- +"Test unassigned_shards, total_allocations, undesired_allocations and undesired_allocations_fraction": + + - skip: + version: " - 8.11.99" + reason: "undesired_shard_allocation_count added in 8.12.0" + + - do: + _internal.get_desired_balance: { } + + - gte: { 'stats.unassigned_shards' : 0 } + - gte: { 'stats.total_allocations' : 0 } + - gte: { 'stats.undesired_allocations' : 0 } + - gte: { 'stats.undesired_allocations_fraction' : 0.0 } diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index e484b98d3188e..787d684c3779e 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -228,8 +228,9 @@ tasks.named("yamlRestTestV7CompatTransform").configure { task -> } tasks.register('enforceYamlTestConvention').configure { + def tree = fileTree('src/main/resources/rest-api-spec/test') doLast { - if (fileTree('src/main/resources/rest-api-spec/test').files) { + if (tree.files) { throw new GradleException("There are YAML tests in src/main source set.
These should be moved to src/yamlRestTest.") } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_settings.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_settings.json index c1f3079995de9..08134e211a312 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_settings.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_settings.json @@ -45,6 +45,10 @@ "type":"boolean", "description":"Whether to update existing settings. If set to `true` existing settings on an index remain unchanged, the default is `false`" }, + "reopen":{ + "type":"boolean", + "description":"Whether to close and reopen the index to apply non-dynamic settings. If set to `true` the indices to which the settings are being applied will be closed temporarily and then reopened in order to apply the changes. The default is `false`" + }, "ignore_unavailable":{ "type":"boolean", "description":"Whether specified concrete indices should be ignored when unavailable (missing or closed)" diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml index 49f5958ad8da1..96998a2a6218e 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml @@ -496,3 +496,16 @@ test_alias \s+ test_index\n my_alias \s+ test_index\n $/ + +--- +"Deprecated local parameter": + - skip: + version: "- 8.11.99" + features: ["warnings"] + reason: verifying deprecation warnings from 8.12.0 onwards + + - do: + cat.aliases: + local: true + warnings: + - "the [?local=true] query parameter to cat-aliases requests has no effect and will be removed in a future version" diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.allocation/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.allocation/10_basic.yml index f56a1945b2d7c..ed519438f1b1e 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.allocation/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.allocation/10_basic.yml @@ -26,6 +26,10 @@ --- "One index": + - skip: + version: " - 8.11.99" + reason: "node.role column shown by default from 8.12.0 onwards" + - do: indices.create: index: test @@ -45,13 +49,14 @@ (\d+ \s+) #always should return value since we filter out non data nodes by default [-\w.]+ \s+ \d+(\.\d+){3} \s+ - [-\w.]+ + [-\w.]+ \s+ + [\w]+ \n )+ ( \s* #allow leading spaces to account for right-justified text \d+ \s+ - UNASSIGNED + UNASSIGNED \s* \n )? $/ @@ -59,6 +64,10 @@ --- "Node ID": + - skip: + version: " - 8.11.99" + reason: "node.role column shown by default from 8.12.0 onwards" + - do: cat.allocation: node_id: _master @@ -74,7 +83,8 @@ (\d+ \s+)? #no value from client nodes [-\w.]+ \s+ \d+(\.\d+){3} \s+ - [-\w.]+ + [-\w.]+ \s+ + [\w]+ \n ) $/ @@ -92,6 +102,10 @@ "All Nodes": + - skip: + version: " - 8.11.99" + reason: "node.role column shown by default from 8.12.0 onwards" + - do: cat.allocation: node_id: "*" @@ -108,13 +122,14 @@ (\d+ \s+)? #no value from client nodes [-\w.]+ \s+ \d+(\.\d+){3} \s+ - [-\w.]+ + [-\w.]+ \s+ + [\w]+ \n )+ ( \s* #allow leading spaces to account for right-justified text \d+ \s+ - UNASSIGNED + UNASSIGNED \s* \n )? 
$/ @@ -122,6 +137,10 @@ --- "Column headers": + - skip: + version: " - 8.11.99" + reason: "node.role column shown by default from 8.12.0 onwards" + - do: cat.allocation: v: true @@ -136,7 +155,8 @@ disk.percent \s+ host \s+ ip \s+ - node + node \s+ + node.role \n ( \s* #allow leading spaces to account for right-justified text @@ -148,7 +168,8 @@ (\d+ \s+) #always should return value since we filter out non data nodes by default [-\w.]+ \s+ \d+(\.\d+){3} \s+ - [-\w.]+ + [-\w.]+ \s+ + [\w]+ \n )+ $/ @@ -193,6 +214,10 @@ "Bytes": + - skip: + version: " - 8.11.99" + reason: "node.role column shown by default from 8.12.0 onwards" + - do: cat.allocation: bytes: gb @@ -208,7 +233,8 @@ (\d+ \s+) #always should return value since we filter out non data nodes by default [-\w.]+ \s+ \d+(\.\d+){3} \s+ - [-\w.]+ + [-\w.]+ \s+ + [\w]+ \n )+ $/ diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml index 8e1d3431069cf..4647c85ba9caf 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml @@ -221,3 +221,18 @@ setup: - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.max' - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.average' - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.std_dev' + +--- +"Test unassigned_shards, total_allocations, undesired_allocations and undesired_allocations_fraction": + + - skip: + version: " - 8.11.99" + reason: "undesired_shard_allocation_count added in 8.12.0" + + - do: + _internal.get_desired_balance: { } + + - gte: { 'stats.unassigned_shards' : 0 } + - gte: { 'stats.total_allocations' : 0 } + - gte: { 'stats.undesired_allocations' : 0 } + - gte: { 'stats.undesired_allocations_fraction' : 0.0 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.exists_alias/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.exists_alias/10_basic.yml index fba0512ca372f..bf499de8463bd 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.exists_alias/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.exists_alias/10_basic.yml @@ -37,9 +37,14 @@ --- "Test indices.exists_alias with local flag": + - skip: + features: ["allowed_warnings"] + - do: indices.exists_alias: name: test_alias local: true + allowed_warnings: + - "the [?local=true] query parameter to get-aliases requests has no effect and will be removed in a future version" - is_false: '' diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_alias/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_alias/10_basic.yml index 721c7bc709032..d765decda68a8 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_alias/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_alias/10_basic.yml @@ -291,10 +291,14 @@ setup: --- "Get alias with local flag": + - skip: + features: ["allowed_warnings"] - do: indices.get_alias: local: true + allowed_warnings: + - "the [?local=true] query parameter to get-aliases requests has no effect and will be removed in a future version" - is_true:
test_index @@ -325,3 +329,17 @@ setup: - is_true: test_index - is_false: test_index_2 + + +--- +"Deprecated local parameter": + - skip: + version: "- 8.11.99" + features: ["warnings"] + reason: verifying deprecation warnings from 8.12.0 onwards + + - do: + indices.get_alias: + local: true + warnings: + - "the [?local=true] query parameter to get-aliases requests has no effect and will be removed in a future version" diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_settings/20_update_non_dynamic_settings.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_settings/20_update_non_dynamic_settings.yml new file mode 100644 index 0000000000000..07c0e8b7a8b2a --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_settings/20_update_non_dynamic_settings.yml @@ -0,0 +1,58 @@ +setup: + - skip: + version: ' - 8.11.99' + reason: 'ability to update non-dynamic settings added in 8.12' + + - do: + indices.create: + index: test-index + body: + settings: + index: + number_of_replicas: 0 + +--- +"Test update non dynamic settings": + - do: + indices.put_settings: + index: test-index + body: + number_of_replicas: 1 + + - do: + catch: bad_request + indices.put_settings: + index: test-index + body: + index.codec: best_compression + + - do: + catch: bad_request + indices.put_settings: + index: test-index + reopen: false + body: + index.codec: best_compression + + - do: + indices.get_settings: + index: test-index + flat_settings: false + - match: + test-index.settings.index.codec: null + + - do: + indices.put_settings: + index: test-index + reopen: true + body: + index.codec: best_compression + - match: { acknowledged: true } + + - do: + indices.get_settings: + index: test-index + flat_settings: false + - match: + test-index.settings.index.codec: "best_compression" + diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/60_dense_vector_dynamic_mapping.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/60_dense_vector_dynamic_mapping.yml index 151698482368a..62d752b1efe88 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/60_dense_vector_dynamic_mapping.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/60_dense_vector_dynamic_mapping.yml @@ -2,7 +2,34 @@ setup: - skip: version: ' - 8.10.99' reason: 'Dynamic mapping of floats to dense_vector was added in 8.11' +--- +"Fields indexed as strings won't be transformed into dense_vector": + - do: + index: + index: strings-are-not-floats + refresh: true + body: + obviously_string: ["foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", + "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", + "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", + "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", + "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", + "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", + "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", + "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", + "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", + "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", + "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", + "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", + "foo", 
"foo", "foo", "foo", "foo", "foo", "foo", "foo"] + - do: + cluster.health: + wait_for_events: languid + - do: + indices.get_mapping: + index: strings-are-not-floats + - match: { strings-are-not-floats.mappings.properties.obviously_string.type: text } --- "Fields with float arrays below the threshold still map as float": diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java index 271984fd1ae5e..5df0e374ee9db 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java @@ -55,7 +55,6 @@ import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchTransportService; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.support.replication.TransportReplicationActionTests; @@ -102,7 +101,7 @@ import java.util.function.Function; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.hamcrest.Matchers.emptyIterable; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -560,9 +559,10 @@ public void testSearchQueryThenFetch() throws Exception { refresh(); SearchRequest searchRequest = new SearchRequest(randomIndicesOrAliases).searchType(SearchType.QUERY_THEN_FETCH); - SearchResponse searchResponse = internalCluster().coordOnlyNodeClient().search(searchRequest).actionGet(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, greaterThan(0L)); + assertNoFailuresAndResponse( + internalCluster().coordOnlyNodeClient().search(searchRequest), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, greaterThan(0L)) + ); clearInterceptedActions(); assertIndicesSubset( @@ -589,9 +589,10 @@ public void testSearchDfsQueryThenFetch() throws Exception { refresh(); SearchRequest searchRequest = new SearchRequest(randomIndicesOrAliases).searchType(SearchType.DFS_QUERY_THEN_FETCH); - SearchResponse searchResponse = internalCluster().coordOnlyNodeClient().search(searchRequest).actionGet(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, greaterThan(0L)); + assertNoFailuresAndResponse( + internalCluster().coordOnlyNodeClient().search(searchRequest), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, greaterThan(0L)) + ); clearInterceptedActions(); assertIndicesSubset( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java index 46737571a15ab..d17ae1c7fce0d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java @@ -27,7 +27,6 @@ import org.elasticsearch.action.bulk.BulkAction; import 
org.elasticsearch.action.index.IndexAction; import org.elasticsearch.action.search.SearchAction; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchTransportService; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.replication.ReplicationResponse; @@ -83,6 +82,7 @@ import static org.elasticsearch.core.TimeValue.timeValueSeconds; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFutureThrows; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; @@ -771,17 +771,19 @@ public void testTaskStoringSuccessfulResult() throws Exception { assertNoFailures(indicesAdmin().prepareRefresh(TaskResultsService.TASK_INDEX).get()); - SearchResponse searchResponse = prepareSearch(TaskResultsService.TASK_INDEX).setSource( - SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("task.action", taskInfo.action())) - ).get(); - - assertEquals(1L, searchResponse.getHits().getTotalHits().value); - - searchResponse = prepareSearch(TaskResultsService.TASK_INDEX).setSource( - SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("task.node", taskInfo.taskId().getNodeId())) - ).get(); + assertHitCount( + prepareSearch(TaskResultsService.TASK_INDEX).setSource( + SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("task.action", taskInfo.action())) + ), + 1L + ); - assertEquals(1L, searchResponse.getHits().getTotalHits().value); + assertHitCount( + prepareSearch(TaskResultsService.TASK_INDEX).setSource( + SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("task.node", taskInfo.taskId().getNodeId())) + ), + 1L + ); GetTaskResponse getResponse = expectFinishedTask(taskId); assertEquals(result, getResponse.getTask().getResponseAsMap()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java index e5edeccbad55d..2c5c7a8c103b9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java @@ -205,18 +205,20 @@ public void testAutoCreateSystemAliasViaV1TemplateAllowsTemplates() throws Excep } private String autoCreateSystemAliasViaComposableTemplate(String indexName) throws Exception { - ComposableIndexTemplate cit = new ComposableIndexTemplate( - Collections.singletonList(indexName + "*"), - new Template( - null, - null, - Map.of(indexName + "-composable-alias", AliasMetadata.builder(indexName + "-composable-alias").build()) - ), - Collections.emptyList(), - 4L, - 5L, - Collections.emptyMap() - ); + ComposableIndexTemplate cit = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList(indexName + "*")) + .template( + new Template( + null, + null, + Map.of(indexName + "-composable-alias", AliasMetadata.builder(indexName + "-composable-alias").build()) + ) + ) + .componentTemplates(Collections.emptyList()) + .priority(4L) + .version(5L) + .metadata(Collections.emptyMap()) + .build(); 
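ComposableIndexTemplate.builder() takes over from the removed positional constructor, so a call site names only the fields it actually sets and leaves the rest null. A minimal sketch of the pattern, using only builder methods that appear in this patch (the variable name and the pattern, priority, and version values are illustrative):

    ComposableIndexTemplate sketch = ComposableIndexTemplate.builder()
        .indexPatterns(Collections.singletonList("sketch-*")) // index names the template matches
        .priority(4L)  // resolves conflicts between overlapping templates; highest priority wins
        .version(5L)   // user-managed version marker, not interpreted by Elasticsearch
        .build();      // fields never set (template, metadata, ...) simply remain null
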
assertAcked( client().execute( PutComposableIndexTemplateAction.INSTANCE, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java index 4b395ec6856e5..d19c61f97efd9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java @@ -206,7 +206,6 @@ public void testInvalidShardCountSettingsWithoutPrefix() throws Exception { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/96578") public void testCreateAndDeleteIndexConcurrently() throws InterruptedException { createIndex("test"); final AtomicInteger indexVersion = new AtomicInteger(0); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java index a0dffa8b7caa8..1c075442d99e6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java @@ -194,18 +194,20 @@ public void testCreateSystemAliasViaV1TemplateAllowsTemplates() throws Exception } private void createIndexWithComposableTemplates(String indexName, String primaryIndexName) throws Exception { - ComposableIndexTemplate cit = new ComposableIndexTemplate( - Collections.singletonList(indexName + "*"), - new Template( - null, - null, - Map.of(indexName + "-composable-alias", AliasMetadata.builder(indexName + "-composable-alias").build()) - ), - Collections.emptyList(), - 4L, - 5L, - Collections.emptyMap() - ); + ComposableIndexTemplate cit = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList(indexName + "*")) + .template( + new Template( + null, + null, + Map.of(indexName + "-composable-alias", AliasMetadata.builder(indexName + "-composable-alias").build()) + ) + ) + .componentTemplates(Collections.emptyList()) + .priority(4L) + .version(5L) + .metadata(Collections.emptyMap()) + .build(); assertAcked( client().execute( PutComposableIndexTemplateAction.INSTANCE, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java index 54add487a3dd4..e5ff2a6ce1cc5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java @@ -24,7 +24,7 @@ import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -60,7 +60,8 @@ import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -243,27 +244,28 @@ private void splitToN(int sourceShards, int firstSplitShards, int secondSplitSha assertNested("first_split", numDocs); assertNested("second_split", numDocs); } - assertAllUniqueDocs(prepareSearch("second_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); - assertAllUniqueDocs(prepareSearch("first_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); - assertAllUniqueDocs(prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + assertAllUniqueDocs(prepareSearch("second_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), numDocs); + assertAllUniqueDocs(prepareSearch("first_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), numDocs); + assertAllUniqueDocs(prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), numDocs); } public void assertNested(String index, int numDocs) { // now, do a nested query - SearchResponse searchResponse = prepareSearch(index).setQuery( - nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"), ScoreMode.Avg) - ).get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) numDocs)); + assertNoFailuresAndResponse( + prepareSearch(index).setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"), ScoreMode.Avg)), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) numDocs)) + ); } - public void assertAllUniqueDocs(SearchResponse response, int numDocs) { - Set ids = new HashSet<>(); - for (int i = 0; i < response.getHits().getHits().length; i++) { - String id = response.getHits().getHits()[i].getId(); - assertTrue("found ID " + id + " more than once", ids.add(id)); - } - assertEquals(numDocs, ids.size()); + public void assertAllUniqueDocs(SearchRequestBuilder request, int numDocs) { + assertResponse(request, response -> { + Set ids = new HashSet<>(); + for (int i = 0; i < response.getHits().getHits().length; i++) { + String id = response.getHits().getHits()[i].getId(); + assertTrue("found ID " + id + " more than once", ids.add(id)); + } + assertEquals(numDocs, ids.size()); + }); } public void testSplitIndexPrimaryTerm() throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java index 94c08bd7e8162..678d7b53d9640 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java @@ -127,7 +127,7 @@ public void testSimple() throws Exception { } // Force merge to ensure that there are more than one numeric value to justify doc value. 
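PlainActionFuture.newFuture() gives way to a plain diamond-operator constructor; a PlainActionFuture is itself an ActionListener, so it can still be handed straight to an async client call and then blocked on. A minimal sketch under the same assumptions as this test ("request" is a hypothetical AnalyzeIndexDiskUsageRequest built as in the surrounding code):

    PlainActionFuture<AnalyzeIndexDiskUsageResponse> future = new PlainActionFuture<>(); // was PlainActionFuture.newFuture()
    client().execute(AnalyzeIndexDiskUsageAction.INSTANCE, request, future); // the future doubles as the ActionListener
    AnalyzeIndexDiskUsageResponse response = future.actionGet(); // block until the action completes
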
client().admin().indices().prepareForceMerge(index).setMaxNumSegments(1).get(); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); client().execute( AnalyzeIndexDiskUsageAction.INSTANCE, new AnalyzeIndexDiskUsageRequest(new String[] { index }, AnalyzeIndexDiskUsageRequest.DEFAULT_INDICES_OPTIONS, true), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java index d7e4e42b73554..7ae7fc5c4a180 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java @@ -794,7 +794,9 @@ public void testRolloverConcurrently() throws Exception { null, null ); - putTemplateRequest.indexTemplate(new ComposableIndexTemplate(List.of("test-*"), template, null, 100L, null, null)); + putTemplateRequest.indexTemplate( + ComposableIndexTemplate.builder().indexPatterns(List.of("test-*")).template(template).priority(100L).build() + ); assertAcked(client().execute(PutComposableIndexTemplateAction.INSTANCE, putTemplateRequest).actionGet()); final CyclicBarrier barrier = new CyclicBarrier(numOfThreads); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessor2RetryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessor2RetryIT.java index 18a8ae2dd2800..f1d4f6958f7f0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessor2RetryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessor2RetryIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.query.QueryBuilders; @@ -26,6 +25,7 @@ import java.util.concurrent.TimeUnit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -136,15 +136,17 @@ public void afterBulk(long executionId, BulkRequest request, Exception failure) indicesAdmin().refresh(new RefreshRequest()).get(); - SearchResponse results = prepareSearch(INDEX_NAME).setQuery(QueryBuilders.matchAllQuery()).setSize(0).get(); - assertThat(bulkProcessor.getTotalBytesInFlight(), equalTo(0L)); - if (rejectedExecutionExpected) { - assertThat((int) results.getHits().getTotalHits().value, lessThanOrEqualTo(numberOfAsyncOps)); - } else if (rejectedAfterAllRetries) { - assertThat((int) results.getHits().getTotalHits().value, lessThan(numberOfAsyncOps)); - } else { - assertThat((int) results.getHits().getTotalHits().value, equalTo(numberOfAsyncOps)); - } + final boolean finalRejectedAfterAllRetries = rejectedAfterAllRetries; + assertResponse(prepareSearch(INDEX_NAME).setQuery(QueryBuilders.matchAllQuery()).setSize(0), results -> { + assertThat(bulkProcessor.getTotalBytesInFlight(), equalTo(0L)); + if (rejectedExecutionExpected) { + 
assertThat((int) results.getHits().getTotalHits().value, lessThanOrEqualTo(numberOfAsyncOps)); + } else if (finalRejectedAfterAllRetries) { + assertThat((int) results.getHits().getTotalHits().value, lessThan(numberOfAsyncOps)); + } else { + assertThat((int) results.getHits().getTotalHits().value, equalTo(numberOfAsyncOps)); + } + }); } private static void indexDocs(BulkProcessor2 processor, int numDocs) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java index e664f6e6bb42f..93f066d35bbc4 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java @@ -9,7 +9,6 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.QueryBuilders; @@ -25,6 +24,7 @@ import java.util.concurrent.TimeUnit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -131,15 +131,16 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure) indicesAdmin().refresh(new RefreshRequest()).get(); - SearchResponse results = prepareSearch(INDEX_NAME).setQuery(QueryBuilders.matchAllQuery()).setSize(0).get(); - - if (rejectedExecutionExpected) { - assertThat((int) results.getHits().getTotalHits().value, lessThanOrEqualTo(numberOfAsyncOps)); - } else if (rejectedAfterAllRetries) { - assertThat((int) results.getHits().getTotalHits().value, lessThan(numberOfAsyncOps)); - } else { - assertThat((int) results.getHits().getTotalHits().value, equalTo(numberOfAsyncOps)); - } + final boolean finalRejectedAfterAllRetries = rejectedAfterAllRetries; + assertResponse(prepareSearch(INDEX_NAME).setQuery(QueryBuilders.matchAllQuery()).setSize(0), results -> { + if (rejectedExecutionExpected) { + assertThat((int) results.getHits().getTotalHits().value, lessThanOrEqualTo(numberOfAsyncOps)); + } else if (finalRejectedAfterAllRetries) { + assertThat((int) results.getHits().getTotalHits().value, lessThan(numberOfAsyncOps)); + } else { + assertThat((int) results.getHits().getTotalHits().value, equalTo(numberOfAsyncOps)); + } + }); } private void assertRetriedCorrectly(CorrelatingBackoffPolicy internalPolicy, Object bulkResponse, Throwable failure) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/WriteAckDelayIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/WriteAckDelayIT.java index 6ec01c3be5626..61f624c19f567 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/WriteAckDelayIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/WriteAckDelayIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import 
org.elasticsearch.test.ESIntegTestCase; @@ -18,6 +17,8 @@ import java.util.ArrayList; import java.util.List; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; + public class WriteAckDelayIT extends ESIntegTestCase { @Override @@ -42,17 +43,18 @@ public void testIndexWithWriteDelayEnabled() throws Exception { for (int j = 0; j < numOfChecks; j++) { try { logger.debug("running search"); - SearchResponse response = prepareSearch("test").get(); - if (response.getHits().getTotalHits().value != numOfDocs) { - final String message = "Count is " - + response.getHits().getTotalHits().value - + " but " - + numOfDocs - + " was expected. " - + ElasticsearchAssertions.formatShardStatus(response); - logger.error("{}. search response: \n{}", message, response); - fail(message); - } + assertResponse(prepareSearch("test"), response -> { + if (response.getHits().getTotalHits().value != numOfDocs) { + final String message = "Count is " + + response.getHits().getTotalHits().value + + " but " + + numOfDocs + + " was expected. " + + ElasticsearchAssertions.formatShardStatus(response); + logger.error("{}. search response: \n{}", message, response); + fail(message); + } + }); } catch (Exception e) { logger.error("search failed", e); throw e; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java index b9e8b40c70cb8..36e544af90bc6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java @@ -29,7 +29,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -95,31 +96,32 @@ public void testBasic() { remoteClient.prepareIndex("remote_test").setId("remote_new").setSource().get(); remoteClient.admin().indices().prepareRefresh().get(); } - SearchResponse resp = localClient.prepareSearch() - .setPreference(null) - .setQuery(new MatchAllQueryBuilder()) - .setPointInTime(new PointInTimeBuilder(pitId)) - .setSize(1000) - .get(); - assertNoFailures(resp); - assertHitCount(resp, (includeLocalIndex ? localNumDocs : 0) + remoteNumDocs); - - SearchResponse.Clusters clusters = resp.getClusters(); - int expectedNumClusters = 1 + (includeLocalIndex ? 
1 : 0); - assertThat(clusters.getTotal(), equalTo(expectedNumClusters)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(expectedNumClusters)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); - - if (includeLocalIndex) { - SearchResponse.Cluster localCluster = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); - assertNotNull(localCluster); - assertOneSuccessfulShard(localCluster); - } - - SearchResponse.Cluster remoteCluster = clusters.getCluster(REMOTE_CLUSTER); - assertNotNull(remoteCluster); - assertOneSuccessfulShard(remoteCluster); - + assertNoFailuresAndResponse( + localClient.prepareSearch() + .setPreference(null) + .setQuery(new MatchAllQueryBuilder()) + .setPointInTime(new PointInTimeBuilder(pitId)) + .setSize(1000), + resp -> { + assertHitCount(resp, (includeLocalIndex ? localNumDocs : 0) + remoteNumDocs); + + SearchResponse.Clusters clusters = resp.getClusters(); + int expectedNumClusters = 1 + (includeLocalIndex ? 1 : 0); + assertThat(clusters.getTotal(), equalTo(expectedNumClusters)); + assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(expectedNumClusters)); + assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); + + if (includeLocalIndex) { + SearchResponse.Cluster localCluster = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertNotNull(localCluster); + assertOneSuccessfulShard(localCluster); + } + + SearchResponse.Cluster remoteCluster = clusters.getCluster(REMOTE_CLUSTER); + assertNotNull(remoteCluster); + assertOneSuccessfulShard(remoteCluster); + } + ); } finally { closePointInTime(pitId); } @@ -157,24 +159,22 @@ public void testFailuresOnOneShardsWithPointInTime() throws ExecutionException, ThrowingQueryBuilder queryBuilder = new ThrowingQueryBuilder(randomLong(), new IllegalStateException("index corrupted"), 0); SearchRequest searchRequest = new SearchRequest(); searchRequest.source(new SearchSourceBuilder().query(queryBuilder).size(10).pointInTimeBuilder(new PointInTimeBuilder(pitId))); - SearchResponse searchResponse = client(LOCAL_CLUSTER).search(searchRequest).get(); - - SearchResponse.Clusters clusters = searchResponse.getClusters(); - int expectedNumClusters = 1 + (includeLocalIndex ? 1 : 0); - assertThat(clusters.getTotal(), equalTo(expectedNumClusters)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(expectedNumClusters)); - - if (includeLocalIndex) { - SearchResponse.Cluster localCluster = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); - assertNotNull(localCluster); - assertOneFailedShard(localCluster, numShards); - } - SearchResponse.Cluster remoteCluster = clusters.getCluster(REMOTE_CLUSTER); - assertNotNull(remoteCluster); - assertOneFailedShard(remoteCluster, numShards); - + assertResponse(client(LOCAL_CLUSTER).search(searchRequest), searchResponse -> { + SearchResponse.Clusters clusters = searchResponse.getClusters(); + int expectedNumClusters = 1 + (includeLocalIndex ? 
1 : 0); + assertThat(clusters.getTotal(), equalTo(expectedNumClusters)); + assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(0)); + assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); + assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(expectedNumClusters)); + if (includeLocalIndex) { + SearchResponse.Cluster localCluster = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertNotNull(localCluster); + assertOneFailedShard(localCluster, numShards); + } + SearchResponse.Cluster remoteCluster = clusters.getCluster(REMOTE_CLUSTER); + assertNotNull(remoteCluster); + assertOneFailedShard(remoteCluster, numShards); + }); } finally { closePointInTime(pitId); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java index c1ca4c60f176e..2074e38f891c0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java @@ -17,7 +17,6 @@ import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -64,7 +63,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.emptyArray; @@ -269,66 +269,89 @@ public void testSearchingFilteringAliasesSingleIndex() throws Exception { ).actionGet(); logger.info("--> checking single filtering alias search"); - SearchResponse searchResponse = prepareSearch("foos").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "1"); + assertResponse( + prepareSearch("foos").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "1") + ); logger.info("--> checking single filtering alias wildcard search"); - searchResponse = prepareSearch("fo*").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "1"); + assertResponse( + prepareSearch("fo*").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "1") + ); - searchResponse = prepareSearch("tests").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "1", "2", "3"); + assertResponse( + prepareSearch("tests").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "1", "2", "3") + ); logger.info("--> checking single filtering alias search with sort"); - searchResponse = 
prepareSearch("tests").setQuery(QueryBuilders.matchAllQuery()).addSort("_index", SortOrder.ASC).get(); - assertHits(searchResponse.getHits(), "1", "2", "3"); + assertResponse( + prepareSearch("tests").setQuery(QueryBuilders.matchAllQuery()).addSort("_index", SortOrder.ASC), + searchResponse -> assertHits(searchResponse.getHits(), "1", "2", "3") + ); logger.info("--> checking single filtering alias search with global facets"); - searchResponse = prepareSearch("tests").setQuery(QueryBuilders.matchQuery("name", "bar")) - .addAggregation(AggregationBuilders.global("global").subAggregation(AggregationBuilders.terms("test").field("name"))) - .get(); - assertNoFailures(searchResponse); - Global global = searchResponse.getAggregations().get("global"); - Terms terms = global.getAggregations().get("test"); - assertThat(terms.getBuckets().size(), equalTo(4)); + assertNoFailuresAndResponse( + prepareSearch("tests").setQuery(QueryBuilders.matchQuery("name", "bar")) + .addAggregation(AggregationBuilders.global("global").subAggregation(AggregationBuilders.terms("test").field("name"))), + searchResponse -> { + Global global = searchResponse.getAggregations().get("global"); + Terms terms = global.getAggregations().get("test"); + assertThat(terms.getBuckets().size(), equalTo(4)); + } + ); logger.info("--> checking single filtering alias search with global facets and sort"); - searchResponse = prepareSearch("tests").setQuery(QueryBuilders.matchQuery("name", "bar")) - .addAggregation(AggregationBuilders.global("global").subAggregation(AggregationBuilders.terms("test").field("name"))) - .addSort("_index", SortOrder.ASC) - .get(); - assertNoFailures(searchResponse); - global = searchResponse.getAggregations().get("global"); - terms = global.getAggregations().get("test"); - assertThat(terms.getBuckets().size(), equalTo(4)); + assertNoFailuresAndResponse( + prepareSearch("tests").setQuery(QueryBuilders.matchQuery("name", "bar")) + .addAggregation(AggregationBuilders.global("global").subAggregation(AggregationBuilders.terms("test").field("name"))) + .addSort("_index", SortOrder.ASC), + searchResponse -> { + Global global = searchResponse.getAggregations().get("global"); + Terms terms = global.getAggregations().get("test"); + assertThat(terms.getBuckets().size(), equalTo(4)); + } + ); logger.info("--> checking single filtering alias search with non-global facets"); - searchResponse = prepareSearch("tests").setQuery(QueryBuilders.matchQuery("name", "bar")) - .addAggregation(AggregationBuilders.terms("test").field("name")) - .addSort("_index", SortOrder.ASC) - .get(); - assertNoFailures(searchResponse); - terms = searchResponse.getAggregations().get("test"); - assertThat(terms.getBuckets().size(), equalTo(2)); - - searchResponse = prepareSearch("foos", "bars").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "1", "2"); + assertNoFailuresAndResponse( + prepareSearch("tests").setQuery(QueryBuilders.matchQuery("name", "bar")) + .addAggregation(AggregationBuilders.terms("test").field("name")) + .addSort("_index", SortOrder.ASC), + searchResponse -> { + Terms terms = searchResponse.getAggregations().get("test"); + assertThat(terms.getBuckets().size(), equalTo(2)); + } + ); + assertResponse( + prepareSearch("foos", "bars").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "1", "2") + ); logger.info("--> checking single non-filtering alias search"); - searchResponse = prepareSearch("alias1").setQuery(QueryBuilders.matchAllQuery()).get(); - 
assertHits(searchResponse.getHits(), "1", "2", "3", "4"); + assertResponse( + prepareSearch("alias1").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "1", "2", "3", "4") + ); logger.info("--> checking non-filtering alias and filtering alias search"); - searchResponse = prepareSearch("alias1", "foos").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "1", "2", "3", "4"); + assertResponse( + prepareSearch("alias1", "foos").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "1", "2", "3", "4") + ); logger.info("--> checking index and filtering alias search"); - searchResponse = prepareSearch("test", "foos").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "1", "2", "3", "4"); + assertResponse( + prepareSearch("test", "foos").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "1", "2", "3", "4") + ); logger.info("--> checking index and alias wildcard search"); - searchResponse = prepareSearch("te*", "fo*").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "1", "2", "3", "4"); + assertResponse( + prepareSearch("te*", "fo*").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "1", "2", "3", "4") + ); } public void testSearchingFilteringAliasesTwoIndices() throws Exception { @@ -373,55 +396,63 @@ public void testSearchingFilteringAliasesTwoIndices() throws Exception { refresh(); logger.info("--> checking filtering alias for two indices"); - SearchResponse searchResponse = prepareSearch("foos").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "1", "5"); - assertThat( - prepareSearch("foos").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, - equalTo(2L) + assertResponse( + prepareSearch("foos").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "1", "5") + ); + assertResponse( + prepareSearch("foos").setSize(0).setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)) ); logger.info("--> checking filtering alias for one index"); - searchResponse = prepareSearch("bars").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "2"); - assertThat( - prepareSearch("bars").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, - equalTo(1L) + assertResponse( + prepareSearch("bars").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "2") + ); + assertResponse( + prepareSearch("bars").setSize(0).setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)) ); logger.info("--> checking filtering alias for two indices and one complete index"); - searchResponse = prepareSearch("foos", "test1").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "1", "2", "3", "4", "5"); - assertThat( - prepareSearch("foos", "test1").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, - equalTo(5L) + assertResponse( + prepareSearch("foos", "test1").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "1", "2", "3", "4", "5") + ); + assertResponse( 
+ prepareSearch("foos", "test1").setSize(0).setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, equalTo(5L)) ); logger.info("--> checking filtering alias for two indices and non-filtering alias for one index"); - searchResponse = prepareSearch("foos", "aliasToTest1").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "1", "2", "3", "4", "5"); - assertThat( - prepareSearch("foos", "aliasToTest1").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, - equalTo(5L) + assertResponse( + prepareSearch("foos", "aliasToTest1").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "1", "2", "3", "4", "5") + ); + assertResponse( + prepareSearch("foos", "aliasToTest1").setSize(0).setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, equalTo(5L)) ); logger.info("--> checking filtering alias for two indices and non-filtering alias for both indices"); - searchResponse = prepareSearch("foos", "aliasToTests").setQuery(QueryBuilders.matchAllQuery()).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(8L)); - assertThat( - prepareSearch("foos", "aliasToTests").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, - equalTo(8L) + assertResponse( + prepareSearch("foos", "aliasToTests").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, equalTo(8L)) + ); + assertResponse( + prepareSearch("foos", "aliasToTests").setSize(0).setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, equalTo(8L)) ); logger.info("--> checking filtering alias for two indices and non-filtering alias for both indices"); - searchResponse = prepareSearch("foos", "aliasToTests").setQuery(QueryBuilders.termQuery("name", "something")).get(); - assertHits(searchResponse.getHits(), "4", "8"); - assertThat( - prepareSearch("foos", "aliasToTests").setSize(0) - .setQuery(QueryBuilders.termQuery("name", "something")) - .get() - .getHits() - .getTotalHits().value, - equalTo(2L) + assertResponse( + prepareSearch("foos", "aliasToTests").setQuery(QueryBuilders.termQuery("name", "something")), + searchResponse -> assertHits(searchResponse.getHits(), "4", "8") + ); + assertResponse( + prepareSearch("foos", "aliasToTests").setSize(0).setQuery(QueryBuilders.termQuery("name", "something")), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)) ); } @@ -477,58 +508,58 @@ public void testSearchingFilteringAliasesMultipleIndices() throws Exception { refresh(); logger.info("--> checking filtering alias for multiple indices"); - SearchResponse searchResponse = prepareSearch("filter23", "filter13").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "21", "31", "13", "33"); - assertThat( - prepareSearch("filter23", "filter13").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, - equalTo(4L) + assertResponse( + prepareSearch("filter23", "filter13").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "21", "31", "13", "33") + ); + assertResponse( + prepareSearch("filter23", "filter13").setSize(0).setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> 
assertThat(searchResponse.getHits().getTotalHits().value, equalTo(4L)) ); - searchResponse = prepareSearch("filter23", "filter1").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "21", "31", "11", "12", "13"); - assertThat( - prepareSearch("filter23", "filter1").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, - equalTo(5L) + assertResponse( + prepareSearch("filter23", "filter1").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "21", "31", "11", "12", "13") + ); + assertResponse( + prepareSearch("filter23", "filter1").setSize(0).setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, equalTo(5L)) ); - searchResponse = prepareSearch("filter13", "filter1").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "11", "12", "13", "33"); - assertThat( - prepareSearch("filter13", "filter1").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, - equalTo(4L) + assertResponse( + prepareSearch("filter13", "filter1").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "11", "12", "13", "33") + ); + assertResponse( + prepareSearch("filter13", "filter1").setSize(0).setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, equalTo(4L)) ); - searchResponse = prepareSearch("filter13", "filter1", "filter23").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "11", "12", "13", "21", "31", "33"); - assertThat( - prepareSearch("filter13", "filter1", "filter23").setSize(0) - .setQuery(QueryBuilders.matchAllQuery()) - .get() - .getHits() - .getTotalHits().value, - equalTo(6L) + assertResponse( + prepareSearch("filter13", "filter1", "filter23").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "11", "12", "13", "21", "31", "33") + ); + assertResponse( + prepareSearch("filter13", "filter1", "filter23").setSize(0).setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, equalTo(6L)) ); - searchResponse = prepareSearch("filter23", "filter13", "test2").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "21", "22", "23", "31", "13", "33"); - assertThat( - prepareSearch("filter23", "filter13", "test2").setSize(0) - .setQuery(QueryBuilders.matchAllQuery()) - .get() - .getHits() - .getTotalHits().value, - equalTo(6L) + assertResponse( + prepareSearch("filter23", "filter13", "test2").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "21", "22", "23", "31", "13", "33") + ); + assertResponse( + prepareSearch("filter23", "filter13", "test2").setSize(0).setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, equalTo(6L)) ); - searchResponse = prepareSearch("filter23", "filter13", "test1", "test2").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "11", "12", "13", "21", "22", "23", "31", "33"); - assertThat( - prepareSearch("filter23", "filter13", "test1", "test2").setSize(0) - .setQuery(QueryBuilders.matchAllQuery()) - .get() - .getHits() - .getTotalHits().value, - equalTo(8L) + assertResponse( + prepareSearch("filter23", "filter13", "test1", 
"test2").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "11", "12", "13", "21", "22", "23", "31", "33") + ); + assertResponse( + prepareSearch("filter23", "filter13", "test1", "test2").setSize(0).setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, equalTo(8L)) ); } @@ -581,9 +612,9 @@ public void testDeletingByQueryFilteringAliases() throws Exception { refresh(); logger.info("--> checking counts before delete"); - assertThat( - prepareSearch("bars").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, - equalTo(1L) + assertResponse( + prepareSearch("bars").setSize(0).setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)) ); } @@ -1299,22 +1330,29 @@ public void testIndexingAndQueryingHiddenAliases() throws Exception { refresh(writeIndex, nonWriteIndex); // Make sure that the doc written to the alias made it - SearchResponse searchResponse = prepareSearch(writeIndex).setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "2", "3"); + assertResponse( + prepareSearch(writeIndex).setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "2", "3") + ); // Ensure that all docs can be gotten through the alias - searchResponse = prepareSearch(alias).setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "1", "2", "3"); + assertResponse( + prepareSearch(alias).setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "1", "2", "3") + ); // And querying using a wildcard with indices options set to expand hidden - searchResponse = prepareSearch("alias*").setQuery(QueryBuilders.matchAllQuery()) - .setIndicesOptions(IndicesOptions.fromOptions(false, false, true, false, true, true, true, false, false)) - .get(); - assertHits(searchResponse.getHits(), "1", "2", "3"); + assertResponse( + prepareSearch("alias*").setQuery(QueryBuilders.matchAllQuery()) + .setIndicesOptions(IndicesOptions.fromOptions(false, false, true, false, true, true, true, false, false)), + searchResponse -> assertHits(searchResponse.getHits(), "1", "2", "3") + ); // And that querying the alias with a wildcard and no expand options fails - searchResponse = prepareSearch("alias*").setQuery(QueryBuilders.matchAllQuery()).get(); - assertThat(searchResponse.getHits().getHits(), emptyArray()); + assertResponse( + prepareSearch("alias*").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertThat(searchResponse.getHits().getHits(), emptyArray()) + ); } public void testCreateIndexAndAliasWithSameNameFails() { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/broadcast/BroadcastActionsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/broadcast/BroadcastActionsIT.java index 5e8e6c634fa47..c45f980553431 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/broadcast/BroadcastActionsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/broadcast/BroadcastActionsIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.broadcast; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; @@ -18,6 +17,7 @@ import static 
org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; public class BroadcastActionsIT extends ESIntegTestCase { @@ -42,11 +42,12 @@ public void testBroadcastOperations() throws IOException { // check count for (int i = 0; i < 5; i++) { // test successful - SearchResponse countResponse = prepareSearch("test").setSize(0).setQuery(matchAllQuery()).get(); - assertThat(countResponse.getHits().getTotalHits().value, equalTo(2L)); - assertThat(countResponse.getTotalShards(), equalTo(numShards.numPrimaries)); - assertThat(countResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries)); - assertThat(countResponse.getFailedShards(), equalTo(0)); + assertResponse(prepareSearch("test").setSize(0).setQuery(matchAllQuery()), countResponse -> { + assertThat(countResponse.getHits().getTotalHits().value, equalTo(2L)); + assertThat(countResponse.getTotalShards(), equalTo(numShards.numPrimaries)); + assertThat(countResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries)); + assertThat(countResponse.getFailedShards(), equalTo(0)); + }); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java index c273a0b0f7c6b..d8acd45e8525f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java @@ -14,7 +14,6 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.Requests; @@ -45,6 +44,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertExists; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -255,9 +255,10 @@ public void testNoMasterActionsWriteMasterBlock() throws Exception { logger.info("--> here 3"); assertHitCount(clientToMasterlessNode.prepareSearch("test1").setAllowPartialSearchResults(true), 1L); - SearchResponse countResponse = clientToMasterlessNode.prepareSearch("test2").setAllowPartialSearchResults(true).setSize(0).get(); - assertThat(countResponse.getTotalShards(), equalTo(3)); - assertThat(countResponse.getSuccessfulShards(), equalTo(1)); + assertResponse(clientToMasterlessNode.prepareSearch("test2").setAllowPartialSearchResults(true).setSize(0), countResponse -> { + assertThat(countResponse.getTotalShards(), equalTo(3)); + assertThat(countResponse.getSuccessfulShards(), equalTo(1)); + }); TimeValue timeout = TimeValue.timeValueMillis(200); long now = System.currentTimeMillis(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsServiceIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsServiceIT.java new file mode 100644 index 0000000000000..59f4905d5924b --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsServiceIT.java @@ -0,0 +1,198 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsClusterStateUpdateRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.Matchers.equalTo; + +public class MetadataUpdateSettingsServiceIT extends ESIntegTestCase { + + public void testThatNonDynamicSettingChangesTakeEffect() throws Exception { + /* + * This test makes sure that when non-dynamic settings are updated that they actually take effect (as opposed to just being set + * in the cluster state). 
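+ * We use the "index.codec" setting for this because it is a static (non-dynamic) index setting: a new value can only take + * effect once each shard's engine has been reopened with the updated configuration.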
+ */ + createIndex("test", Settings.EMPTY); + MetadataUpdateSettingsService metadataUpdateSettingsService = internalCluster().getCurrentMasterNodeInstance( + MetadataUpdateSettingsService.class + ); + UpdateSettingsClusterStateUpdateRequest request = new UpdateSettingsClusterStateUpdateRequest(); + List<Index> indices = new ArrayList<>(); + for (IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { + for (IndexService indexService : indicesService) { + indices.add(indexService.index()); + } + } + request.indices(indices.toArray(Index.EMPTY_ARRAY)); + request.settings(Settings.builder().put("index.codec", "FastDecompressionCompressingStoredFieldsData").build()); + + // First make sure it fails if reopenShards is not set on the request: + AtomicBoolean expectedFailureOccurred = new AtomicBoolean(false); + metadataUpdateSettingsService.updateSettings(request, new ActionListener<>() { + @Override + public void onResponse(AcknowledgedResponse acknowledgedResponse) { + fail("Should have failed updating a non-dynamic setting without reopenShards set to true"); + } + + @Override + public void onFailure(Exception e) { + expectedFailureOccurred.set(true); + } + }); + assertBusy(() -> assertThat(expectedFailureOccurred.get(), equalTo(true))); + + // Now we set reopenShards and expect it to work: + request.reopenShards(true); + AtomicBoolean success = new AtomicBoolean(false); + metadataUpdateSettingsService.updateSettings(request, new ActionListener<>() { + @Override + public void onResponse(AcknowledgedResponse acknowledgedResponse) { + success.set(true); + } + + @Override + public void onFailure(Exception e) { + fail(e); + } + }); + assertBusy(() -> assertThat(success.get(), equalTo(true))); + + // Now we look into the IndexShard objects to make sure that the codec was actually updated (vs just the setting): + for (IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { + for (IndexService indexService : indicesService) { + assertBusy(() -> { + for (IndexShard indexShard : indexService) { + final Engine engine = indexShard.getEngineOrNull(); + assertNotNull("engine is null for " + indexService.index().getName(), engine); + assertThat(engine.getEngineConfig().getCodec().getName(), equalTo("FastDecompressionCompressingStoredFieldsData")); + } + }); + } + } + } + + public void testThatNonDynamicSettingChangesDoNotUnnecessarilyCauseReopens() throws Exception { + /* + * This test makes sure that if a setting change request for a non-dynamic setting is made on an index that already has that + * value, we don't unassign the shards to apply the change -- there is no need. First we set a non-dynamic setting for the + * first time, and see that the shards for the index are unassigned. Then we set a different dynamic setting, and include setting + * the original non-dynamic setting to the same value as the previous request. We make sure that the new setting comes through + * but that the shards are not unassigned.
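+ * (Applying the non-dynamic change in the first request is expected to briefly leave the index's shards UNASSIGNED while + * they are reopened; the second request must not cause that again.)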
+ */ + final String indexName = "test"; + createIndex(indexName, Settings.EMPTY); + MetadataUpdateSettingsService metadataUpdateSettingsService = internalCluster().getCurrentMasterNodeInstance( + MetadataUpdateSettingsService.class + ); + UpdateSettingsClusterStateUpdateRequest request = new UpdateSettingsClusterStateUpdateRequest(); + List<Index> indices = new ArrayList<>(); + for (IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { + for (IndexService indexService : indicesService) { + indices.add(indexService.index()); + } + } + request.indices(indices.toArray(Index.EMPTY_ARRAY)); + request.settings(Settings.builder().put("index.codec", "FastDecompressionCompressingStoredFieldsData").build()); + request.reopenShards(true); + + ClusterService clusterService = internalCluster().getInstance(ClusterService.class); + AtomicBoolean shardsUnassigned = new AtomicBoolean(false); + AtomicBoolean expectedSettingsChangeInClusterState = new AtomicBoolean(false); + AtomicReference<String> expectedSetting = new AtomicReference<>("index.codec"); + AtomicReference<String> expectedSettingValue = new AtomicReference<>("FastDecompressionCompressingStoredFieldsData"); + clusterService.addListener(event -> { + // We want the cluster change event where the setting is applied. This will be the same one where shards are unassigned. + if (event.metadataChanged() + && event.state().metadata().index(indexName) != null + && expectedSettingValue.get().equals(event.state().metadata().index(indexName).getSettings().get(expectedSetting.get()))) { + expectedSettingsChangeInClusterState.set(true); + if (event.routingTableChanged() && event.state().routingTable().indicesRouting().containsKey(indexName)) { + if (hasUnassignedShards(event.state(), indexName)) { + shardsUnassigned.set(true); + } + } + } + }); + + AtomicBoolean success = new AtomicBoolean(false); + // Make the first request, just to set things up: + metadataUpdateSettingsService.updateSettings(request, new ActionListener<>() { + @Override + public void onResponse(AcknowledgedResponse acknowledgedResponse) { + success.set(true); + } + + @Override + public void onFailure(Exception e) { + fail(e); + } + }); + assertBusy(() -> assertThat(success.get(), equalTo(true))); + assertBusy(() -> assertThat(expectedSettingsChangeInClusterState.get(), equalTo(true))); + assertThat(shardsUnassigned.get(), equalTo(true)); + + assertBusy(() -> assertThat(hasUnassignedShards(clusterService.state(), indexName), equalTo(false))); + + // Same request, except now we'll also set the dynamic "index.max_result_window" setting: + request.settings( + Settings.builder() + .put("index.codec", "FastDecompressionCompressingStoredFieldsData") + .put("index.max_result_window", "1500") + .build() + ); + success.set(false); + expectedSettingsChangeInClusterState.set(false); + shardsUnassigned.set(false); + expectedSetting.set("index.max_result_window"); + expectedSettingValue.set("1500"); + // Making this request ought to add this new setting but not unassign the shards: + metadataUpdateSettingsService.updateSettings(request, new ActionListener<>() { + @Override + public void onResponse(AcknowledgedResponse acknowledgedResponse) { + success.set(true); + } + + @Override + public void onFailure(Exception e) { + fail(e); + } + }); + + assertBusy(() -> assertThat(success.get(), equalTo(true))); + assertBusy(() -> assertThat(expectedSettingsChangeInClusterState.get(), equalTo(true))); + assertThat(shardsUnassigned.get(), equalTo(false)); + + } + + private boolean 
hasUnassignedShards(ClusterState state, String indexName) { + return state.routingTable() + .indicesRouting() + .get(indexName) + .allShards() + .anyMatch(shardRoutingTable -> shardRoutingTable.shardsWithState(ShardRoutingState.UNASSIGNED).size() > 0); + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java index 206b866bd4758..a12f7feb05b48 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java @@ -377,7 +377,6 @@ private void assertEngineTypes() { } } - @AwaitsFix(bugUrl = "ES-4677") public void testRelocation() { var routingTableWatcher = new RoutingTableWatcher(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorServiceIT.java similarity index 97% rename from server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorServiceIT.java rename to server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorServiceIT.java index e85edc5805482..b862d0b2f20b6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorServiceIT.java @@ -6,13 +6,14 @@ * Side Public License, v 1. 
*/ -package org.elasticsearch.cluster.routing.allocation; +package org.elasticsearch.cluster.routing.allocation.shards; import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.routing.RoutingNodes; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.health.HealthIndicatorResult; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/document/DocumentActionsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/document/DocumentActionsIT.java index 42bc0f19bf757..1ee91c5cd5f3b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/document/DocumentActionsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/document/DocumentActionsIT.java @@ -20,7 +20,6 @@ import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.Strings; @@ -35,6 +34,7 @@ import static org.elasticsearch.action.DocWriteRequest.OpType; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; @@ -154,22 +154,23 @@ public void testIndexActions() throws Exception { // check count for (int i = 0; i < 5; i++) { // test successful - SearchResponse countResponse = prepareSearch("test").setSize(0).setQuery(matchAllQuery()).execute().actionGet(); - assertNoFailures(countResponse); - assertThat(countResponse.getHits().getTotalHits().value, equalTo(2L)); - assertThat(countResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries)); - assertThat(countResponse.getFailedShards(), equalTo(0)); + assertNoFailuresAndResponse(prepareSearch("test").setSize(0).setQuery(matchAllQuery()), countResponse -> { + assertThat(countResponse.getHits().getTotalHits().value, equalTo(2L)); + assertThat(countResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries)); + assertThat(countResponse.getFailedShards(), equalTo(0)); + }); // count with no query is a match all one - countResponse = prepareSearch("test").setSize(0).execute().actionGet(); - assertThat( - "Failures " + countResponse.getShardFailures(), - countResponse.getShardFailures() == null ? 0 : countResponse.getShardFailures().length, - equalTo(0) - ); - assertThat(countResponse.getHits().getTotalHits().value, equalTo(2L)); - assertThat(countResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries)); - assertThat(countResponse.getFailedShards(), equalTo(0)); + assertNoFailuresAndResponse(prepareSearch("test").setSize(0), countResponse -> { + assertThat( + "Failures " + countResponse.getShardFailures(), + countResponse.getShardFailures() == null ? 
0 : countResponse.getShardFailures().length, + equalTo(0) + ); + assertThat(countResponse.getHits().getTotalHits().value, equalTo(2L)); + assertThat(countResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries)); + assertThat(countResponse.getFailedShards(), equalTo(0)); + }); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/features/ClusterFeaturesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/features/ClusterFeaturesIT.java index fe447eca6e8fd..24bf198b7b42f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/features/ClusterFeaturesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/features/ClusterFeaturesIT.java @@ -18,7 +18,7 @@ import java.util.stream.Collectors; import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasKey; public class ClusterFeaturesIT extends ESIntegTestCase { @@ -29,7 +29,7 @@ public void testClusterHasFeatures() { FeatureService service = internalCluster().getCurrentMasterNodeInstance(FeatureService.class); - assertThat(service.getNodeFeatures(), hasItem(FeatureService.FEATURES_SUPPORTED.id())); + assertThat(service.getNodeFeatures(), hasKey(FeatureService.FEATURES_SUPPORTED.id())); // check the nodes all have a feature in their cluster state (there should always be features_supported) var response = clusterAdmin().state(new ClusterStateRequest().clear().nodes(true)).actionGet(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/health/GetHealthCancellationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/health/GetHealthCancellationIT.java index 7a9fd0b6ccf60..eda8a4eb9e459 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/health/GetHealthCancellationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/health/GetHealthCancellationIT.java @@ -90,7 +90,7 @@ public void testCancellation() throws Exception { } final ClusterService clusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); - final PlainActionFuture findHealthNodeFuture = PlainActionFuture.newFuture(); + final PlainActionFuture findHealthNodeFuture = new PlainActionFuture<>(); // the health node might take a bit of time to be assigned by the persistent task framework so we wait until we have a health // node in the cluster before proceeding with the test // proceeding with the execution before the health node assignment would yield a non-deterministic behaviour as we diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/SearchIdleIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/SearchIdleIT.java index 22bb5974ad550..13569d4d7d1ae 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/SearchIdleIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/SearchIdleIT.java @@ -95,7 +95,7 @@ private void runTestAutomaticRefresh(final IntToLongFunction count) throws Inter client().prepareIndex("test").setId("0").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); indexingDone.countDown(); // one doc is indexed above blocking IndexShard shard = indexService.getShard(0); - PlainActionFuture<Boolean> future = PlainActionFuture.newFuture(); + PlainActionFuture<Boolean> future = new PlainActionFuture<>(); shard.scheduledRefresh(future); boolean hasRefreshed = future.actionGet(); if (randomTimeValue == TimeValue.ZERO) { @@ -193,7 +193,7 @@ public void testPendingRefreshWithIntervalChange() 
throws Exception { } private static void scheduleRefresh(IndexShard shard, boolean expectRefresh) { - PlainActionFuture<Boolean> future = PlainActionFuture.newFuture(); + PlainActionFuture<Boolean> future = new PlainActionFuture<>(); shard.scheduledRefresh(future); assertThat(future.actionGet(), equalTo(expectRefresh)); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java index e34d5059b4991..1a5f913e4bab2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java @@ -290,7 +290,7 @@ protected boolean masterSupportsFetchingLatestSnapshots() { } }; - PlainActionFuture<Optional<ShardSnapshot>> latestSnapshots = PlainActionFuture.newFuture(); + PlainActionFuture<Optional<ShardSnapshot>> latestSnapshots = new PlainActionFuture<>(); shardSnapshotsService.fetchLatestSnapshotsForShard(shardId, latestSnapshots); assertThat(latestSnapshots.actionGet().isPresent(), is(equalTo(false))); } @@ -298,7 +298,7 @@ protected boolean masterSupportsFetchingLatestSnapshots() { private Optional<ShardSnapshot> getLatestShardSnapshot(ShardId shardId) throws Exception { ShardSnapshotsService shardSnapshotsService = getShardSnapshotsService(); - PlainActionFuture<Optional<ShardSnapshot>> future = PlainActionFuture.newFuture(); + PlainActionFuture<Optional<ShardSnapshot>> future = new PlainActionFuture<>(); shardSnapshotsService.fetchLatestSnapshotsForShard(shardId, future); return future.get(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/ComposableTemplateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/ComposableTemplateIT.java index 29c38c07fcbd7..0e385768fc256 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/ComposableTemplateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/ComposableTemplateIT.java @@ -33,9 +33,9 @@ public void testComponentTemplatesCanBeUpdatedAfterRestart() throws Exception { }"""), null), 3L, Collections.singletonMap("eggplant", "potato")); client().execute(PutComponentTemplateAction.INSTANCE, new PutComponentTemplateAction.Request("my-ct").componentTemplate(ct)).get(); - ComposableIndexTemplate cit = new ComposableIndexTemplate( - Collections.singletonList("coleslaw"), - new Template(null, new CompressedXContent(""" + ComposableIndexTemplate cit = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("coleslaw")) + .template(new Template(null, new CompressedXContent(""" { "dynamic": false, "properties": { @@ -43,12 +43,12 @@ public void testComponentTemplatesCanBeUpdatedAfterRestart() throws Exception { "type": "keyword" } } - }"""), null), - Collections.singletonList("my-ct"), - 4L, - 5L, - Collections.singletonMap("egg", "bread") - ); + }"""), null)) + .componentTemplates(Collections.singletonList("my-ct")) + .priority(4L) + .version(5L) + .metadata(Collections.singletonMap("egg", "bread")) + .build(); client().execute( PutComposableIndexTemplateAction.INSTANCE, new PutComposableIndexTemplateAction.Request("my-it").indexTemplate(cit) @@ -68,9 +68,9 @@ public void testComponentTemplatesCanBeUpdatedAfterRestart() throws Exception { }"""), null), 3L, Collections.singletonMap("eggplant", "potato")); client().execute(PutComponentTemplateAction.INSTANCE, new 
PutComponentTemplateAction.Request("my-ct").componentTemplate(ct2)).get(); - ComposableIndexTemplate cit2 = new ComposableIndexTemplate( - Collections.singletonList("coleslaw"), - new Template(null, new CompressedXContent(""" + ComposableIndexTemplate cit2 = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("coleslaw")) + .template(new Template(null, new CompressedXContent(""" { "dynamic": true, "properties": { @@ -78,12 +78,12 @@ public void testComponentTemplatesCanBeUpdatedAfterRestart() throws Exception { "type": "integer" } } - }"""), null), - Collections.singletonList("my-ct"), - 4L, - 5L, - Collections.singletonMap("egg", "bread") - ); + }"""), null)) + .componentTemplates(Collections.singletonList("my-ct")) + .priority(4L) + .version(5L) + .metadata(Collections.singletonMap("egg", "bread")) + .build(); client().execute( PutComposableIndexTemplateAction.INSTANCE, new PutComposableIndexTemplateAction.Request("my-it").indexTemplate(cit2) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestStatsNamesAndTypesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestStatsNamesAndTypesIT.java index 547da987dcb91..2a4174ba427af 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestStatsNamesAndTypesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestStatsNamesAndTypesIT.java @@ -45,11 +45,6 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { return List.of(CustomIngestTestPlugin.class, CustomScriptPlugin.class); } - @Override - protected boolean ignoreExternalCluster() { - return true; - } - @SuppressWarnings("unchecked") public void testIngestStatsNamesAndTypes() throws IOException { String pipeline1 = org.elasticsearch.core.Strings.format(""" diff --git a/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorFullRestartIT.java b/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorFullRestartIT.java index f51ff1da9bfc9..d1c72a9650b85 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorFullRestartIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorFullRestartIT.java @@ -34,10 +34,6 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.singletonList(TestPersistentTasksPlugin.class); } - protected boolean ignoreExternalCluster() { - return true; - } - public void testFullClusterRestart() throws Exception { PersistentTasksService service = internalCluster().getInstance(PersistentTasksService.class); int numberOfTasks = randomIntBetween(1, 10); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java index c91f5138e919f..3cc90a6795e37 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java @@ -51,10 +51,6 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.singletonList(TestPersistentTasksPlugin.class); } - protected boolean ignoreExternalCluster() { - return true; - } - @Before public void resetNonClusterStateCondition() { TestPersistentTasksExecutor.setNonClusterStateCondition(true); } diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java b/server/src/internalClusterTest/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java index cb24b78a499ac..d9aa15ed6e2f5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java @@ -36,11 +36,6 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { return singletonList(TestPersistentTasksPlugin.class); } - @Override - protected boolean ignoreExternalCluster() { - return true; - } - /** * Test that the {@link EnableAssignmentDecider#CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING} setting correctly * prevents persistent tasks from being assigned after a cluster restart. diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java index be8053a1d6866..80c10b7b12296 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java @@ -338,7 +338,7 @@ private PlainActionFuture getLatestSnapshotForShardFut boolean useAllRepositoriesRequest ) { ShardId shardId = new ShardId(new Index(indexName, "__na__"), shard); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); final GetShardSnapshotRequest request; if (useAllRepositoriesRequest && randomBoolean()) { request = GetShardSnapshotRequest.latestSnapshotInAllRepositories(shardId); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java index 7d444eef787c0..0b1802fc71470 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java @@ -90,7 +90,7 @@ private ActionFuture startBlockedCleanup(String repoN final BlobStoreRepository repository = getRepositoryOnMaster(repoName); logger.info("--> creating a garbage data blob"); - final PlainActionFuture garbageFuture = PlainActionFuture.newFuture(); + final PlainActionFuture garbageFuture = new PlainActionFuture<>(); repository.threadPool() .generic() .execute( @@ -137,7 +137,7 @@ public void testCleanupOldIndexN() throws ExecutionException, InterruptedExcepti final BlobStoreRepository repository = getRepositoryOnMaster(repoName); logger.info("--> write two outdated index-N blobs"); for (int i = 0; i < 2; ++i) { - final PlainActionFuture createOldIndexNFuture = PlainActionFuture.newFuture(); + final PlainActionFuture createOldIndexNFuture = new PlainActionFuture<>(); final int generation = i; repository.threadPool() .generic() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java index 345504582305a..ec01e34976058 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java @@ 
-8,7 +8,6 @@ package org.elasticsearch.search; -import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.search.MultiSearchAction; @@ -33,7 +32,6 @@ import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.test.AbstractSearchCancellationTestCase; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.junit.annotations.TestIssueLogging; import org.elasticsearch.transport.TransportService; import java.util.ArrayList; @@ -51,7 +49,6 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.notNullValue; -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/101739") @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE) public class SearchCancellationIT extends AbstractSearchCancellationTestCase { @@ -230,11 +227,10 @@ public void testCancelMultiSearch() throws Exception { } } - @TestIssueLogging( - value = "org.elasticsearch.action.search:TRACE,org.elasticsearch.search:TRACE," + "org.elasticsearch.tasks:TRACE", - issueUrl = "https://github.com/elastic/elasticsearch/issues/99929" - ) public void testCancelFailedSearchWhenPartialResultDisallowed() throws Exception { + // Have at least two nodes so that we have parallel execution of two requests guaranteed even if max concurrent requests per node + // are limited to 1 + internalCluster().ensureAtLeastNumDataNodes(2); int numberOfShards = between(2, 5); createIndex("test", numberOfShards, 0); indexTestData(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/AggregationsIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/AggregationsIntegrationIT.java index cc74dcc3d0d28..ac18177187372 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/AggregationsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/AggregationsIntegrationIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.search.aggregations; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.test.ESIntegTestCase; @@ -19,7 +18,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; @ESIntegTestCase.SuiteScopeTestCase public class AggregationsIntegrationIT extends ESIntegTestCase { @@ -39,25 +38,32 @@ public void setupSuiteScopeCluster() throws Exception { public void testScroll() { final int size = randomIntBetween(1, 4); - SearchResponse response = prepareSearch("index").setSize(size) - .setScroll(TimeValue.timeValueMinutes(1)) - .addAggregation(terms("f").field("f")) - .get(); - assertNoFailures(response); - Aggregations aggregations = response.getAggregations(); - assertNotNull(aggregations); - Terms terms = aggregations.get("f"); - assertEquals(Math.min(numDocs, 3L), terms.getBucketByKey("0").getDocCount()); - - int total = response.getHits().getHits().length; - while (response.getHits().getHits().length > 0) { - response = 
client().prepareSearchScroll(response.getScrollId()).setScroll(TimeValue.timeValueMinutes(1)).get(); - assertNoFailures(response); - assertNull(response.getAggregations()); - total += response.getHits().getHits().length; + final String[] scroll = new String[1]; + final int[] total = new int[1]; + assertNoFailuresAndResponse( + prepareSearch("index").setSize(size).setScroll(TimeValue.timeValueMinutes(1)).addAggregation(terms("f").field("f")), + response -> { + Aggregations aggregations = response.getAggregations(); + assertNotNull(aggregations); + Terms terms = aggregations.get("f"); + assertEquals(Math.min(numDocs, 3L), terms.getBucketByKey("0").getDocCount()); + scroll[0] = response.getScrollId(); + total[0] = response.getHits().getHits().length; + } + ); + int currentTotal = 0; + while (total[0] - currentTotal > 0) { + currentTotal = total[0]; + assertNoFailuresAndResponse( + client().prepareSearchScroll(scroll[0]).setScroll(TimeValue.timeValueMinutes(1)), + scrollResponse -> { + assertNull(scrollResponse.getAggregations()); + total[0] += scrollResponse.getHits().getHits().length; + scroll[0] = scrollResponse.getScrollId(); + } + ); } - clearScroll(response.getScrollId()); - assertEquals(numDocs, total); + clearScroll(scroll[0]); + assertEquals(numDocs, total[0]); } - } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/CombiIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/CombiIT.java index a0144d30a4728..8c110a298a1d0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/CombiIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/CombiIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.search.aggregations; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.missing.Missing; @@ -24,7 +23,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.missing; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.is; @@ -60,27 +59,28 @@ public void testMultipleAggsOnSameField_WithDifferentRequiredValueSourceType() t indexRandom(true, builders); ensureSearchable(); + final long finalMissingValues = missingValues; SubAggCollectionMode aggCollectionMode = randomFrom(SubAggCollectionMode.values()); - SearchResponse response = prepareSearch("idx").addAggregation(missing("missing_values").field("value")) - .addAggregation(terms("values").field("value").collectMode(aggCollectionMode)) - .get(); - - assertNoFailures(response); - - Aggregations aggs = response.getAggregations(); - - Missing missing = aggs.get("missing_values"); - assertNotNull(missing); - assertThat(missing.getDocCount(), equalTo(missingValues)); - - Terms terms = aggs.get("values"); - assertNotNull(terms); - List buckets = terms.getBuckets(); - assertThat(buckets.size(), 
equalTo(values.size())); - for (Terms.Bucket bucket : buckets) { - values.remove(((Number) bucket.getKey()).intValue()); - } - assertTrue(values.isEmpty()); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation(missing("missing_values").field("value")) + .addAggregation(terms("values").field("value").collectMode(aggCollectionMode)), + response -> { + Aggregations aggs = response.getAggregations(); + + Missing missing = aggs.get("missing_values"); + assertNotNull(missing); + assertThat(missing.getDocCount(), equalTo(finalMissingValues)); + + Terms terms = aggs.get("values"); + assertNotNull(terms); + List buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(values.size())); + for (Terms.Bucket bucket : buckets) { + values.remove(((Number) bucket.getKey()).intValue()); + } + assertTrue(values.isEmpty()); + } + ); } /** @@ -108,13 +108,16 @@ public void testSubAggregationForTopAggregationOnUnmappedField() throws Exceptio ensureSearchable("idx"); SubAggCollectionMode aggCollectionMode = randomFrom(SubAggCollectionMode.values()); - SearchResponse searchResponse = prepareSearch("idx").addAggregation( - histogram("values").field("value1").interval(1).subAggregation(terms("names").field("name").collectMode(aggCollectionMode)) - ).get(); - - assertThat(searchResponse.getHits().getTotalHits().value, Matchers.equalTo(0L)); - Histogram values = searchResponse.getAggregations().get("values"); - assertThat(values, notNullValue()); - assertThat(values.getBuckets().isEmpty(), is(true)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("values").field("value1").interval(1).subAggregation(terms("names").field("name").collectMode(aggCollectionMode)) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, Matchers.equalTo(0L)); + Histogram values = response.getAggregations().get("values"); + assertThat(values, notNullValue()); + assertThat(values.getBuckets().isEmpty(), is(true)); + } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/EquivalenceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/EquivalenceIT.java index ea896c73f8882..8b7ddcaf7ab24 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/EquivalenceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/EquivalenceIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Maps; @@ -57,6 +56,8 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.core.IsNull.notNullValue; @@ -164,34 +165,35 @@ public void testRandomRanges() throws Exception { reqBuilder = reqBuilder.addAggregation(filter("filter" + i, filter)); } - 
SearchResponse resp = reqBuilder.get(); - Range range = resp.getAggregations().get("range"); - List buckets = range.getBuckets(); + assertResponse(reqBuilder, response -> { + Range range = response.getAggregations().get("range"); + List buckets = range.getBuckets(); - Map bucketMap = Maps.newMapWithExpectedSize(buckets.size()); - for (Bucket bucket : buckets) { - bucketMap.put(bucket.getKeyAsString(), bucket); - } + Map bucketMap = Maps.newMapWithExpectedSize(buckets.size()); + for (Bucket bucket : buckets) { + bucketMap.put(bucket.getKeyAsString(), bucket); + } - for (int i = 0; i < ranges.length; ++i) { + for (int i = 0; i < ranges.length; ++i) { - long count = 0; - for (double[] values : docs) { - for (double value : values) { - if (value >= ranges[i][0] && value < ranges[i][1]) { - ++count; - break; + long count = 0; + for (double[] values : docs) { + for (double value : values) { + if (value >= ranges[i][0] && value < ranges[i][1]) { + ++count; + break; + } } } - } - final Range.Bucket bucket = bucketMap.get(Integer.toString(i)); - assertEquals(bucket.getKeyAsString(), Integer.toString(i), bucket.getKeyAsString()); - assertEquals(bucket.getKeyAsString(), count, bucket.getDocCount()); + final Range.Bucket bucket = bucketMap.get(Integer.toString(i)); + assertEquals(bucket.getKeyAsString(), Integer.toString(i), bucket.getKeyAsString()); + assertEquals(bucket.getKeyAsString(), count, bucket.getDocCount()); - final Filter filter = resp.getAggregations().get("filter" + i); - assertThat(filter.getDocCount(), equalTo(count)); - } + final Filter filter = response.getAggregations().get("filter" + i); + assertThat(filter.getDocCount(), equalTo(count)); + } + }); } // test long/double/string terms aggs with high number of buckets that require array growth @@ -254,68 +256,71 @@ public void testDuelTerms() throws Exception { assertNoFailures(indicesAdmin().prepareRefresh("idx").setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute().get()); - SearchResponse resp = prepareSearch("idx").addAggregation( - terms("long").field("long_values") - .size(maxNumTerms) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(min("min").field("num")) - ) - .addAggregation( - terms("double").field("double_values") - .size(maxNumTerms) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(max("max").field("num")) - ) - .addAggregation( - terms("string_map").field("string_values") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .executionHint(TermsAggregatorFactory.ExecutionMode.MAP.toString()) + assertResponse( + prepareSearch("idx").addAggregation( + terms("long").field("long_values") .size(maxNumTerms) - .subAggregation(stats("stats").field("num")) - ) - .addAggregation( - terms("string_global_ordinals").field("string_values") .collectMode(randomFrom(SubAggCollectionMode.values())) - .executionHint(TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS.toString()) - .size(maxNumTerms) - .subAggregation(extendedStats("stats").field("num")) - ) - .addAggregation( - terms("string_global_ordinals_doc_values").field("string_values.doc_values") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .executionHint(TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS.toString()) - .size(maxNumTerms) - .subAggregation(extendedStats("stats").field("num")) + .subAggregation(min("min").field("num")) ) - .get(); - assertAllSuccessful(resp); - assertEquals(numDocs, resp.getHits().getTotalHits().value); - - final Terms longTerms = 
resp.getAggregations().get("long"); - final Terms doubleTerms = resp.getAggregations().get("double"); - final Terms stringMapTerms = resp.getAggregations().get("string_map"); - final Terms stringGlobalOrdinalsTerms = resp.getAggregations().get("string_global_ordinals"); - final Terms stringGlobalOrdinalsDVTerms = resp.getAggregations().get("string_global_ordinals_doc_values"); - - assertEquals(valuesSet.size(), longTerms.getBuckets().size()); - assertEquals(valuesSet.size(), doubleTerms.getBuckets().size()); - assertEquals(valuesSet.size(), stringMapTerms.getBuckets().size()); - assertEquals(valuesSet.size(), stringGlobalOrdinalsTerms.getBuckets().size()); - assertEquals(valuesSet.size(), stringGlobalOrdinalsDVTerms.getBuckets().size()); - for (Terms.Bucket bucket : longTerms.getBuckets()) { - final Terms.Bucket doubleBucket = doubleTerms.getBucketByKey(Double.toString(Long.parseLong(bucket.getKeyAsString()))); - final Terms.Bucket stringMapBucket = stringMapTerms.getBucketByKey(bucket.getKeyAsString()); - final Terms.Bucket stringGlobalOrdinalsBucket = stringGlobalOrdinalsTerms.getBucketByKey(bucket.getKeyAsString()); - final Terms.Bucket stringGlobalOrdinalsDVBucket = stringGlobalOrdinalsDVTerms.getBucketByKey(bucket.getKeyAsString()); - assertNotNull(doubleBucket); - assertNotNull(stringMapBucket); - assertNotNull(stringGlobalOrdinalsBucket); - assertNotNull(stringGlobalOrdinalsDVBucket); - assertEquals(bucket.getDocCount(), doubleBucket.getDocCount()); - assertEquals(bucket.getDocCount(), stringMapBucket.getDocCount()); - assertEquals(bucket.getDocCount(), stringGlobalOrdinalsBucket.getDocCount()); - assertEquals(bucket.getDocCount(), stringGlobalOrdinalsDVBucket.getDocCount()); - } + .addAggregation( + terms("double").field("double_values") + .size(maxNumTerms) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(max("max").field("num")) + ) + .addAggregation( + terms("string_map").field("string_values") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .executionHint(TermsAggregatorFactory.ExecutionMode.MAP.toString()) + .size(maxNumTerms) + .subAggregation(stats("stats").field("num")) + ) + .addAggregation( + terms("string_global_ordinals").field("string_values") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .executionHint(TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS.toString()) + .size(maxNumTerms) + .subAggregation(extendedStats("stats").field("num")) + ) + .addAggregation( + terms("string_global_ordinals_doc_values").field("string_values.doc_values") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .executionHint(TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS.toString()) + .size(maxNumTerms) + .subAggregation(extendedStats("stats").field("num")) + ), + response -> { + assertAllSuccessful(response); + assertEquals(numDocs, response.getHits().getTotalHits().value); + + final Terms longTerms = response.getAggregations().get("long"); + final Terms doubleTerms = response.getAggregations().get("double"); + final Terms stringMapTerms = response.getAggregations().get("string_map"); + final Terms stringGlobalOrdinalsTerms = response.getAggregations().get("string_global_ordinals"); + final Terms stringGlobalOrdinalsDVTerms = response.getAggregations().get("string_global_ordinals_doc_values"); + + assertEquals(valuesSet.size(), longTerms.getBuckets().size()); + assertEquals(valuesSet.size(), doubleTerms.getBuckets().size()); + assertEquals(valuesSet.size(), stringMapTerms.getBuckets().size()); + 
assertEquals(valuesSet.size(), stringGlobalOrdinalsTerms.getBuckets().size()); + assertEquals(valuesSet.size(), stringGlobalOrdinalsDVTerms.getBuckets().size()); + for (Terms.Bucket bucket : longTerms.getBuckets()) { + final Terms.Bucket doubleBucket = doubleTerms.getBucketByKey(Double.toString(Long.parseLong(bucket.getKeyAsString()))); + final Terms.Bucket stringMapBucket = stringMapTerms.getBucketByKey(bucket.getKeyAsString()); + final Terms.Bucket stringGlobalOrdinalsBucket = stringGlobalOrdinalsTerms.getBucketByKey(bucket.getKeyAsString()); + final Terms.Bucket stringGlobalOrdinalsDVBucket = stringGlobalOrdinalsDVTerms.getBucketByKey(bucket.getKeyAsString()); + assertNotNull(doubleBucket); + assertNotNull(stringMapBucket); + assertNotNull(stringGlobalOrdinalsBucket); + assertNotNull(stringGlobalOrdinalsDVBucket); + assertEquals(bucket.getDocCount(), doubleBucket.getDocCount()); + assertEquals(bucket.getDocCount(), stringMapBucket.getDocCount()); + assertEquals(bucket.getDocCount(), stringGlobalOrdinalsBucket.getDocCount()); + assertEquals(bucket.getDocCount(), stringGlobalOrdinalsDVBucket.getDocCount()); + } + } + ); } // Duel between histograms and scripted terms @@ -355,25 +360,26 @@ public void testDuelTermsHistogram() throws Exception { Map params = new HashMap<>(); params.put("interval", interval); - SearchResponse resp = prepareSearch("idx").addAggregation( - terms("terms").field("values") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "floor(_value / interval)", params)) - .size(maxNumTerms) - ).addAggregation(histogram("histo").field("values").interval(interval).minDocCount(1)).get(); - - assertNoFailures(resp); - - Terms terms = resp.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - Histogram histo = resp.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(terms.getBuckets().size(), equalTo(histo.getBuckets().size())); - for (Histogram.Bucket bucket : histo.getBuckets()) { - final double key = ((Number) bucket.getKey()).doubleValue() / interval; - final Terms.Bucket termsBucket = terms.getBucketByKey(String.valueOf(key)); - assertEquals(bucket.getDocCount(), termsBucket.getDocCount()); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("terms").field("values") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "floor(_value / interval)", params)) + .size(maxNumTerms) + ).addAggregation(histogram("histo").field("values").interval(interval).minDocCount(1)), + response -> { + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(terms.getBuckets().size(), equalTo(histo.getBuckets().size())); + for (Histogram.Bucket bucket : histo.getBuckets()) { + final double key = ((Number) bucket.getKey()).doubleValue() / interval; + final Terms.Bucket termsBucket = terms.getBucketByKey(String.valueOf(key)); + assertEquals(bucket.getDocCount(), termsBucket.getDocCount()); + } + } + ); } public void testLargeNumbersOfPercentileBuckets() throws Exception { @@ -398,13 +404,17 @@ public void testLargeNumbersOfPercentileBuckets() throws Exception { } indexRandom(true, indexingRequests); - SearchResponse response = prepareSearch("idx").addAggregation( - terms("terms").field("double_value") - 
.collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(percentiles("pcts").field("double_value")) - ).get(); - assertAllSuccessful(response); - assertEquals(numDocs, response.getHits().getTotalHits().value); + assertResponse( + prepareSearch("idx").addAggregation( + terms("terms").field("double_value") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(percentiles("pcts").field("double_value")) + ), + response -> { + assertAllSuccessful(response); + assertEquals(numDocs, response.getHits().getTotalHits().value); + } + ); } // https://github.com/elastic/elasticsearch/issues/6435 @@ -412,41 +422,42 @@ public void testReduce() throws Exception { createIndex("idx"); final int value = randomIntBetween(0, 10); indexRandom(true, client().prepareIndex("idx").setSource("f", value)); - SearchResponse response = prepareSearch("idx").addAggregation( - filter("filter", QueryBuilders.matchAllQuery()).subAggregation( - range("range").field("f").addUnboundedTo(6).addUnboundedFrom(6).subAggregation(sum("sum").field("f")) - ) - ).get(); - - assertNoFailures(response); - - Filter filter = response.getAggregations().get("filter"); - assertNotNull(filter); - assertEquals(1, filter.getDocCount()); - - Range range = filter.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - List buckets = range.getBuckets(); - assertThat(buckets.size(), equalTo(2)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("*-6.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0)); - assertThat(bucket.getDocCount(), equalTo(value < 6 ? 1L : 0L)); - Sum sum = bucket.getAggregations().get("sum"); - assertEquals(value < 6 ? value : 0, sum.value(), 0d); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("6.0-*")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(bucket.getDocCount(), equalTo(value >= 6 ? 1L : 0L)); - sum = bucket.getAggregations().get("sum"); - assertEquals(value >= 6 ? value : 0, sum.value(), 0d); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + filter("filter", QueryBuilders.matchAllQuery()).subAggregation( + range("range").field("f").addUnboundedTo(6).addUnboundedFrom(6).subAggregation(sum("sum").field("f")) + ) + ), + response -> { + Filter filter = response.getAggregations().get("filter"); + assertNotNull(filter); + assertEquals(1, filter.getDocCount()); + + Range range = filter.getAggregations().get("range"); + assertThat(range, notNullValue()); + assertThat(range.getName(), equalTo("range")); + List buckets = range.getBuckets(); + assertThat(buckets.size(), equalTo(2)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("*-6.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0)); + assertThat(bucket.getDocCount(), equalTo(value < 6 ? 1L : 0L)); + Sum sum = bucket.getAggregations().get("sum"); + assertEquals(value < 6 ? 
value : 0, sum.value(), 0d); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("6.0-*")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(bucket.getDocCount(), equalTo(value >= 6 ? 1L : 0L)); + sum = bucket.getAggregations().get("sum"); + assertEquals(value >= 6 ? value : 0, sum.value(), 0d); + } + ); } private void assertEquals(Terms t1, Terms t2) { @@ -473,42 +484,48 @@ public void testDuelDepthBreadthFirst() throws Exception { } indexRandom(true, reqs); - final SearchResponse r1 = prepareSearch("idx").addAggregation( - terms("f1").field("f1") - .collectMode(SubAggCollectionMode.DEPTH_FIRST) - .subAggregation( - terms("f2").field("f2") - .collectMode(SubAggCollectionMode.DEPTH_FIRST) - .subAggregation(terms("f3").field("f3").collectMode(SubAggCollectionMode.DEPTH_FIRST)) - ) - ).get(); - assertNoFailures(r1); - final SearchResponse r2 = prepareSearch("idx").addAggregation( - terms("f1").field("f1") - .collectMode(SubAggCollectionMode.BREADTH_FIRST) - .subAggregation( - terms("f2").field("f2") - .collectMode(SubAggCollectionMode.BREADTH_FIRST) - .subAggregation(terms("f3").field("f3").collectMode(SubAggCollectionMode.BREADTH_FIRST)) - ) - ).get(); - assertNoFailures(r2); - - final Terms t1 = r1.getAggregations().get("f1"); - final Terms t2 = r2.getAggregations().get("f1"); - assertEquals(t1, t2); - for (Terms.Bucket b1 : t1.getBuckets()) { - final Terms.Bucket b2 = t2.getBucketByKey(b1.getKeyAsString()); - final Terms sub1 = b1.getAggregations().get("f2"); - final Terms sub2 = b2.getAggregations().get("f2"); - assertEquals(sub1, sub2); - for (Terms.Bucket subB1 : sub1.getBuckets()) { - final Terms.Bucket subB2 = sub2.getBucketByKey(subB1.getKeyAsString()); - final Terms subSub1 = subB1.getAggregations().get("f3"); - final Terms subSub2 = subB2.getAggregations().get("f3"); - assertEquals(subSub1, subSub2); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("f1").field("f1") + .collectMode(SubAggCollectionMode.DEPTH_FIRST) + .subAggregation( + terms("f2").field("f2") + .collectMode(SubAggCollectionMode.DEPTH_FIRST) + .subAggregation(terms("f3").field("f3").collectMode(SubAggCollectionMode.DEPTH_FIRST)) + ) + ), + response1 -> { + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("f1").field("f1") + .collectMode(SubAggCollectionMode.BREADTH_FIRST) + .subAggregation( + terms("f2").field("f2") + .collectMode(SubAggCollectionMode.BREADTH_FIRST) + .subAggregation(terms("f3").field("f3").collectMode(SubAggCollectionMode.BREADTH_FIRST)) + ) + ), + response2 -> { + + final Terms t1 = response1.getAggregations().get("f1"); + final Terms t2 = response2.getAggregations().get("f1"); + assertEquals(t1, t2); + for (Terms.Bucket b1 : t1.getBuckets()) { + final Terms.Bucket b2 = t2.getBucketByKey(b1.getKeyAsString()); + final Terms sub1 = b1.getAggregations().get("f2"); + final Terms sub2 = b2.getAggregations().get("f2"); + assertEquals(sub1, sub2); + for (Terms.Bucket subB1 : sub1.getBuckets()) { + final Terms.Bucket subB2 = sub2.getBucketByKey(subB1.getKeyAsString()); + final Terms subSub1 = subB1.getAggregations().get("f3"); + final Terms subSub2 = subB2.getAggregations().get("f3"); + assertEquals(subSub1, subSub2); + } + } + } + ); } - } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/FiltersAggsRewriteIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/FiltersAggsRewriteIT.java index fe51f4a1e2fb4..ce6a77408a5e1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/FiltersAggsRewriteIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/FiltersAggsRewriteIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.WrapperQueryBuilder; @@ -24,6 +23,8 @@ import java.util.HashMap; import java.util.Map; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; + public class FiltersAggsRewriteIT extends ESSingleNodeTestCase { public void testWrapperQueryIsRewritten() throws IOException { @@ -54,11 +55,12 @@ public void testWrapperQueryIsRewritten() throws IOException { Map metadata = new HashMap<>(); metadata.put(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); builder.setMetadata(metadata); - SearchResponse searchResponse = client().prepareSearch("test").setSize(0).addAggregation(builder).get(); - assertEquals(3, searchResponse.getHits().getTotalHits().value); - InternalFilters filters = searchResponse.getAggregations().get("titles"); - assertEquals(1, filters.getBuckets().size()); - assertEquals(2, filters.getBuckets().get(0).getDocCount()); - assertEquals(metadata, filters.getMetadata()); + assertResponse(client().prepareSearch("test").setSize(0).addAggregation(builder), response -> { + assertEquals(3, response.getHits().getTotalHits().value); + InternalFilters filters = response.getAggregations().get("titles"); + assertEquals(1, filters.getBuckets().size()); + assertEquals(2, filters.getBuckets().get(0).getDocCount()); + assertEquals(metadata, filters.getMetadata()); + }); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MetadataIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MetadataIT.java index b255a7b5f9bb6..3775ba6025ec2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MetadataIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MetadataIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.search.aggregations; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.Sum; import org.elasticsearch.search.aggregations.pipeline.InternalBucketMetricValue; @@ -22,7 +21,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.maxBucket; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; public class MetadataIT extends ESIntegTestCase { @@ -39,32 +38,33 @@ public void testMetadataSetOnAggregationResult() throws Exception { final var nestedMetadata = Map.of("nested", "value"); var metadata = Map.of("key", "value", "numeric", 1.2, "bool", true, "complex", nestedMetadata); - SearchResponse response = 
prepareSearch("idx").addAggregation( - terms("the_terms").setMetadata(metadata).field("name").subAggregation(sum("the_sum").setMetadata(metadata).field("value")) - ).addAggregation(maxBucket("the_max_bucket", "the_terms>the_sum").setMetadata(metadata)).get(); - - assertNoFailures(response); - - Aggregations aggs = response.getAggregations(); - assertNotNull(aggs); - - Terms terms = aggs.get("the_terms"); - assertNotNull(terms); - assertMetadata(terms.getMetadata()); - - List buckets = terms.getBuckets(); - for (Terms.Bucket bucket : buckets) { - Aggregations subAggs = bucket.getAggregations(); - assertNotNull(subAggs); - - Sum sum = subAggs.get("the_sum"); - assertNotNull(sum); - assertMetadata(sum.getMetadata()); - } - - InternalBucketMetricValue maxBucket = aggs.get("the_max_bucket"); - assertNotNull(maxBucket); - assertMetadata(maxBucket.getMetadata()); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("the_terms").setMetadata(metadata).field("name").subAggregation(sum("the_sum").setMetadata(metadata).field("value")) + ).addAggregation(maxBucket("the_max_bucket", "the_terms>the_sum").setMetadata(metadata)), + response -> { + Aggregations aggs = response.getAggregations(); + assertNotNull(aggs); + + Terms terms = aggs.get("the_terms"); + assertNotNull(terms); + assertMetadata(terms.getMetadata()); + + List buckets = terms.getBuckets(); + for (Terms.Bucket bucket : buckets) { + Aggregations subAggs = bucket.getAggregations(); + assertNotNull(subAggs); + + Sum sum = subAggs.get("the_sum"); + assertNotNull(sum); + assertMetadata(sum.getMetadata()); + } + + InternalBucketMetricValue maxBucket = aggs.get("the_max_bucket"); + assertNotNull(maxBucket); + assertMetadata(maxBucket.getMetadata()); + } + ); } private void assertMetadata(Map returnedMetadata) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java index 0af496d83f9db..ad1d4086c690c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java @@ -8,14 +8,13 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.bucket.terms.LongTerms; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.UnmappedTerms; import org.elasticsearch.test.ESIntegTestCase; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; @@ -73,81 +72,84 @@ public void setupSuiteScopeCluster() throws Exception { } public void testSingleValueField() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(response); - - LongTerms terms 
= response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - final int bucketCount = numSingleFalses > 0 && numSingleTrues > 0 ? 2 : numSingleFalses + numSingleTrues > 0 ? 1 : 0; - assertThat(terms.getBuckets().size(), equalTo(bucketCount)); - - LongTerms.Bucket bucket = terms.getBucketByKey("false"); - if (numSingleFalses == 0) { - assertNull(bucket); - } else { - assertNotNull(bucket); - assertEquals(numSingleFalses, bucket.getDocCount()); - assertEquals("false", bucket.getKeyAsString()); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) + ), + response -> { + LongTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + final int bucketCount = numSingleFalses > 0 && numSingleTrues > 0 ? 2 : numSingleFalses + numSingleTrues > 0 ? 1 : 0; + assertThat(terms.getBuckets().size(), equalTo(bucketCount)); + + LongTerms.Bucket bucket = terms.getBucketByKey("false"); + if (numSingleFalses == 0) { + assertNull(bucket); + } else { + assertNotNull(bucket); + assertEquals(numSingleFalses, bucket.getDocCount()); + assertEquals("false", bucket.getKeyAsString()); + } - bucket = terms.getBucketByKey("true"); - if (numSingleTrues == 0) { - assertNull(bucket); - } else { - assertNotNull(bucket); - assertEquals(numSingleTrues, bucket.getDocCount()); - assertEquals("true", bucket.getKeyAsString()); - } + bucket = terms.getBucketByKey("true"); + if (numSingleTrues == 0) { + assertNull(bucket); + } else { + assertNotNull(bucket); + assertEquals(numSingleTrues, bucket.getDocCount()); + assertEquals("true", bucket.getKeyAsString()); + } + } + ); } public void testMultiValueField() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").field(MULTI_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(response); - - LongTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - final int bucketCount = numMultiFalses > 0 && numMultiTrues > 0 ? 2 : numMultiFalses + numMultiTrues > 0 ? 1 : 0; - assertThat(terms.getBuckets(), hasSize(bucketCount)); - - LongTerms.Bucket bucket = terms.getBucketByKey("false"); - if (numMultiFalses == 0) { - assertNull(bucket); - } else { - assertNotNull(bucket); - assertEquals(numMultiFalses, bucket.getDocCount()); - assertEquals("false", bucket.getKeyAsString()); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").field(MULTI_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) + ), + response -> { + LongTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + final int bucketCount = numMultiFalses > 0 && numMultiTrues > 0 ? 2 : numMultiFalses + numMultiTrues > 0 ? 
1 : 0; + assertThat(terms.getBuckets(), hasSize(bucketCount)); + + LongTerms.Bucket bucket = terms.getBucketByKey("false"); + if (numMultiFalses == 0) { + assertNull(bucket); + } else { + assertNotNull(bucket); + assertEquals(numMultiFalses, bucket.getDocCount()); + assertEquals("false", bucket.getKeyAsString()); + } - bucket = terms.getBucketByKey("true"); - if (numMultiTrues == 0) { - assertNull(bucket); - } else { - assertNotNull(bucket); - assertEquals(numMultiTrues, bucket.getDocCount()); - assertEquals("true", bucket.getKeyAsString()); - } + bucket = terms.getBucketByKey("true"); + if (numMultiTrues == 0) { + assertNull(bucket); + } else { + assertNotNull(bucket); + assertEquals(numMultiTrues, bucket.getDocCount()); + assertEquals("true", bucket.getKeyAsString()); + } + } + ); } public void testUnmapped() throws Exception { - SearchResponse response = prepareSearch("idx_unmapped").addAggregation( - new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) - .size(between(1, 5)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(response); - - UnmappedTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(0)); + assertNoFailuresAndResponse( + prepareSearch("idx_unmapped").addAggregation( + new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) + .size(between(1, 5)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + response -> { + UnmappedTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(0)); + } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java index 920fd79401cc6..e3242a561c2ad 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; @@ -61,6 +60,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; @@ -225,94 +225,100 @@ private static String getBucketKeyAsString(ZonedDateTime key, ZoneId tz) { } public void testSingleValuedField() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH) - ).get(); + 
assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation(dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH)), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + + ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(1L)); - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - - ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); - Histogram.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(2L)); - - key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(3L)); + key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(2L)); + + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(3L)); + } + ); } public void testSingleValuedFieldWithTimeZone() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.DAY).minDocCount(1).timeZone(ZoneId.of("+01:00")) - ).execute().actionGet(); - ZoneId tz = ZoneId.of("+01:00"); - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(6)); - - ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 23, 0, 0, 0, ZoneOffset.UTC); - Histogram.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - 
assertThat(bucket.getDocCount(), equalTo(1L)); - - key = ZonedDateTime.of(2012, 2, 1, 23, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - key = ZonedDateTime.of(2012, 2, 14, 23, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - key = ZonedDateTime.of(2012, 3, 1, 23, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(3); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - key = ZonedDateTime.of(2012, 3, 14, 23, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(4); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - key = ZonedDateTime.of(2012, 3, 22, 23, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(5); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(1L)); + ZoneId tz = ZoneId.of("+01:00"); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + dateHistogram("histo").field("date") + .calendarInterval(DateHistogramInterval.DAY) + .minDocCount(1) + .timeZone(tz) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(6)); + + ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 23, 0, 0, 0, ZoneOffset.UTC); + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(1L)); + + key = ZonedDateTime.of(2012, 2, 1, 23, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(1L)); + + key = ZonedDateTime.of(2012, 2, 14, 23, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(1L)); + + key = ZonedDateTime.of(2012, 3, 1, 23, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(3); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(1L)); + + key = ZonedDateTime.of(2012, 3, 14, 23, 0, 0, 0, 
ZoneOffset.UTC); + bucket = buckets.get(4); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(1L)); + + key = ZonedDateTime.of(2012, 3, 22, 23, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(5); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(1L)); + } + ); } public void testSingleValued_timeZone_epoch() throws Exception { @@ -322,257 +328,268 @@ public void testSingleValued_timeZone_epoch() throws Exception { format = format + "||date_optional_time"; } ZoneId tz = ZoneId.of("+01:00"); - SearchResponse response = prepareSearch("idx").addAggregation( - dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.DAY).minDocCount(1).timeZone(tz).format(format) - ).get(); - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(6)); - - List expectedKeys = new ArrayList<>(); - expectedKeys.add(ZonedDateTime.of(2012, 1, 1, 23, 0, 0, 0, ZoneOffset.UTC)); - expectedKeys.add(ZonedDateTime.of(2012, 2, 1, 23, 0, 0, 0, ZoneOffset.UTC)); - expectedKeys.add(ZonedDateTime.of(2012, 2, 14, 23, 0, 0, 0, ZoneOffset.UTC)); - expectedKeys.add(ZonedDateTime.of(2012, 3, 1, 23, 0, 0, 0, ZoneOffset.UTC)); - expectedKeys.add(ZonedDateTime.of(2012, 3, 14, 23, 0, 0, 0, ZoneOffset.UTC)); - expectedKeys.add(ZonedDateTime.of(2012, 3, 22, 23, 0, 0, 0, ZoneOffset.UTC)); - - Iterator keyIterator = expectedKeys.iterator(); - for (Histogram.Bucket bucket : buckets) { - assertThat(bucket, notNullValue()); - ZonedDateTime expectedKey = keyIterator.next(); - String bucketKey = bucket.getKeyAsString(); - String expectedBucketName = Long.toString(expectedKey.toInstant().toEpochMilli() / millisDivider); - assertThat(bucketKey, equalTo(expectedBucketName)); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(expectedKey)); - assertThat(bucket.getDocCount(), equalTo(1L)); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.DAY).minDocCount(1).timeZone(tz).format(format) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(6)); + + List expectedKeys = new ArrayList<>(); + expectedKeys.add(ZonedDateTime.of(2012, 1, 1, 23, 0, 0, 0, ZoneOffset.UTC)); + expectedKeys.add(ZonedDateTime.of(2012, 2, 1, 23, 0, 0, 0, ZoneOffset.UTC)); + expectedKeys.add(ZonedDateTime.of(2012, 2, 14, 23, 0, 0, 0, ZoneOffset.UTC)); + expectedKeys.add(ZonedDateTime.of(2012, 3, 1, 23, 0, 0, 0, ZoneOffset.UTC)); + expectedKeys.add(ZonedDateTime.of(2012, 3, 14, 23, 0, 0, 0, ZoneOffset.UTC)); + expectedKeys.add(ZonedDateTime.of(2012, 3, 22, 23, 0, 0, 0, ZoneOffset.UTC)); + + Iterator keyIterator = expectedKeys.iterator(); + for (Histogram.Bucket bucket : buckets) { + assertThat(bucket, notNullValue()); + ZonedDateTime expectedKey = keyIterator.next(); + String bucketKey = bucket.getKeyAsString(); + String expectedBucketName = 
Long.toString(expectedKey.toInstant().toEpochMilli() / millisDivider); + assertThat(bucketKey, equalTo(expectedBucketName)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(expectedKey)); + assertThat(bucket.getDocCount(), equalTo(1L)); + } + } + ); } public void testSingleValuedFieldOrderedByKeyAsc() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).order(BucketOrder.key(true)) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - - int i = 0; - for (Histogram.Bucket bucket : buckets) { - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); - i++; - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).order(BucketOrder.key(true)) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + + int i = 0; + for (Histogram.Bucket bucket : buckets) { + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); + i++; + } + } + ); } public void testSingleValuedFieldOrderedByKeyDesc() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).order(BucketOrder.key(false)) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(3)); - - int i = 2; - for (Histogram.Bucket bucket : histo.getBuckets()) { - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); - i--; - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).order(BucketOrder.key(false)) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(3)); + + int i = 2; + for (Histogram.Bucket bucket : histo.getBuckets()) { + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); + i--; + } + } + ); } public void testSingleValuedFieldOrderedByCountAsc() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).order(BucketOrder.count(true)) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(3)); - - int i = 0; - for (Histogram.Bucket bucket : histo.getBuckets()) { - assertThat(((ZonedDateTime) bucket.getKey()), 
equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); - i++; - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).order(BucketOrder.count(true)) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(3)); + + int i = 0; + for (Histogram.Bucket bucket : histo.getBuckets()) { + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); + i++; + } + } + ); } public void testSingleValuedFieldOrderedByCountDesc() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).order(BucketOrder.count(false)) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(3)); - - int i = 2; - for (Histogram.Bucket bucket : histo.getBuckets()) { - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); - i--; - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).order(BucketOrder.count(false)) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(3)); + + int i = 2; + for (Histogram.Bucket bucket : histo.getBuckets()) { + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); + i--; + } + } + ); } public void testSingleValuedFieldWithSubAggregation() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).subAggregation(sum("sum").field("value")) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - assertThat(((InternalAggregation) histo).getProperty("_bucket_count"), equalTo(3)); - Object[] propertiesKeys = (Object[]) ((InternalAggregation) histo).getProperty("_key"); - Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) histo).getProperty("_count"); - Object[] propertiesCounts = (Object[]) ((InternalAggregation) histo).getProperty("sum.value"); - - ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); - Histogram.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(1L)); - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - assertThat(sum.value(), equalTo(1.0)); - assertThat((ZonedDateTime) propertiesKeys[0], equalTo(key)); - assertThat((long) propertiesDocCounts[0], equalTo(1L)); - assertThat((double) 
propertiesCounts[0], equalTo(1.0)); - - key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(2L)); - sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - assertThat(sum.value(), equalTo(5.0)); - assertThat((ZonedDateTime) propertiesKeys[1], equalTo(key)); - assertThat((long) propertiesDocCounts[1], equalTo(2L)); - assertThat((double) propertiesCounts[1], equalTo(5.0)); - - key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(3L)); - sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - assertThat(sum.value(), equalTo(15.0)); - assertThat((ZonedDateTime) propertiesKeys[2], equalTo(key)); - assertThat((long) propertiesDocCounts[2], equalTo(3L)); - assertThat((double) propertiesCounts[2], equalTo(15.0)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).subAggregation(sum("sum").field("value")) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + assertThat(((InternalAggregation) histo).getProperty("_bucket_count"), equalTo(3)); + Object[] propertiesKeys = (Object[]) ((InternalAggregation) histo).getProperty("_key"); + Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) histo).getProperty("_count"); + Object[] propertiesCounts = (Object[]) ((InternalAggregation) histo).getProperty("sum.value"); + + ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(1L)); + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + assertThat(sum.value(), equalTo(1.0)); + assertThat((ZonedDateTime) propertiesKeys[0], equalTo(key)); + assertThat((long) propertiesDocCounts[0], equalTo(1L)); + assertThat((double) propertiesCounts[0], equalTo(1.0)); + + key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(2L)); + sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + assertThat(sum.value(), equalTo(5.0)); + assertThat((ZonedDateTime) propertiesKeys[1], equalTo(key)); + assertThat((long) propertiesDocCounts[1], equalTo(2L)); + assertThat((double) propertiesCounts[1], equalTo(5.0)); + + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), 
equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(3L)); + sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + assertThat(sum.value(), equalTo(15.0)); + assertThat((ZonedDateTime) propertiesKeys[2], equalTo(key)); + assertThat((long) propertiesDocCounts[2], equalTo(3L)); + assertThat((double) propertiesCounts[2], equalTo(15.0)); + } + ); } public void testSingleValuedFieldOrderedBySubAggregationAsc() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - dateHistogram("histo").field("date") - .calendarInterval(DateHistogramInterval.MONTH) - .order(BucketOrder.aggregation("sum", true)) - .subAggregation(max("sum").field("value")) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(3)); - - int i = 0; - for (Histogram.Bucket bucket : histo.getBuckets()) { - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); - i++; - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + dateHistogram("histo").field("date") + .calendarInterval(DateHistogramInterval.MONTH) + .order(BucketOrder.aggregation("sum", true)) + .subAggregation(max("sum").field("value")) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(3)); + + int i = 0; + for (Histogram.Bucket bucket : histo.getBuckets()) { + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); + i++; + } + } + ); } public void testSingleValuedFieldOrderedBySubAggregationDesc() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - dateHistogram("histo").field("date") - .calendarInterval(DateHistogramInterval.MONTH) - .order(BucketOrder.aggregation("sum", false)) - .subAggregation(max("sum").field("value")) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(3)); - - int i = 2; - for (Histogram.Bucket bucket : histo.getBuckets()) { - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); - i--; - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + dateHistogram("histo").field("date") + .calendarInterval(DateHistogramInterval.MONTH) + .order(BucketOrder.aggregation("sum", false)) + .subAggregation(max("sum").field("value")) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(3)); + + int i = 2; + for (Histogram.Bucket bucket : histo.getBuckets()) { + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); + i--; + } + } + ); } public void testSingleValuedFieldOrderedByMultiValuedSubAggregationDesc() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - 
dateHistogram("histo").field("date") - .calendarInterval(DateHistogramInterval.MONTH) - .order(BucketOrder.aggregation("stats", "sum", false)) - .subAggregation(stats("stats").field("value")) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(3)); - - int i = 2; - for (Histogram.Bucket bucket : histo.getBuckets()) { - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); - i--; - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + dateHistogram("histo").field("date") + .calendarInterval(DateHistogramInterval.MONTH) + .order(BucketOrder.aggregation("stats", "sum", false)) + .subAggregation(stats("stats").field("value")) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(3)); + + int i = 2; + for (Histogram.Bucket bucket : histo.getBuckets()) { + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); + i--; + } + } + ); } public void testSingleValuedFieldOrderedByTieBreaker() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - dateHistogram("histo").field("date") - .calendarInterval(DateHistogramInterval.MONTH) - .order(BucketOrder.aggregation("max_constant", randomBoolean())) - .subAggregation(max("max_constant").field("constant")) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(3)); - - int i = 1; - for (Histogram.Bucket bucket : histo.getBuckets()) { - assertThat(bucket.getKey(), equalTo(date(i, 1))); - i++; - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + dateHistogram("histo").field("date") + .calendarInterval(DateHistogramInterval.MONTH) + .order(BucketOrder.aggregation("max_constant", randomBoolean())) + .subAggregation(max("max_constant").field("constant")) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(3)); + + int i = 1; + for (Histogram.Bucket bucket : histo.getBuckets()) { + assertThat(bucket.getKey(), equalTo(date(i, 1))); + i++; + } + } + ); } public void testSingleValuedFieldOrderedByIllegalAgg() throws Exception { @@ -607,41 +624,42 @@ public void testSingleValuedFieldOrderedByIllegalAgg() throws Exception { public void testSingleValuedFieldWithValueScript() throws Exception { Map params = new HashMap<>(); params.put("fieldname", "date"); - SearchResponse response = prepareSearch("idx").addAggregation( - dateHistogram("histo").field("date") - .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.LONG_PLUS_ONE_MONTH, params)) - .calendarInterval(DateHistogramInterval.MONTH) - ).get(); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + dateHistogram("histo").field("date") + .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.LONG_PLUS_ONE_MONTH, params)) + .calendarInterval(DateHistogramInterval.MONTH) + ), + 
response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + + ZonedDateTime key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(1L)); - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - - ZonedDateTime key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); - Histogram.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(2L)); - - key = ZonedDateTime.of(2012, 4, 1, 0, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(3L)); + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(2L)); + + key = ZonedDateTime.of(2012, 4, 1, 0, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(3L)); + } + ); } /* @@ -654,80 +672,80 @@ public void testSingleValuedFieldWithValueScript() throws Exception { */ public void testMultiValuedField() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - dateHistogram("histo").field("dates").calendarInterval(DateHistogramInterval.MONTH) - ).get(); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation(dateHistogram("histo").field("dates").calendarInterval(DateHistogramInterval.MONTH)), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(4)); + + ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(1L)); 
- assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(4)); - - ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); - Histogram.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(3L)); - - key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(5L)); - - key = ZonedDateTime.of(2012, 4, 1, 0, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(3); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(3L)); + key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(3L)); + + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(5L)); + + key = ZonedDateTime.of(2012, 4, 1, 0, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(3); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(3L)); + } + ); } public void testMultiValuedFieldOrderedByCountDesc() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - dateHistogram("histo").field("dates").calendarInterval(DateHistogramInterval.MONTH).order(BucketOrder.count(false)) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(4)); - - List buckets = new ArrayList<>(histo.getBuckets()); - - Histogram.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo(date(3, 1))); - assertThat(bucket.getDocCount(), equalTo(5L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo(date(2, 1))); - assertThat(bucket.getDocCount(), equalTo(3L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), 
equalTo(date(4, 1))); - assertThat(bucket.getDocCount(), equalTo(3L)); - - bucket = buckets.get(3); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo(date(1, 1))); - assertThat(bucket.getDocCount(), equalTo(1L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + dateHistogram("histo").field("dates").calendarInterval(DateHistogramInterval.MONTH).order(BucketOrder.count(false)) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(4)); + + List buckets = new ArrayList<>(histo.getBuckets()); + + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo(date(3, 1))); + assertThat(bucket.getDocCount(), equalTo(5L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo(date(2, 1))); + assertThat(bucket.getDocCount(), equalTo(3L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo(date(4, 1))); + assertThat(bucket.getDocCount(), equalTo(3L)); + + bucket = buckets.get(3); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo(date(1, 1))); + assertThat(bucket.getDocCount(), equalTo(1L)); + } + ); } /** @@ -743,47 +761,48 @@ public void testMultiValuedFieldOrderedByCountDesc() throws Exception { public void testMultiValuedFieldWithValueScript() throws Exception { Map params = new HashMap<>(); params.put("fieldname", "dates"); - SearchResponse response = prepareSearch("idx").addAggregation( - dateHistogram("histo").field("dates") - .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.LONG_PLUS_ONE_MONTH, params)) - .calendarInterval(DateHistogramInterval.MONTH) - ).get(); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + dateHistogram("histo").field("dates") + .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.LONG_PLUS_ONE_MONTH, params)) + .calendarInterval(DateHistogramInterval.MONTH) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(4)); + + ZonedDateTime key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(1L)); - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(4)); - - ZonedDateTime key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); - Histogram.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), 
equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(3L)); - - key = ZonedDateTime.of(2012, 4, 1, 0, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(5L)); - - key = ZonedDateTime.of(2012, 5, 1, 0, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(3); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(3L)); + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(3L)); + + key = ZonedDateTime.of(2012, 4, 1, 0, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(5L)); + + key = ZonedDateTime.of(2012, 5, 1, 0, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(3); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(3L)); + } + ); } /** @@ -797,84 +816,86 @@ public void testMultiValuedFieldWithValueScript() throws Exception { public void testScriptSingleValue() throws Exception { Map params = new HashMap<>(); params.put("fieldname", "date"); - SearchResponse response = prepareSearch("idx").addAggregation( - dateHistogram("histo").script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.EXTRACT_FIELD, params)) - .calendarInterval(DateHistogramInterval.MONTH) - ).get(); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + dateHistogram("histo").script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.EXTRACT_FIELD, params)) + .calendarInterval(DateHistogramInterval.MONTH) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + + ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(1L)); - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - - ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); - Histogram.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); 
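// ---------------------------------------------------------------------------
// Editorial note: the expected buckets in this test start one month later than
// the raw data because the LONG_PLUS_ONE_MONTH mock script shifts every field
// value by a calendar month before bucketing. Illustrative arithmetic only:
ZonedDateTime value = ZonedDateTime.of(2012, 1, 2, 0, 0, 0, 0, ZoneOffset.UTC);
ZonedDateTime shifted = value.plusMonths(1);          // 2012-02-02T00:00Z
ZonedDateTime bucketKey = shifted.withDayOfMonth(1);  // MONTH bucket key: 2012-02-01T00:00Z
// Hence a January document is asserted against the February bucket above.
// ---------------------------------------------------------------------------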
- assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(2L)); - - key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(3L)); + key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(2L)); + + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(3L)); + } + ); } public void testScriptMultiValued() throws Exception { Map params = new HashMap<>(); params.put("fieldname", "dates"); - SearchResponse response = prepareSearch("idx").addAggregation( - dateHistogram("histo").script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.EXTRACT_FIELD, params)) - .calendarInterval(DateHistogramInterval.MONTH) - ).get(); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + dateHistogram("histo").script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.EXTRACT_FIELD, params)) + .calendarInterval(DateHistogramInterval.MONTH) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(4)); + + ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(1L)); - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(4)); - - ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); - Histogram.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(3L)); - - key = 
ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(5L)); - - key = ZonedDateTime.of(2012, 4, 1, 0, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(3); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(3L)); + key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(3L)); + + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(5L)); + + key = ZonedDateTime.of(2012, 4, 1, 0, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(3); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(3L)); + } + ); } /* @@ -887,78 +908,81 @@ public void testScriptMultiValued() throws Exception { */ public void testUnmapped() throws Exception { - SearchResponse response = prepareSearch("idx_unmapped").addAggregation( - dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(0)); + assertNoFailuresAndResponse( + prepareSearch("idx_unmapped").addAggregation( + dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(0)); + } + ); } public void testPartiallyUnmapped() throws Exception { - SearchResponse response = prepareSearch("idx", "idx_unmapped").addAggregation( - dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH) - ).get(); + assertNoFailuresAndResponse( + prepareSearch("idx", "idx_unmapped").addAggregation( + dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + + ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(1L)); - 
assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - - ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); - Histogram.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(2L)); - - key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(3L)); + key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(2L)); + + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(3L)); + } + ); } public void testEmptyAggregation() throws Exception { - SearchResponse searchResponse = prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field("value") - .interval(1L) - .minDocCount(0) - .subAggregation(dateHistogram("date_histo").field("value").fixedInterval(DateHistogramInterval.HOUR)) - ) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - Histogram histo = searchResponse.getAggregations().get("histo"); - assertThat(histo, Matchers.notNullValue()); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - - Histogram.Bucket bucket = buckets.get(1); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("1.0")); - - Histogram dateHisto = bucket.getAggregations().get("date_histo"); - assertThat(dateHisto, Matchers.notNullValue()); - assertThat(dateHisto.getName(), equalTo("date_histo")); - assertThat(dateHisto.getBuckets().isEmpty(), is(true)); - + assertNoFailuresAndResponse( + prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(1L) + .minDocCount(0) + .subAggregation(dateHistogram("date_histo").field("value").fixedInterval(DateHistogramInterval.HOUR)) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, Matchers.notNullValue()); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + + Histogram.Bucket bucket = buckets.get(1); + assertThat(bucket, 
Matchers.notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("1.0")); + + Histogram dateHisto = bucket.getAggregations().get("date_histo"); + assertThat(dateHisto, Matchers.notNullValue()); + assertThat(dateHisto.getName(), equalTo("date_histo")); + assertThat(dateHisto.getBuckets().isEmpty(), is(true)); + } + ); } public void testSingleValueWithTimeZone() throws Exception { @@ -973,30 +997,32 @@ public void testSingleValueWithTimeZone() throws Exception { } indexRandom(true, reqs); - SearchResponse response = prepareSearch("idx2").setQuery(matchAllQuery()) - .addAggregation( - dateHistogram("date_histo").field("date") - .timeZone(ZoneId.of("-02:00")) - .calendarInterval(DateHistogramInterval.DAY) - .format("yyyy-MM-dd:HH-mm-ssZZZZZ") - ) - .get(); - - assertThat(response.getHits().getTotalHits().value, equalTo(5L)); - - Histogram histo = response.getAggregations().get("date_histo"); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(2)); - - Histogram.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("2014-03-10:00-00-00-02:00")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("2014-03-11:00-00-00-02:00")); - assertThat(bucket.getDocCount(), equalTo(3L)); + assertNoFailuresAndResponse( + prepareSearch("idx2").setQuery(matchAllQuery()) + .addAggregation( + dateHistogram("date_histo").field("date") + .timeZone(ZoneId.of("-02:00")) + .calendarInterval(DateHistogramInterval.DAY) + .format("yyyy-MM-dd:HH-mm-ssZZZZZ") + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(5L)); + + Histogram histo = response.getAggregations().get("date_histo"); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(2)); + + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("2014-03-10:00-00-00-02:00")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("2014-03-11:00-00-00-02:00")); + assertThat(bucket.getDocCount(), equalTo(3L)); + } + ); } public void testSingleValueFieldWithExtendedBounds() throws Exception { @@ -1066,47 +1092,42 @@ public void testSingleValueFieldWithExtendedBounds() throws Exception { long[] extendedValueCounts = new long[bucketsCount]; System.arraycopy(docCounts, 0, extendedValueCounts, addedBucketsLeft, docCounts.length); - SearchResponse response = null; try { - response = prepareSearch("idx2").addAggregation( - dateHistogram("histo").field("date") - .fixedInterval(DateHistogramInterval.days(interval)) - .minDocCount(0) - // when explicitly specifying a format, the extended bounds should be defined by the same format - .extendedBounds(new LongBounds(format(boundsMin, pattern), format(boundsMax, pattern))) - .format(pattern) - ).get(); - - if (invalidBoundsError) { - fail("Expected an exception to be thrown when bounds.min is greater than bounds.max"); - return; - } - + assertNoFailuresAndResponse( + prepareSearch("idx2").addAggregation( + dateHistogram("histo").field("date") + .fixedInterval(DateHistogramInterval.days(interval)) + .minDocCount(0) + // when explicitly specifying a format, the extended bounds should be defined by the same format + .extendedBounds(new LongBounds(format(boundsMin, pattern), format(boundsMax, pattern))) + 
.format(pattern) + ), + response -> { + if (invalidBoundsError) { + fail("Expected an exception to be thrown when bounds.min is greater than bounds.max"); + } + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(bucketsCount)); + + ZonedDateTime key = baseKey.isBefore(boundsMinKey) ? baseKey : boundsMinKey; + for (int i = 0; i < bucketsCount; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getKeyAsString(), equalTo(format(key, pattern))); + assertThat(bucket.getDocCount(), equalTo(extendedValueCounts[i])); + key = key.plusDays(interval); + } + } + ); } catch (Exception e) { - if (invalidBoundsError) { - // expected - return; - } else { + if (invalidBoundsError == false) { throw e; } } - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(bucketsCount)); - - ZonedDateTime key = baseKey.isBefore(boundsMinKey) ? baseKey : boundsMinKey; - for (int i = 0; i < bucketsCount; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getKeyAsString(), equalTo(format(key, pattern))); - assertThat(bucket.getDocCount(), equalTo(extendedValueCounts[i])); - key = key.plusDays(interval); - } } /** @@ -1133,45 +1154,47 @@ public void testSingleValueFieldWithExtendedBoundsTimezone() throws Exception { indexRandom(true, builders); ensureSearchable(index); - SearchResponse response = null; // retrieve those docs with the same time zone and extended bounds - response = prepareSearch(index).setQuery( - QueryBuilders.rangeQuery("date").from("now/d").to("now/d").includeLower(true).includeUpper(true).timeZone(timezone.getId()) - ) - .addAggregation( - dateHistogram("histo").field("date") - .calendarInterval(DateHistogramInterval.hours(1)) - .timeZone(timezone) - .minDocCount(0) - .extendedBounds(new LongBounds("now/d", "now/d+23h")) + assertNoFailuresAndResponse( + prepareSearch(index).setQuery( + QueryBuilders.rangeQuery("date").from("now/d").to("now/d").includeLower(true).includeUpper(true).timeZone(timezone.getId()) ) - .get(); - assertNoFailures(response); - - assertThat( - "Expected 24 buckets for one day aggregation with hourly interval", - response.getHits().getTotalHits().value, - equalTo(2L) - ); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(24)); - - for (int i = 0; i < buckets.size(); i++) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - ZonedDateTime zonedDateTime = timeZoneStartToday.plus(i * 60 * 60 * 1000, ChronoUnit.MILLIS); - assertThat("InternalBucket " + i + " had wrong key", (ZonedDateTime) bucket.getKey(), equalTo(zonedDateTime)); - if (i == 0 || i == 12) { - assertThat(bucket.getDocCount(), equalTo(1L)); - } else { - assertThat(bucket.getDocCount(), equalTo(0L)); + .addAggregation( + dateHistogram("histo").field("date") + .calendarInterval(DateHistogramInterval.hours(1)) + 
.timeZone(timezone) + .minDocCount(0) + .extendedBounds(new LongBounds("now/d", "now/d+23h")) + ), + response -> { + + assertThat( + "Expected 24 buckets for one day aggregation with hourly interval", + response.getHits().getTotalHits().value, + equalTo(2L) + ); + + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(24)); + + for (int i = 0; i < buckets.size(); i++) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + ZonedDateTime zonedDateTime = timeZoneStartToday.plus(i * 60 * 60 * 1000, ChronoUnit.MILLIS); + assertThat("InternalBucket " + i + " had wrong key", (ZonedDateTime) bucket.getKey(), equalTo(zonedDateTime)); + if (i == 0 || i == 12) { + assertThat(bucket.getDocCount(), equalTo(1L)); + } else { + assertThat(bucket.getDocCount(), equalTo(0L)); + } + } } - } + ); internalCluster().wipeIndices(index); + } /** @@ -1193,40 +1216,40 @@ public void testSingleValueFieldWithExtendedBoundsOffset() throws Exception { indexRandom(true, builders); ensureSearchable(index); - SearchResponse response = null; // retrieve those docs with the same time zone and extended bounds - response = prepareSearch(index).addAggregation( - dateHistogram("histo").field("date") - .calendarInterval(DateHistogramInterval.days(1)) - .offset("+6h") - .minDocCount(0) - .extendedBounds(new LongBounds("2016-01-01T06:00:00Z", "2016-01-08T08:00:00Z")) - ).get(); - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(8)); - - assertEquals("2016-01-01T06:00:00.000Z", buckets.get(0).getKeyAsString()); - assertEquals(0, buckets.get(0).getDocCount()); - assertEquals("2016-01-02T06:00:00.000Z", buckets.get(1).getKeyAsString()); - assertEquals(0, buckets.get(1).getDocCount()); - assertEquals("2016-01-03T06:00:00.000Z", buckets.get(2).getKeyAsString()); - assertEquals(2, buckets.get(2).getDocCount()); - assertEquals("2016-01-04T06:00:00.000Z", buckets.get(3).getKeyAsString()); - assertEquals(0, buckets.get(3).getDocCount()); - assertEquals("2016-01-05T06:00:00.000Z", buckets.get(4).getKeyAsString()); - assertEquals(0, buckets.get(4).getDocCount()); - assertEquals("2016-01-06T06:00:00.000Z", buckets.get(5).getKeyAsString()); - assertEquals(2, buckets.get(5).getDocCount()); - assertEquals("2016-01-07T06:00:00.000Z", buckets.get(6).getKeyAsString()); - assertEquals(0, buckets.get(6).getDocCount()); - assertEquals("2016-01-08T06:00:00.000Z", buckets.get(7).getKeyAsString()); - assertEquals(0, buckets.get(7).getDocCount()); - + assertNoFailuresAndResponse( + prepareSearch(index).addAggregation( + dateHistogram("histo").field("date") + .calendarInterval(DateHistogramInterval.days(1)) + .offset("+6h") + .minDocCount(0) + .extendedBounds(new LongBounds("2016-01-01T06:00:00Z", "2016-01-08T08:00:00Z")) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(8)); + + assertEquals("2016-01-01T06:00:00.000Z", buckets.get(0).getKeyAsString()); + assertEquals(0, buckets.get(0).getDocCount()); + assertEquals("2016-01-02T06:00:00.000Z", buckets.get(1).getKeyAsString()); 
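// ---------------------------------------------------------------------------
// Editorial note: the "+6h" offset is why every daily bucket key in this test
// starts at 06:00 rather than midnight. Sketch of the rounding under that
// assumption (not the production rounding code):
long offsetMillis = TimeUnit.HOURS.toMillis(6);
long dayMillis = TimeUnit.DAYS.toMillis(1);
long ts = ZonedDateTime.of(2016, 1, 3, 12, 0, 0, 0, ZoneOffset.UTC).toInstant().toEpochMilli();
long bucketStart = ((ts - offsetMillis) / dayMillis) * dayMillis + offsetMillis;
// bucketStart corresponds to 2016-01-03T06:00:00Z, which is the key of the
// third bucket asserted above.
// ---------------------------------------------------------------------------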
+ assertEquals(0, buckets.get(1).getDocCount()); + assertEquals("2016-01-03T06:00:00.000Z", buckets.get(2).getKeyAsString()); + assertEquals(2, buckets.get(2).getDocCount()); + assertEquals("2016-01-04T06:00:00.000Z", buckets.get(3).getKeyAsString()); + assertEquals(0, buckets.get(3).getDocCount()); + assertEquals("2016-01-05T06:00:00.000Z", buckets.get(4).getKeyAsString()); + assertEquals(0, buckets.get(4).getDocCount()); + assertEquals("2016-01-06T06:00:00.000Z", buckets.get(5).getKeyAsString()); + assertEquals(2, buckets.get(5).getDocCount()); + assertEquals("2016-01-07T06:00:00.000Z", buckets.get(6).getKeyAsString()); + assertEquals(0, buckets.get(6).getDocCount()); + assertEquals("2016-01-08T06:00:00.000Z", buckets.get(7).getKeyAsString()); + assertEquals(0, buckets.get(7).getDocCount()); + } + ); internalCluster().wipeIndices(index); } @@ -1250,59 +1273,67 @@ public void testSingleValueWithMultipleDateFormatsFromMapping() throws Exception } indexRandom(true, reqs); - SearchResponse response = prepareSearch("idx2").setQuery(matchAllQuery()) - .addAggregation(dateHistogram("date_histo").field("date").calendarInterval(DateHistogramInterval.DAY)) - .get(); + assertNoFailuresAndResponse( + prepareSearch("idx2").setQuery(matchAllQuery()) + .addAggregation(dateHistogram("date_histo").field("date").calendarInterval(DateHistogramInterval.DAY)), + response -> { - assertSearchHits(response, "0", "1", "2", "3", "4"); + assertSearchHits(response, "0", "1", "2", "3", "4"); - Histogram histo = response.getAggregations().get("date_histo"); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(1)); + Histogram histo = response.getAggregations().get("date_histo"); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(1)); - ZonedDateTime key = ZonedDateTime.of(2014, 3, 10, 0, 0, 0, 0, ZoneOffset.UTC); - Histogram.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(5L)); + ZonedDateTime key = ZonedDateTime.of(2014, 3, 10, 0, 0, 0, 0, ZoneOffset.UTC); + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(5L)); + } + ); } public void testIssue6965() { - SearchResponse response = prepareSearch("idx").addAggregation( - dateHistogram("histo").field("date").timeZone(ZoneId.of("+01:00")).calendarInterval(DateHistogramInterval.MONTH).minDocCount(0) - ).get(); - - assertNoFailures(response); - - ZoneId tz = ZoneId.of("+01:00"); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + dateHistogram("histo").field("date") + .timeZone(ZoneId.of("+01:00")) + .calendarInterval(DateHistogramInterval.MONTH) + .minDocCount(0) + ), + response -> { + + ZoneId tz = ZoneId.of("+01:00"); + + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + + ZonedDateTime key = ZonedDateTime.of(2011, 12, 31, 23, 0, 0, 0, ZoneOffset.UTC); + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); + 
assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(1L)); - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - - ZonedDateTime key = ZonedDateTime.of(2011, 12, 31, 23, 0, 0, 0, ZoneOffset.UTC); - Histogram.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - key = ZonedDateTime.of(2012, 1, 31, 23, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(2L)); - - key = ZonedDateTime.of(2012, 2, 29, 23, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(3L)); + key = ZonedDateTime.of(2012, 1, 31, 23, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(2L)); + + key = ZonedDateTime.of(2012, 2, 29, 23, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(3L)); + } + ); } public void testDSTBoundaryIssue9491() throws InterruptedException, ExecutionException { @@ -1313,17 +1344,20 @@ public void testDSTBoundaryIssue9491() throws InterruptedException, ExecutionExc client().prepareIndex("test9491").setSource("d", "2014-11-08T13:00:00Z") ); ensureSearchable("test9491"); - SearchResponse response = prepareSearch("test9491").addAggregation( - dateHistogram("histo").field("d") - .calendarInterval(DateHistogramInterval.YEAR) - .timeZone(ZoneId.of("Asia/Jerusalem")) - .format("yyyy-MM-dd'T'HH:mm:ss.SSSXXXXX") - ).get(); - assertNoFailures(response); - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo.getBuckets().size(), equalTo(1)); - assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("2014-01-01T00:00:00.000+02:00")); - internalCluster().wipeIndices("test9491"); + assertNoFailuresAndResponse( + prepareSearch("test9491").addAggregation( + dateHistogram("histo").field("d") + .calendarInterval(DateHistogramInterval.YEAR) + .timeZone(ZoneId.of("Asia/Jerusalem")) + .format("yyyy-MM-dd'T'HH:mm:ss.SSSXXXXX") + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo.getBuckets().size(), equalTo(1)); + assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("2014-01-01T00:00:00.000+02:00")); + internalCluster().wipeIndices("test9491"); + } + ); } public void testIssue8209() throws InterruptedException, ExecutionException { @@ -1335,25 +1369,28 @@ public void testIssue8209() throws InterruptedException, 
ExecutionException { client().prepareIndex("test8209").setSource("d", "2014-04-30T00:00:00Z") ); ensureSearchable("test8209"); - SearchResponse response = prepareSearch("test8209").addAggregation( - dateHistogram("histo").field("d") - .calendarInterval(DateHistogramInterval.MONTH) - .format("yyyy-MM-dd'T'HH:mm:ss.SSSXXXXX") - .timeZone(ZoneId.of("CET")) - .minDocCount(0) - ).get(); - assertNoFailures(response); - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo.getBuckets().size(), equalTo(4)); - assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("2014-01-01T00:00:00.000+01:00")); - assertThat(histo.getBuckets().get(0).getDocCount(), equalTo(1L)); - assertThat(histo.getBuckets().get(1).getKeyAsString(), equalTo("2014-02-01T00:00:00.000+01:00")); - assertThat(histo.getBuckets().get(1).getDocCount(), equalTo(0L)); - assertThat(histo.getBuckets().get(2).getKeyAsString(), equalTo("2014-03-01T00:00:00.000+01:00")); - assertThat(histo.getBuckets().get(2).getDocCount(), equalTo(0L)); - assertThat(histo.getBuckets().get(3).getKeyAsString(), equalTo("2014-04-01T00:00:00.000+02:00")); - assertThat(histo.getBuckets().get(3).getDocCount(), equalTo(2L)); - internalCluster().wipeIndices("test8209"); + assertNoFailuresAndResponse( + prepareSearch("test8209").addAggregation( + dateHistogram("histo").field("d") + .calendarInterval(DateHistogramInterval.MONTH) + .format("yyyy-MM-dd'T'HH:mm:ss.SSSXXXXX") + .timeZone(ZoneId.of("CET")) + .minDocCount(0) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo.getBuckets().size(), equalTo(4)); + assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("2014-01-01T00:00:00.000+01:00")); + assertThat(histo.getBuckets().get(0).getDocCount(), equalTo(1L)); + assertThat(histo.getBuckets().get(1).getKeyAsString(), equalTo("2014-02-01T00:00:00.000+01:00")); + assertThat(histo.getBuckets().get(1).getDocCount(), equalTo(0L)); + assertThat(histo.getBuckets().get(2).getKeyAsString(), equalTo("2014-03-01T00:00:00.000+01:00")); + assertThat(histo.getBuckets().get(2).getDocCount(), equalTo(0L)); + assertThat(histo.getBuckets().get(3).getKeyAsString(), equalTo("2014-04-01T00:00:00.000+02:00")); + assertThat(histo.getBuckets().get(3).getDocCount(), equalTo(2L)); + internalCluster().wipeIndices("test8209"); + } + ); } // TODO: add some tests for negative fixed and calendar intervals @@ -1370,19 +1407,22 @@ public void testFormatIndexUnmapped() throws InterruptedException, ExecutionExce indexRandom(true, client().prepareIndex(indexDateUnmapped).setSource("foo", "bar")); ensureSearchable(indexDateUnmapped); - SearchResponse response = prepareSearch(indexDateUnmapped).addAggregation( - dateHistogram("histo").field("dateField") - .calendarInterval(DateHistogramInterval.MONTH) - .format("yyyy-MM") - .minDocCount(0) - .extendedBounds(new LongBounds("2018-01", "2018-01")) - ).get(); - assertNoFailures(response); - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo.getBuckets().size(), equalTo(1)); - assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("2018-01")); - assertThat(histo.getBuckets().get(0).getDocCount(), equalTo(0L)); - internalCluster().wipeIndices(indexDateUnmapped); + assertNoFailuresAndResponse( + prepareSearch(indexDateUnmapped).addAggregation( + dateHistogram("histo").field("dateField") + .calendarInterval(DateHistogramInterval.MONTH) + .format("yyyy-MM") + .minDocCount(0) + .extendedBounds(new LongBounds("2018-01", "2018-01")) + ), + 
response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo.getBuckets().size(), equalTo(1)); + assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("2018-01")); + assertThat(histo.getBuckets().get(0).getDocCount(), equalTo(0L)); + internalCluster().wipeIndices(indexDateUnmapped); + } + ); } /** @@ -1395,27 +1435,31 @@ public void testRewriteTimeZone_EpochMillisFormat() throws InterruptedException, assertAcked(indicesAdmin().prepareCreate(index).setMapping("d", "type=date,format=epoch_millis").get()); indexRandom(true, client().prepareIndex(index).setSource("d", "1477954800000")); ensureSearchable(index); - SearchResponse response = prepareSearch(index).addAggregation( - dateHistogram("histo").field("d").calendarInterval(DateHistogramInterval.MONTH).timeZone(ZoneId.of("Europe/Berlin")) - ).get(); - assertNoFailures(response); - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo.getBuckets().size(), equalTo(1)); - assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("1477954800000")); - assertThat(histo.getBuckets().get(0).getDocCount(), equalTo(1L)); - - response = prepareSearch(index).addAggregation( - dateHistogram("histo").field("d") - .calendarInterval(DateHistogramInterval.MONTH) - .timeZone(ZoneId.of("Europe/Berlin")) - .format("yyyy-MM-dd") - ).get(); - assertNoFailures(response); - histo = response.getAggregations().get("histo"); - assertThat(histo.getBuckets().size(), equalTo(1)); - assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("2016-11-01")); - assertThat(histo.getBuckets().get(0).getDocCount(), equalTo(1L)); - + assertNoFailuresAndResponse( + prepareSearch(index).addAggregation( + dateHistogram("histo").field("d").calendarInterval(DateHistogramInterval.MONTH).timeZone(ZoneId.of("Europe/Berlin")) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo.getBuckets().size(), equalTo(1)); + assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("1477954800000")); + assertThat(histo.getBuckets().get(0).getDocCount(), equalTo(1L)); + } + ); + assertNoFailuresAndResponse( + prepareSearch(index).addAggregation( + dateHistogram("histo").field("d") + .calendarInterval(DateHistogramInterval.MONTH) + .timeZone(ZoneId.of("Europe/Berlin")) + .format("yyyy-MM-dd") + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo.getBuckets().size(), equalTo(1)); + assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("2016-11-01")); + assertThat(histo.getBuckets().get(0).getDocCount(), equalTo(1L)); + } + ); internalCluster().wipeIndices(index); } @@ -1427,62 +1471,73 @@ public void testRewriteTimeZone_EpochMillisFormat() throws InterruptedException, * "2015-10-25T04:00:00.000+01:00". 
*/ public void testDSTEndTransition() throws Exception { - SearchResponse response = prepareSearch("idx").setQuery(new MatchNoneQueryBuilder()) - .addAggregation( - dateHistogram("histo").field("date") - .timeZone(ZoneId.of("Europe/Oslo")) - .calendarInterval(DateHistogramInterval.HOUR) - .minDocCount(0) - .extendedBounds(new LongBounds("2015-10-25T02:00:00.000+02:00", "2015-10-25T04:00:00.000+01:00")) - ) - .get(); - - Histogram histo = response.getAggregations().get("histo"); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(4)); - assertThat( - ((ZonedDateTime) buckets.get(1).getKey()).toInstant().toEpochMilli() - ((ZonedDateTime) buckets.get(0).getKey()).toInstant() - .toEpochMilli(), - equalTo(3600000L) - ); - assertThat( - ((ZonedDateTime) buckets.get(2).getKey()).toInstant().toEpochMilli() - ((ZonedDateTime) buckets.get(1).getKey()).toInstant() - .toEpochMilli(), - equalTo(3600000L) - ); - assertThat( - ((ZonedDateTime) buckets.get(3).getKey()).toInstant().toEpochMilli() - ((ZonedDateTime) buckets.get(2).getKey()).toInstant() - .toEpochMilli(), - equalTo(3600000L) + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(new MatchNoneQueryBuilder()) + .addAggregation( + dateHistogram("histo").field("date") + .timeZone(ZoneId.of("Europe/Oslo")) + .calendarInterval(DateHistogramInterval.HOUR) + .minDocCount(0) + .extendedBounds(new LongBounds("2015-10-25T02:00:00.000+02:00", "2015-10-25T04:00:00.000+01:00")) + ), + response -> { + + Histogram histo = response.getAggregations().get("histo"); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(4)); + assertThat( + ((ZonedDateTime) buckets.get(1).getKey()).toInstant().toEpochMilli() - ((ZonedDateTime) buckets.get(0).getKey()) + .toInstant() + .toEpochMilli(), + equalTo(3600000L) + ); + assertThat( + ((ZonedDateTime) buckets.get(2).getKey()).toInstant().toEpochMilli() - ((ZonedDateTime) buckets.get(1).getKey()) + .toInstant() + .toEpochMilli(), + equalTo(3600000L) + ); + assertThat( + ((ZonedDateTime) buckets.get(3).getKey()).toInstant().toEpochMilli() - ((ZonedDateTime) buckets.get(2).getKey()) + .toInstant() + .toEpochMilli(), + equalTo(3600000L) + ); + } ); - response = prepareSearch("idx").setQuery(new MatchNoneQueryBuilder()) - .addAggregation( - dateHistogram("histo").field("date") - .timeZone(ZoneId.of("Europe/Oslo")) - .calendarInterval(DateHistogramInterval.HOUR) - .minDocCount(0) - .extendedBounds(new LongBounds("2015-10-25T02:00:00.000+02:00", "2015-10-25T04:00:00.000+01:00")) - ) - .get(); - - histo = response.getAggregations().get("histo"); - buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(4)); - assertThat( - ((ZonedDateTime) buckets.get(1).getKey()).toInstant().toEpochMilli() - ((ZonedDateTime) buckets.get(0).getKey()).toInstant() - .toEpochMilli(), - equalTo(3600000L) - ); - assertThat( - ((ZonedDateTime) buckets.get(2).getKey()).toInstant().toEpochMilli() - ((ZonedDateTime) buckets.get(1).getKey()).toInstant() - .toEpochMilli(), - equalTo(3600000L) - ); - assertThat( - ((ZonedDateTime) buckets.get(3).getKey()).toInstant().toEpochMilli() - ((ZonedDateTime) buckets.get(2).getKey()).toInstant() - .toEpochMilli(), - equalTo(3600000L) + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(new MatchNoneQueryBuilder()) + .addAggregation( + dateHistogram("histo").field("date") + .timeZone(ZoneId.of("Europe/Oslo")) + .calendarInterval(DateHistogramInterval.HOUR) + .minDocCount(0) + .extendedBounds(new LongBounds("2015-10-25T02:00:00.000+02:00", 
"2015-10-25T04:00:00.000+01:00")) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(4)); + assertThat( + ((ZonedDateTime) buckets.get(1).getKey()).toInstant().toEpochMilli() - ((ZonedDateTime) buckets.get(0).getKey()) + .toInstant() + .toEpochMilli(), + equalTo(3600000L) + ); + assertThat( + ((ZonedDateTime) buckets.get(2).getKey()).toInstant().toEpochMilli() - ((ZonedDateTime) buckets.get(1).getKey()) + .toInstant() + .toEpochMilli(), + equalTo(3600000L) + ); + assertThat( + ((ZonedDateTime) buckets.get(3).getKey()).toInstant().toEpochMilli() - ((ZonedDateTime) buckets.get(2).getKey()) + .toInstant() + .toEpochMilli(), + equalTo(3600000L) + ); + } ); } @@ -1516,14 +1571,14 @@ public void testScriptCaching() throws Exception { // Test that a request using a nondeterministic script does not get cached Map params = new HashMap<>(); params.put("fieldname", "d"); - SearchResponse r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - dateHistogram("histo").field("d") - .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.CURRENT_DATE, params)) - .calendarInterval(DateHistogramInterval.MONTH) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + dateHistogram("histo").field("d") + .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.CURRENT_DATE, params)) + .calendarInterval(DateHistogramInterval.MONTH) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -1535,14 +1590,14 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a deterministic script gets cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - dateHistogram("histo").field("d") - .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.LONG_PLUS_ONE_MONTH, params)) - .calendarInterval(DateHistogramInterval.MONTH) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + dateHistogram("histo").field("d") + .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.LONG_PLUS_ONE_MONTH, params)) + .calendarInterval(DateHistogramInterval.MONTH) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -1554,10 +1609,10 @@ public void testScriptCaching() throws Exception { ); // Ensure that non-scripted requests are cached as normal - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation(dateHistogram("histo").field("d").calendarInterval(DateHistogramInterval.MONTH)) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation(dateHistogram("histo").field("d").calendarInterval(DateHistogramInterval.MONTH)) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -1611,34 +1666,35 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound private void assertMultiSortResponse(int[] expectedDays, BucketOrder... 
order) { ZonedDateTime[] expectedKeys = Arrays.stream(expectedDays).mapToObj(d -> date(1, d)).toArray(ZonedDateTime[]::new); - SearchResponse response = prepareSearch("sort_idx").addAggregation( - dateHistogram("histo").field("date") - .calendarInterval(DateHistogramInterval.DAY) - .order(BucketOrder.compound(order)) - .subAggregation(avg("avg_l").field("l")) - .subAggregation(sum("sum_d").field("d")) - ).get(); - - assertNoFailures(response); - - Histogram histogram = response.getAggregations().get("histo"); - assertThat(histogram, notNullValue()); - assertThat(histogram.getName(), equalTo("histo")); - assertThat(histogram.getBuckets().size(), equalTo(expectedKeys.length)); - - int i = 0; - for (Histogram.Bucket bucket : histogram.getBuckets()) { - assertThat(bucket, notNullValue()); - assertThat(key(bucket), equalTo(expectedKeys[i])); - assertThat(bucket.getDocCount(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("_count"))); - Avg avg = bucket.getAggregations().get("avg_l"); - assertThat(avg, notNullValue()); - assertThat(avg.getValue(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("avg_l"))); - Sum sum = bucket.getAggregations().get("sum_d"); - assertThat(sum, notNullValue()); - assertThat(sum.value(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("sum_d"))); - i++; - } + assertNoFailuresAndResponse( + prepareSearch("sort_idx").addAggregation( + dateHistogram("histo").field("date") + .calendarInterval(DateHistogramInterval.DAY) + .order(BucketOrder.compound(order)) + .subAggregation(avg("avg_l").field("l")) + .subAggregation(sum("sum_d").field("d")) + ), + response -> { + Histogram histogram = response.getAggregations().get("histo"); + assertThat(histogram, notNullValue()); + assertThat(histogram.getName(), equalTo("histo")); + assertThat(histogram.getBuckets().size(), equalTo(expectedKeys.length)); + + int i = 0; + for (Histogram.Bucket bucket : histogram.getBuckets()) { + assertThat(bucket, notNullValue()); + assertThat(key(bucket), equalTo(expectedKeys[i])); + assertThat(bucket.getDocCount(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("_count"))); + Avg avg = bucket.getAggregations().get("avg_l"); + assertThat(avg, notNullValue()); + assertThat(avg.getValue(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("avg_l"))); + Sum sum = bucket.getAggregations().get("sum_d"); + assertThat(sum, notNullValue()); + assertThat(sum.value(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("sum_d"))); + i++; + } + } + ); } private ZonedDateTime key(Histogram.Bucket bucket) { @@ -1655,63 +1711,68 @@ public void testDateNanosHistogram() throws Exception { indexRandom(true, client().prepareIndex("nanos").setId("2").setSource("date", "2000-01-02")); // Search interval 24 hours - SearchResponse r = prepareSearch("nanos").addAggregation( - dateHistogram("histo").field("date") - .fixedInterval(DateHistogramInterval.seconds(60 * 60 * 24)) - .timeZone(ZoneId.of("Europe/Berlin")) - ).addDocValueField("date").get(); - assertNoFailures(r); - - Histogram histogram = r.getAggregations().get("histo"); - List buckets = histogram.getBuckets(); - assertEquals(2, buckets.size()); - assertEquals(946681200000L, ((ZonedDateTime) buckets.get(0).getKey()).toEpochSecond() * 1000); - assertEquals(1, buckets.get(0).getDocCount()); - assertEquals(946767600000L, ((ZonedDateTime) buckets.get(1).getKey()).toEpochSecond() * 1000); - assertEquals(1, buckets.get(1).getDocCount()); - - r = prepareSearch("nanos").addAggregation( - 
dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.seconds(60 * 60 * 24)).timeZone(ZoneId.of("UTC")) - ).addDocValueField("date").get(); - assertNoFailures(r); - - histogram = r.getAggregations().get("histo"); - buckets = histogram.getBuckets(); - assertEquals(2, buckets.size()); - assertEquals(946684800000L, ((ZonedDateTime) buckets.get(0).getKey()).toEpochSecond() * 1000); - assertEquals(1, buckets.get(0).getDocCount()); - assertEquals(946771200000L, ((ZonedDateTime) buckets.get(1).getKey()).toEpochSecond() * 1000); - assertEquals(1, buckets.get(1).getDocCount()); + assertNoFailuresAndResponse( + prepareSearch("nanos").addAggregation( + dateHistogram("histo").field("date") + .fixedInterval(DateHistogramInterval.seconds(60 * 60 * 24)) + .timeZone(ZoneId.of("Europe/Berlin")) + ).addDocValueField("date"), + response -> { + Histogram histogram = response.getAggregations().get("histo"); + List buckets = histogram.getBuckets(); + assertEquals(2, buckets.size()); + assertEquals(946681200000L, ((ZonedDateTime) buckets.get(0).getKey()).toEpochSecond() * 1000); + assertEquals(1, buckets.get(0).getDocCount()); + assertEquals(946767600000L, ((ZonedDateTime) buckets.get(1).getKey()).toEpochSecond() * 1000); + assertEquals(1, buckets.get(1).getDocCount()); + } + ); + + assertNoFailuresAndResponse( + prepareSearch("nanos").addAggregation( + dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.seconds(60 * 60 * 24)).timeZone(ZoneId.of("UTC")) + ).addDocValueField("date"), + response -> { + Histogram histogram = response.getAggregations().get("histo"); + List buckets = histogram.getBuckets(); + assertEquals(2, buckets.size()); + assertEquals(946684800000L, ((ZonedDateTime) buckets.get(0).getKey()).toEpochSecond() * 1000); + assertEquals(1, buckets.get(0).getDocCount()); + assertEquals(946771200000L, ((ZonedDateTime) buckets.get(1).getKey()).toEpochSecond() * 1000); + assertEquals(1, buckets.get(1).getDocCount()); + } + ); } public void testDateKeyFormatting() { - SearchResponse response = prepareSearch("idx").addAggregation( - dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).timeZone(ZoneId.of("America/Edmonton")) - ).get(); - - assertNoFailures(response); - - InternalDateHistogram histogram = response.getAggregations().get("histo"); - List buckets = histogram.getBuckets(); - assertThat(buckets.get(0).getKeyAsString(), equalTo("2012-01-01T00:00:00.000-07:00")); - assertThat(buckets.get(1).getKeyAsString(), equalTo("2012-02-01T00:00:00.000-07:00")); - assertThat(buckets.get(2).getKeyAsString(), equalTo("2012-03-01T00:00:00.000-07:00")); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).timeZone(ZoneId.of("America/Edmonton")) + ), + response -> { + InternalDateHistogram histogram = response.getAggregations().get("histo"); + List buckets = histogram.getBuckets(); + assertThat(buckets.get(0).getKeyAsString(), equalTo("2012-01-01T00:00:00.000-07:00")); + assertThat(buckets.get(1).getKeyAsString(), equalTo("2012-02-01T00:00:00.000-07:00")); + assertThat(buckets.get(2).getKeyAsString(), equalTo("2012-03-01T00:00:00.000-07:00")); + } + ); } public void testHardBoundsOnDates() { - SearchResponse response = prepareSearch("idx").addAggregation( - dateHistogram("histo").field("date") - .calendarInterval(DateHistogramInterval.DAY) - .hardBounds(new LongBounds("2012-02-01T00:00:00.000", "2012-03-03T00:00:00.000")) - ).get(); - - 
assertNoFailures(response); - - InternalDateHistogram histogram = response.getAggregations().get("histo"); - List buckets = histogram.getBuckets(); - assertThat(buckets.size(), equalTo(30)); - assertThat(buckets.get(1).getKeyAsString(), equalTo("2012-02-03T00:00:00.000Z")); - assertThat(buckets.get(29).getKeyAsString(), equalTo("2012-03-02T00:00:00.000Z")); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + dateHistogram("histo").field("date") + .calendarInterval(DateHistogramInterval.DAY) + .hardBounds(new LongBounds("2012-02-01T00:00:00.000", "2012-03-03T00:00:00.000")) + ), + response -> { + InternalDateHistogram histogram = response.getAggregations().get("histo"); + List buckets = histogram.getBuckets(); + assertThat(buckets.size(), equalTo(30)); + assertThat(buckets.get(1).getKeyAsString(), equalTo("2012-02-03T00:00:00.000Z")); + assertThat(buckets.get(29).getKeyAsString(), equalTo("2012-03-02T00:00:00.000Z")); + } + ); } - } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java index 64c1a7c8859fc..c3a1209c7d3bf 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.index.mapper.DateFieldMapper; @@ -25,6 +24,7 @@ import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.search.aggregations.AggregationBuilders.dateHistogram; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.core.IsNull.notNullValue; @@ -72,39 +72,43 @@ private void prepareIndex(ZonedDateTime date, int numHours, int stepSizeHours, i public void testSingleValueWithPositiveOffset() throws Exception { prepareIndex(date("2014-03-11T00:00:00+00:00"), 5, 1, 0); - SearchResponse response = prepareSearch("idx2").setQuery(matchAllQuery()) - .addAggregation( - dateHistogram("date_histo").field("date").offset("2h").format(DATE_FORMAT).fixedInterval(DateHistogramInterval.DAY) - ) - .get(); - - assertThat(response.getHits().getTotalHits().value, equalTo(5L)); - - Histogram histo = response.getAggregations().get("date_histo"); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(2)); - - checkBucketFor(buckets.get(0), ZonedDateTime.of(2014, 3, 10, 2, 0, 0, 0, ZoneOffset.UTC), 2L); - checkBucketFor(buckets.get(1), ZonedDateTime.of(2014, 3, 11, 2, 0, 0, 0, ZoneOffset.UTC), 3L); + assertResponse( + prepareSearch("idx2").setQuery(matchAllQuery()) + .addAggregation( + dateHistogram("date_histo").field("date").offset("2h").format(DATE_FORMAT).fixedInterval(DateHistogramInterval.DAY) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(5L)); + + Histogram histo = response.getAggregations().get("date_histo"); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(2)); + 
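// ---------------------------------------------------------------------------
// Editorial note: checkBucketFor below is this test class's local helper; it
// presumably pins a bucket's key and doc count, roughly like this sketch
// (illustrative -- the real helper may also verify the formatted key string):
private static void checkBucketFor(Histogram.Bucket bucket, ZonedDateTime expectedKey, long expectedDocCount) {
    assertThat(bucket, notNullValue());
    assertThat(((ZonedDateTime) bucket.getKey()), equalTo(expectedKey));
    assertThat(bucket.getDocCount(), equalTo(expectedDocCount));
}
// ---------------------------------------------------------------------------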
+ checkBucketFor(buckets.get(0), ZonedDateTime.of(2014, 3, 10, 2, 0, 0, 0, ZoneOffset.UTC), 2L); + checkBucketFor(buckets.get(1), ZonedDateTime.of(2014, 3, 11, 2, 0, 0, 0, ZoneOffset.UTC), 3L); + } + ); } public void testSingleValueWithNegativeOffset() throws Exception { prepareIndex(date("2014-03-11T00:00:00+00:00"), 5, -1, 0); - SearchResponse response = prepareSearch("idx2").setQuery(matchAllQuery()) - .addAggregation( - dateHistogram("date_histo").field("date").offset("-2h").format(DATE_FORMAT).fixedInterval(DateHistogramInterval.DAY) - ) - .get(); - - assertThat(response.getHits().getTotalHits().value, equalTo(5L)); - - Histogram histo = response.getAggregations().get("date_histo"); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(2)); - - checkBucketFor(buckets.get(0), ZonedDateTime.of(2014, 3, 9, 22, 0, 0, 0, ZoneOffset.UTC), 2L); - checkBucketFor(buckets.get(1), ZonedDateTime.of(2014, 3, 10, 22, 0, 0, 0, ZoneOffset.UTC), 3L); + assertResponse( + prepareSearch("idx2").setQuery(matchAllQuery()) + .addAggregation( + dateHistogram("date_histo").field("date").offset("-2h").format(DATE_FORMAT).fixedInterval(DateHistogramInterval.DAY) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(5L)); + + Histogram histo = response.getAggregations().get("date_histo"); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(2)); + + checkBucketFor(buckets.get(0), ZonedDateTime.of(2014, 3, 9, 22, 0, 0, 0, ZoneOffset.UTC), 2L); + checkBucketFor(buckets.get(1), ZonedDateTime.of(2014, 3, 10, 22, 0, 0, 0, ZoneOffset.UTC), 3L); + } + ); } /** @@ -114,27 +118,29 @@ public void testSingleValueWithOffsetMinDocCount() throws Exception { prepareIndex(date("2014-03-11T00:00:00+00:00"), 12, 1, 0); prepareIndex(date("2014-03-14T00:00:00+00:00"), 12, 1, 13); - SearchResponse response = prepareSearch("idx2").setQuery(matchAllQuery()) - .addAggregation( - dateHistogram("date_histo").field("date") - .offset("6h") - .minDocCount(0) - .format(DATE_FORMAT) - .fixedInterval(DateHistogramInterval.DAY) - ) - .get(); - - assertThat(response.getHits().getTotalHits().value, equalTo(24L)); - - Histogram histo = response.getAggregations().get("date_histo"); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(5)); - - checkBucketFor(buckets.get(0), ZonedDateTime.of(2014, 3, 10, 6, 0, 0, 0, ZoneOffset.UTC), 6L); - checkBucketFor(buckets.get(1), ZonedDateTime.of(2014, 3, 11, 6, 0, 0, 0, ZoneOffset.UTC), 6L); - checkBucketFor(buckets.get(2), ZonedDateTime.of(2014, 3, 12, 6, 0, 0, 0, ZoneOffset.UTC), 0L); - checkBucketFor(buckets.get(3), ZonedDateTime.of(2014, 3, 13, 6, 0, 0, 0, ZoneOffset.UTC), 6L); - checkBucketFor(buckets.get(4), ZonedDateTime.of(2014, 3, 14, 6, 0, 0, 0, ZoneOffset.UTC), 6L); + assertResponse( + prepareSearch("idx2").setQuery(matchAllQuery()) + .addAggregation( + dateHistogram("date_histo").field("date") + .offset("6h") + .minDocCount(0) + .format(DATE_FORMAT) + .fixedInterval(DateHistogramInterval.DAY) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(24L)); + + Histogram histo = response.getAggregations().get("date_histo"); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(5)); + + checkBucketFor(buckets.get(0), ZonedDateTime.of(2014, 3, 10, 6, 0, 0, 0, ZoneOffset.UTC), 6L); + checkBucketFor(buckets.get(1), ZonedDateTime.of(2014, 3, 11, 6, 0, 0, 0, ZoneOffset.UTC), 6L); + checkBucketFor(buckets.get(2), ZonedDateTime.of(2014, 3, 12, 6, 0, 0, 0, ZoneOffset.UTC), 
0L); + checkBucketFor(buckets.get(3), ZonedDateTime.of(2014, 3, 13, 6, 0, 0, 0, ZoneOffset.UTC), 6L); + checkBucketFor(buckets.get(4), ZonedDateTime.of(2014, 3, 14, 6, 0, 0, 0, ZoneOffset.UTC), 6L); + } + ); } /** diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java index 44b0ff05ea274..e7acc10e98f58 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; @@ -43,6 +42,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -127,166 +127,173 @@ public void testDateMath() throws Exception { } else { rangeBuilder.script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.EXTRACT_FIELD, params)); } - SearchResponse response = prepareSearch("idx").addAggregation( - rangeBuilder.addUnboundedTo("a long time ago", "now-50y") - .addRange("recently", "now-50y", "now-1y") - .addUnboundedFrom("last year", "now-1y") - .timeZone(ZoneId.of("Etc/GMT+5")) - ).get(); - - assertNoFailures(response); - - Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - assertThat(range.getBuckets().size(), equalTo(3)); - - List buckets = new ArrayList<>(range.getBuckets()); - - Range.Bucket bucket = buckets.get(0); - assertThat((String) bucket.getKey(), equalTo("a long time ago")); - assertThat(bucket.getKeyAsString(), equalTo("a long time ago")); - assertThat(bucket.getDocCount(), equalTo(0L)); - - bucket = buckets.get(1); - assertThat((String) bucket.getKey(), equalTo("recently")); - assertThat(bucket.getKeyAsString(), equalTo("recently")); - assertThat(bucket.getDocCount(), equalTo((long) numDocs)); - - bucket = buckets.get(2); - assertThat((String) bucket.getKey(), equalTo("last year")); - assertThat(bucket.getKeyAsString(), equalTo("last year")); - assertThat(bucket.getDocCount(), equalTo(0L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + rangeBuilder.addUnboundedTo("a long time ago", "now-50y") + .addRange("recently", "now-50y", "now-1y") + .addUnboundedFrom("last year", "now-1y") + .timeZone(ZoneId.of("Etc/GMT+5")) + ), + response -> { + Range range = response.getAggregations().get("range"); + assertThat(range, notNullValue()); + assertThat(range.getName(), equalTo("range")); + assertThat(range.getBuckets().size(), equalTo(3)); + + List buckets = new ArrayList<>(range.getBuckets()); + + Range.Bucket bucket = buckets.get(0); + 
assertThat((String) bucket.getKey(), equalTo("a long time ago")); + assertThat(bucket.getKeyAsString(), equalTo("a long time ago")); + assertThat(bucket.getDocCount(), equalTo(0L)); + + bucket = buckets.get(1); + assertThat((String) bucket.getKey(), equalTo("recently")); + assertThat(bucket.getKeyAsString(), equalTo("recently")); + assertThat(bucket.getDocCount(), equalTo((long) numDocs)); + + bucket = buckets.get(2); + assertThat((String) bucket.getKey(), equalTo("last year")); + assertThat(bucket.getKeyAsString(), equalTo("last year")); + assertThat(bucket.getDocCount(), equalTo(0L)); + } + ); } public void testSingleValueField() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - dateRange("range").field("date").addUnboundedTo(date(2, 15)).addRange(date(2, 15), date(3, 15)).addUnboundedFrom(date(3, 15)) - ).get(); - - assertNoFailures(response); - - Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - List buckets = range.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); - assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); - assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); - assertThat(bucket.getFromAsString(), nullValue()); - assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); - assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); - assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); - assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); - assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); - assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + dateRange("range").field("date") + .addUnboundedTo(date(2, 15)) + .addRange(date(2, 15), date(3, 15)) + .addUnboundedFrom(date(3, 15)) + ), + response -> { + Range range = response.getAggregations().get("range"); + assertThat(range, notNullValue()); + assertThat(range.getName(), equalTo("range")); + List buckets = range.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); + assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); + assertThat(bucket.getFromAsString(), nullValue()); + assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + 
assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); + assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); + assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); + assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); + } + ); } public void testSingleValueFieldWithStringDates() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - dateRange("range").field("date") - .addUnboundedTo("2012-02-15") - .addRange("2012-02-15", "2012-03-15") - .addUnboundedFrom("2012-03-15") - ).get(); - - assertNoFailures(response); - - Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - List buckets = range.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); - assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); - assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); - assertThat(bucket.getFromAsString(), nullValue()); - assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); - assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); - assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); - assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); - assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); - assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + dateRange("range").field("date") + .addUnboundedTo("2012-02-15") + .addRange("2012-02-15", "2012-03-15") + .addUnboundedFrom("2012-03-15") + ), + response -> { + Range range = response.getAggregations().get("range"); + assertThat(range, notNullValue()); + assertThat(range.getName(), equalTo("range")); + List buckets = range.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); + assertThat(((ZonedDateTime) 
bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); + assertThat(bucket.getFromAsString(), nullValue()); + assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); + assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); + assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); + assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); + } + ); } public void testSingleValueFieldWithStringDatesWithCustomFormat() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - dateRange("range").field("date") - .format("yyyy-MM-dd") - .addUnboundedTo("2012-02-15") - .addRange("2012-02-15", "2012-03-15") - .addUnboundedFrom("2012-03-15") - ).get(); - - assertNoFailures(response); - - Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - List buckets = range.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("*-2012-02-15")); - assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); - assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); - assertThat(bucket.getFromAsString(), nullValue()); - assertThat(bucket.getToAsString(), equalTo("2012-02-15")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-02-15-2012-03-15")); - assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); - assertThat(bucket.getFromAsString(), equalTo("2012-02-15")); - assertThat(bucket.getToAsString(), equalTo("2012-03-15")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-03-15-*")); - assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); - assertThat(bucket.getFromAsString(), equalTo("2012-03-15")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + dateRange("range").field("date") + .format("yyyy-MM-dd") + .addUnboundedTo("2012-02-15") + .addRange("2012-02-15", "2012-03-15") + .addUnboundedFrom("2012-03-15") + ), + response -> { + Range range = response.getAggregations().get("range"); + assertThat(range, notNullValue()); + 
assertThat(range.getName(), equalTo("range")); + List buckets = range.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("*-2012-02-15")); + assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); + assertThat(bucket.getFromAsString(), nullValue()); + assertThat(bucket.getToAsString(), equalTo("2012-02-15")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("2012-02-15-2012-03-15")); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); + assertThat(bucket.getFromAsString(), equalTo("2012-02-15")); + assertThat(bucket.getToAsString(), equalTo("2012-03-15")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("2012-03-15-*")); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); + assertThat(bucket.getFromAsString(), equalTo("2012-03-15")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); + } + ); } public void testSingleValueFieldWithDateMath() throws Exception { @@ -297,92 +304,97 @@ public void testSingleValueFieldWithDateMath() throws Exception { String mar15Suffix = timeZoneOffset == 0 ? "Z" : date(3, 15, timezone).format(DateTimeFormatter.ofPattern("xxx", Locale.ROOT)); long expectedFirstBucketCount = timeZoneOffset < 0 ? 
3L : 2L; - SearchResponse response = prepareSearch("idx").addAggregation( - dateRange("range").field("date") - .addUnboundedTo("2012-02-15") - .addRange("2012-02-15", "2012-02-15||+1M") - .addUnboundedFrom("2012-02-15||+1M") - .timeZone(timezone) - ).get(); - - assertNoFailures(response); - - Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - List buckets = range.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000" + feb15Suffix)); - assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); - assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15, timezone).withZoneSameInstant(ZoneOffset.UTC))); - assertThat(bucket.getFromAsString(), nullValue()); - assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000" + feb15Suffix)); - assertThat(bucket.getDocCount(), equalTo(expectedFirstBucketCount)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000" + feb15Suffix + "-2012-03-15T00:00:00.000" + mar15Suffix)); - assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15, timezone).withZoneSameInstant(ZoneOffset.UTC))); - assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15, timezone).withZoneSameInstant(ZoneOffset.UTC))); - assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000" + feb15Suffix)); - assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000" + mar15Suffix)); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000" + mar15Suffix + "-*")); - assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15, timezone).withZoneSameInstant(ZoneOffset.UTC))); - assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); - assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000" + mar15Suffix)); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(numDocs - 2L - expectedFirstBucketCount)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + dateRange("range").field("date") + .addUnboundedTo("2012-02-15") + .addRange("2012-02-15", "2012-02-15||+1M") + .addUnboundedFrom("2012-02-15||+1M") + .timeZone(timezone) + ), + response -> { + Range range = response.getAggregations().get("range"); + assertThat(range, notNullValue()); + assertThat(range.getName(), equalTo("range")); + List buckets = range.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000" + feb15Suffix)); + assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15, timezone).withZoneSameInstant(ZoneOffset.UTC))); + assertThat(bucket.getFromAsString(), nullValue()); + assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000" + feb15Suffix)); + assertThat(bucket.getDocCount(), equalTo(expectedFirstBucketCount)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat( + (String) bucket.getKey(), + equalTo("2012-02-15T00:00:00.000" + feb15Suffix + "-2012-03-15T00:00:00.000" + 
mar15Suffix) + ); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15, timezone).withZoneSameInstant(ZoneOffset.UTC))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15, timezone).withZoneSameInstant(ZoneOffset.UTC))); + assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000" + feb15Suffix)); + assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000" + mar15Suffix)); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000" + mar15Suffix + "-*")); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15, timezone).withZoneSameInstant(ZoneOffset.UTC))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); + assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000" + mar15Suffix)); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(numDocs - 2L - expectedFirstBucketCount)); + } + ); } public void testSingleValueFieldWithCustomKey() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - dateRange("range").field("date") - .addUnboundedTo("r1", date(2, 15)) - .addRange("r2", date(2, 15), date(3, 15)) - .addUnboundedFrom("r3", date(3, 15)) - ).get(); - - assertNoFailures(response); - - Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - List buckets = range.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("r1")); - assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); - assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); - assertThat(bucket.getFromAsString(), nullValue()); - assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("r2")); - assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); - assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); - assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("r3")); - assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); - assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + dateRange("range").field("date") + .addUnboundedTo("r1", date(2, 15)) + .addRange("r2", date(2, 15), date(3, 15)) + .addUnboundedFrom("r3", date(3, 15)) + ), + response -> { + Range range = response.getAggregations().get("range"); + assertThat(range, notNullValue()); + assertThat(range.getName(), equalTo("range")); + List buckets = range.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), 
equalTo("r1")); + assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); + assertThat(bucket.getFromAsString(), nullValue()); + assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("r2")); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); + assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); + assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("r3")); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); + assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); + } + ); } /* @@ -395,68 +407,69 @@ public void testSingleValueFieldWithCustomKey() throws Exception { */ public void testSingleValuedFieldWithSubAggregation() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - dateRange("range").field("date") - .addUnboundedTo("r1", date(2, 15)) - .addRange("r2", date(2, 15), date(3, 15)) - .addUnboundedFrom("r3", date(3, 15)) - .subAggregation(sum("sum").field("value")) - ).get(); - - assertNoFailures(response); - - Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - List buckets = range.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - assertThat(((InternalAggregation) range).getProperty("_bucket_count"), equalTo(3)); - Object[] propertiesKeys = (Object[]) ((InternalAggregation) range).getProperty("_key"); - Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) range).getProperty("_count"); - Object[] propertiesCounts = (Object[]) ((InternalAggregation) range).getProperty("sum.value"); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("r1")); - assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); - assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); - assertThat(bucket.getFromAsString(), nullValue()); - assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); - assertThat(bucket.getDocCount(), equalTo(2L)); - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - assertThat(sum.value(), equalTo((double) 1 + 2)); - assertThat((String) propertiesKeys[0], equalTo("r1")); - assertThat((long) propertiesDocCounts[0], equalTo(2L)); - assertThat((double) propertiesCounts[0], equalTo((double) 1 + 2)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("r2")); - assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); - assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); - assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); - assertThat(bucket.getDocCount(), equalTo(2L)); - sum = 
bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - assertThat(sum.value(), equalTo((double) 3 + 4)); - assertThat((String) propertiesKeys[1], equalTo("r2")); - assertThat((long) propertiesDocCounts[1], equalTo(2L)); - assertThat((double) propertiesCounts[1], equalTo((double) 3 + 4)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("r3")); - assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); - assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); - sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - assertThat((String) propertiesKeys[2], equalTo("r3")); - assertThat((long) propertiesDocCounts[2], equalTo(numDocs - 4L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + dateRange("range").field("date") + .addUnboundedTo("r1", date(2, 15)) + .addRange("r2", date(2, 15), date(3, 15)) + .addUnboundedFrom("r3", date(3, 15)) + .subAggregation(sum("sum").field("value")) + ), + response -> { + Range range = response.getAggregations().get("range"); + assertThat(range, notNullValue()); + assertThat(range.getName(), equalTo("range")); + List buckets = range.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + assertThat(((InternalAggregation) range).getProperty("_bucket_count"), equalTo(3)); + Object[] propertiesKeys = (Object[]) ((InternalAggregation) range).getProperty("_key"); + Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) range).getProperty("_count"); + Object[] propertiesCounts = (Object[]) ((InternalAggregation) range).getProperty("sum.value"); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("r1")); + assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); + assertThat(bucket.getFromAsString(), nullValue()); + assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); + assertThat(bucket.getDocCount(), equalTo(2L)); + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + assertThat(sum.value(), equalTo((double) 1 + 2)); + assertThat((String) propertiesKeys[0], equalTo("r1")); + assertThat((long) propertiesDocCounts[0], equalTo(2L)); + assertThat((double) propertiesCounts[0], equalTo((double) 1 + 2)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("r2")); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); + assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); + assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); + assertThat(bucket.getDocCount(), equalTo(2L)); + sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + assertThat(sum.value(), equalTo((double) 3 + 4)); + assertThat((String) propertiesKeys[1], equalTo("r2")); + assertThat((long) propertiesDocCounts[1], equalTo(2L)); + assertThat((double) propertiesCounts[1], equalTo((double) 3 + 4)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("r3")); + assertThat(((ZonedDateTime) bucket.getFrom()), 
equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); + assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); + sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + assertThat((String) propertiesKeys[2], equalTo("r3")); + assertThat((long) propertiesDocCounts[2], equalTo(numDocs - 4L)); + } + ); } /* @@ -469,113 +482,123 @@ public void testSingleValuedFieldWithSubAggregation() throws Exception { */ public void testMultiValuedField() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - dateRange("range").field("dates").addUnboundedTo(date(2, 15)).addRange(date(2, 15), date(3, 15)).addUnboundedFrom(date(3, 15)) - ).get(); - - assertNoFailures(response); - - Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - List buckets = range.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); - assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); - assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); - assertThat(bucket.getFromAsString(), nullValue()); - assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); - assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); - assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); - assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); - assertThat(bucket.getDocCount(), equalTo(3L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); - assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); - assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(numDocs - 2L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + dateRange("range").field("dates") + .addUnboundedTo(date(2, 15)) + .addRange(date(2, 15), date(3, 15)) + .addUnboundedFrom(date(3, 15)) + ), + response -> { + Range range = response.getAggregations().get("range"); + assertThat(range, notNullValue()); + assertThat(range.getName(), equalTo("range")); + List buckets = range.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); + assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); + assertThat(bucket.getFromAsString(), nullValue()); + assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + 
assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); + assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); + assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); + assertThat(bucket.getDocCount(), equalTo(3L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); + assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(numDocs - 2L)); + } + ); } public void testPartiallyUnmapped() throws Exception { - SearchResponse response = prepareSearch("idx", "idx_unmapped").addAggregation( - dateRange("range").field("date").addUnboundedTo(date(2, 15)).addRange(date(2, 15), date(3, 15)).addUnboundedFrom(date(3, 15)) - ).get(); - - assertNoFailures(response); - - Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - List buckets = range.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); - assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); - assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); - assertThat(bucket.getFromAsString(), nullValue()); - assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); - assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); - assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); - assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); - assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); - assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); + assertNoFailuresAndResponse( + prepareSearch("idx", "idx_unmapped").addAggregation( + dateRange("range").field("date") + .addUnboundedTo(date(2, 15)) + .addRange(date(2, 15), date(3, 15)) + .addUnboundedFrom(date(3, 15)) + ), + response -> { + Range range = response.getAggregations().get("range"); + assertThat(range, notNullValue()); + assertThat(range.getName(), equalTo("range")); + List buckets = range.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); + assertThat(((ZonedDateTime) 
bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); + assertThat(bucket.getFromAsString(), nullValue()); + assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); + assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); + assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); + assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); + } + ); } public void testEmptyAggregation() throws Exception { - SearchResponse searchResponse = prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field("value") - .interval(1L) - .minDocCount(0) - .subAggregation(dateRange("date_range").field("value").addRange("0-1", 0, 1)) - ) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - Histogram histo = searchResponse.getAggregations().get("histo"); - assertThat(histo, Matchers.notNullValue()); - Histogram.Bucket bucket = histo.getBuckets().get(1); - assertThat(bucket, Matchers.notNullValue()); - - Range dateRange = bucket.getAggregations().get("date_range"); - List buckets = new ArrayList<>(dateRange.getBuckets()); - assertThat(dateRange, Matchers.notNullValue()); - assertThat(dateRange.getName(), equalTo("date_range")); - assertThat(buckets.size(), is(1)); - assertThat((String) buckets.get(0).getKey(), equalTo("0-1")); - assertThat(((ZonedDateTime) buckets.get(0).getFrom()).toInstant().toEpochMilli(), equalTo(0L)); - assertThat(((ZonedDateTime) buckets.get(0).getTo()).toInstant().toEpochMilli(), equalTo(1L)); - assertThat(buckets.get(0).getDocCount(), equalTo(0L)); - assertThat(buckets.get(0).getAggregations().asList().isEmpty(), is(true)); + assertNoFailuresAndResponse( + prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(1L) + .minDocCount(0) + .subAggregation(dateRange("date_range").field("value").addRange("0-1", 0, 1)) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, Matchers.notNullValue()); + Histogram.Bucket bucket = histo.getBuckets().get(1); + assertThat(bucket, Matchers.notNullValue()); + + Range dateRange = bucket.getAggregations().get("date_range"); + List buckets = new ArrayList<>(dateRange.getBuckets()); + assertThat(dateRange, Matchers.notNullValue()); + assertThat(dateRange.getName(), equalTo("date_range")); + assertThat(buckets.size(), is(1)); + assertThat((String) buckets.get(0).getKey(), equalTo("0-1")); + assertThat(((ZonedDateTime) buckets.get(0).getFrom()).toInstant().toEpochMilli(), equalTo(0L)); + 
assertThat(((ZonedDateTime) buckets.get(0).getTo()).toInstant().toEpochMilli(), equalTo(1L)); + assertThat(buckets.get(0).getDocCount(), equalTo(0L)); + assertThat(buckets.get(0).getAggregations().asList().isEmpty(), is(true)); + } + ); } public void testNoRangesInQuery() { @@ -621,17 +644,17 @@ public void testScriptCaching() throws Exception { // Test that a request using a nondeterministic script does not get cached Map params = new HashMap<>(); params.put("fieldname", "date"); - SearchResponse r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - dateRange("foo").field("date") - .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.CURRENT_DATE, params)) - .addRange( - ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC), - ZonedDateTime.of(2013, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC) - ) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + dateRange("foo").field("date") + .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.CURRENT_DATE, params)) + .addRange( + ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2013, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC) + ) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -643,17 +666,17 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a deterministic script gets cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - dateRange("foo").field("date") - .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.DOUBLE_PLUS_ONE_MONTH, params)) - .addRange( - ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC), - ZonedDateTime.of(2013, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC) - ) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + dateRange("foo").field("date") + .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.DOUBLE_PLUS_ONE_MONTH, params)) + .addRange( + ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2013, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC) + ) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -665,16 +688,16 @@ public void testScriptCaching() throws Exception { ); // Ensure that non-scripted requests are cached as normal - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - dateRange("foo").field("date") - .addRange( - ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC), - ZonedDateTime.of(2013, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC) - ) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + dateRange("foo").field("date") + .addRange( + ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2013, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC) + ) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -702,38 +725,47 @@ public void testRangeWithFormatStringValue() throws Exception { // using no format should work when to/from is compatible with format in // mapping - SearchResponse searchResponse = prepareSearch(indexName).setSize(0) - .addAggregation(dateRange("date_range").field("date").addRange("00:16:40", "00:50:00").addRange("00:50:00", "01:06:40")) - 
.get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - List buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2); - assertBucket(buckets.get(0), 2L, "00:16:40-00:50:00", 1000000L, 3000000L); - assertBucket(buckets.get(1), 1L, "00:50:00-01:06:40", 3000000L, 4000000L); - + assertNoFailuresAndResponse( + prepareSearch(indexName).setSize(0) + .addAggregation(dateRange("date_range").field("date").addRange("00:16:40", "00:50:00").addRange("00:50:00", "01:06:40")), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + List buckets = checkBuckets(response.getAggregations().get("date_range"), "date_range", 2); + assertBucket(buckets.get(0), 2L, "00:16:40-00:50:00", 1000000L, 3000000L); + assertBucket(buckets.get(1), 1L, "00:50:00-01:06:40", 3000000L, 4000000L); + } + ); // using different format should work when to/from is compatible with // format in aggregation - searchResponse = prepareSearch(indexName).setSize(0) - .addAggregation( - dateRange("date_range").field("date").addRange("00.16.40", "00.50.00").addRange("00.50.00", "01.06.40").format("HH.mm.ss") - ) - .get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2); - assertBucket(buckets.get(0), 2L, "00.16.40-00.50.00", 1000000L, 3000000L); - assertBucket(buckets.get(1), 1L, "00.50.00-01.06.40", 3000000L, 4000000L); - + assertNoFailuresAndResponse( + prepareSearch(indexName).setSize(0) + .addAggregation( + dateRange("date_range").field("date") + .addRange("00.16.40", "00.50.00") + .addRange("00.50.00", "01.06.40") + .format("HH.mm.ss") + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + List buckets = checkBuckets(response.getAggregations().get("date_range"), "date_range", 2); + assertBucket(buckets.get(0), 2L, "00.16.40-00.50.00", 1000000L, 3000000L); + assertBucket(buckets.get(1), 1L, "00.50.00-01.06.40", 3000000L, 4000000L); + } + ); // providing numeric input with format should work, but bucket keys are // different now - searchResponse = prepareSearch(indexName).setSize(0) - .addAggregation( - dateRange("date_range").field("date").addRange(1000000, 3000000).addRange(3000000, 4000000).format("epoch_millis") - ) - .get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2); - assertBucket(buckets.get(0), 2L, "1000000-3000000", 1000000L, 3000000L); - assertBucket(buckets.get(1), 1L, "3000000-4000000", 3000000L, 4000000L); - + assertNoFailuresAndResponse( + prepareSearch(indexName).setSize(0) + .addAggregation( + dateRange("date_range").field("date").addRange(1000000, 3000000).addRange(3000000, 4000000).format("epoch_millis") + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + List buckets = checkBuckets(response.getAggregations().get("date_range"), "date_range", 2); + assertBucket(buckets.get(0), 2L, "1000000-3000000", 1000000L, 3000000L); + assertBucket(buckets.get(1), 1L, "3000000-4000000", 3000000L, 4000000L); + } + ); // providing numeric input without format should throw an exception ElasticsearchException e = expectThrows( ElasticsearchException.class, @@ -760,55 +792,69 @@ public void testRangeWithFormatNumericValue() throws Exception { // using no format should work when to/from is compatible with format in // mapping - 
SearchResponse searchResponse = prepareSearch(indexName).setSize(0) - .addAggregation(dateRange("date_range").field("date").addRange(1000, 3000).addRange(3000, 4000)) - .get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - List buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2); - assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); - assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); - + assertNoFailuresAndResponse( + prepareSearch(indexName).setSize(0) + .addAggregation(dateRange("date_range").field("date").addRange(1000, 3000).addRange(3000, 4000)), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + List buckets = checkBuckets(response.getAggregations().get("date_range"), "date_range", 2); + assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); + assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); + } + ); // using no format should also work when and to/from are string values - searchResponse = prepareSearch(indexName).setSize(0) - .addAggregation(dateRange("date_range").field("date").addRange("1000", "3000").addRange("3000", "4000")) - .get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2); - assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); - assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); - + assertNoFailuresAndResponse( + prepareSearch(indexName).setSize(0) + .addAggregation(dateRange("date_range").field("date").addRange("1000", "3000").addRange("3000", "4000")), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + List buckets = checkBuckets(response.getAggregations().get("date_range"), "date_range", 2); + assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); + assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); + } + ); // also e-notation should work, fractional parts should be truncated - searchResponse = prepareSearch(indexName).setSize(0) - .addAggregation(dateRange("date_range").field("date").addRange(1.0e3, 3000.8123).addRange(3000.8123, 4.0e3)) - .get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2); - assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); - assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); - + assertNoFailuresAndResponse( + prepareSearch(indexName).setSize(0) + .addAggregation(dateRange("date_range").field("date").addRange(1.0e3, 3000.8123).addRange(3000.8123, 4.0e3)), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + List buckets = checkBuckets(response.getAggregations().get("date_range"), "date_range", 2); + assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); + assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); + } + ); // using different format should work when to/from is compatible with // format in aggregation - searchResponse = prepareSearch(indexName).setSize(0) - .addAggregation( - dateRange("date_range").field("date").addRange("00.16.40", "00.50.00").addRange("00.50.00", "01.06.40").format("HH.mm.ss") - ) - .get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - buckets = 
checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2); - assertBucket(buckets.get(0), 2L, "00.16.40-00.50.00", 1000000L, 3000000L); - assertBucket(buckets.get(1), 1L, "00.50.00-01.06.40", 3000000L, 4000000L); - + assertNoFailuresAndResponse( + prepareSearch(indexName).setSize(0) + .addAggregation( + dateRange("date_range").field("date") + .addRange("00.16.40", "00.50.00") + .addRange("00.50.00", "01.06.40") + .format("HH.mm.ss") + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + List buckets = checkBuckets(response.getAggregations().get("date_range"), "date_range", 2); + assertBucket(buckets.get(0), 2L, "00.16.40-00.50.00", 1000000L, 3000000L); + assertBucket(buckets.get(1), 1L, "00.50.00-01.06.40", 3000000L, 4000000L); + } + ); // providing different numeric input with format should work, but bucket // keys are different now - searchResponse = prepareSearch(indexName).setSize(0) - .addAggregation( - dateRange("date_range").field("date").addRange(1000000, 3000000).addRange(3000000, 4000000).format("epoch_millis") - ) - .get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2); - assertBucket(buckets.get(0), 2L, "1000000-3000000", 1000000L, 3000000L); - assertBucket(buckets.get(1), 1L, "3000000-4000000", 3000000L, 4000000L); + assertNoFailuresAndResponse( + prepareSearch(indexName).setSize(0) + .addAggregation( + dateRange("date_range").field("date").addRange(1000000, 3000000).addRange(3000000, 4000000).format("epoch_millis") + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + List buckets = checkBuckets(response.getAggregations().get("date_range"), "date_range", 2); + assertBucket(buckets.get(0), 2L, "1000000-3000000", 1000000L, 3000000L); + assertBucket(buckets.get(1), 1L, "3000000-4000000", 3000000L, 4000000L); + } + ); } private static List checkBuckets(Range dateRange, String expectedAggName, long expectedBucketsSize) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java index 612b4bf006aa2..a4ee100fa7541 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.search.aggregations.BucketOrder; @@ -29,6 +28,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -93,31 +93,32 @@ public void testIssue10719() throws Exception { // Tests that we can refer 
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java
index 612b4bf006aa2..a4ee100fa7541 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java
@@ -8,7 +8,6 @@
 package org.elasticsearch.search.aggregations.bucket;
 
 import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.search.SearchType;
 import org.elasticsearch.index.query.TermQueryBuilder;
 import org.elasticsearch.search.aggregations.BucketOrder;
@@ -29,6 +28,7 @@
 import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
@@ -93,31 +93,32 @@ public void testIssue10719() throws Exception {
         // Tests that we can refer to nested elements under a sample in a path
         // statement
         boolean asc = randomBoolean();
-        SearchResponse response = prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH)
-            .addAggregation(
-                terms("genres").field("genre")
-                    .order(BucketOrder.aggregation("sample>max_price.value", asc))
-                    .subAggregation(sampler("sample").shardSize(100).subAggregation(max("max_price").field("price")))
-            )
-            .get();
-        assertNoFailures(response);
-        Terms genres = response.getAggregations().get("genres");
-        Collection genreBuckets = genres.getBuckets();
-        // For this test to be useful we need >1 genre bucket to compare
-        assertThat(genreBuckets.size(), greaterThan(1));
-        double lastMaxPrice = asc ? Double.MIN_VALUE : Double.MAX_VALUE;
-        for (Terms.Bucket genreBucket : genres.getBuckets()) {
-            Sampler sample = genreBucket.getAggregations().get("sample");
-            Max maxPriceInGenre = sample.getAggregations().get("max_price");
-            double price = maxPriceInGenre.value();
-            if (asc) {
-                assertThat(price, greaterThanOrEqualTo(lastMaxPrice));
-            } else {
-                assertThat(price, lessThanOrEqualTo(lastMaxPrice));
+        assertNoFailuresAndResponse(
+            prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH)
+                .addAggregation(
+                    terms("genres").field("genre")
+                        .order(BucketOrder.aggregation("sample>max_price.value", asc))
+                        .subAggregation(sampler("sample").shardSize(100).subAggregation(max("max_price").field("price")))
+                ),
+            response -> {
+                Terms genres = response.getAggregations().get("genres");
+                Collection genreBuckets = genres.getBuckets();
+                // For this test to be useful we need >1 genre bucket to compare
+                assertThat(genreBuckets.size(), greaterThan(1));
+                double lastMaxPrice = asc ? Double.MIN_VALUE : Double.MAX_VALUE;
+                for (Terms.Bucket genreBucket : genres.getBuckets()) {
+                    Sampler sample = genreBucket.getAggregations().get("sample");
+                    Max maxPriceInGenre = sample.getAggregations().get("max_price");
+                    double price = maxPriceInGenre.value();
+                    if (asc) {
+                        assertThat(price, greaterThanOrEqualTo(lastMaxPrice));
+                    } else {
+                        assertThat(price, lessThanOrEqualTo(lastMaxPrice));
+                    }
+                    lastMaxPrice = price;
+                }
             }
-            lastMaxPrice = price;
-        }
-
+        );
     }
 
     public void testSimpleDiversity() throws Exception {
@@ -125,20 +126,22 @@ public void testSimpleDiversity() throws Exception {
         DiversifiedAggregationBuilder sampleAgg = new DiversifiedAggregationBuilder("sample").shardSize(100);
         sampleAgg.field("author").maxDocsPerValue(MAX_DOCS_PER_AUTHOR).executionHint(randomExecutionHint());
         sampleAgg.subAggregation(terms("authors").field("author"));
-        SearchResponse response = prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH)
-            .setQuery(new TermQueryBuilder("genre", "fantasy"))
-            .setFrom(0)
-            .setSize(60)
-            .addAggregation(sampleAgg)
-            .get();
-        assertNoFailures(response);
-        Sampler sample = response.getAggregations().get("sample");
-        Terms authors = sample.getAggregations().get("authors");
-        List testBuckets = authors.getBuckets();
+        assertNoFailuresAndResponse(
+            prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH)
+                .setQuery(new TermQueryBuilder("genre", "fantasy"))
+                .setFrom(0)
+                .setSize(60)
+                .addAggregation(sampleAgg),
+            response -> {
+                Sampler sample = response.getAggregations().get("sample");
+                Terms authors = sample.getAggregations().get("authors");
+                List testBuckets = authors.getBuckets();
 
-        for (Terms.Bucket testBucket : testBuckets) {
-            assertThat(testBucket.getDocCount(), lessThanOrEqualTo((long) NUM_SHARDS * MAX_DOCS_PER_AUTHOR));
-        }
+                for (Terms.Bucket testBucket : testBuckets) {
+                    assertThat(testBucket.getDocCount(), lessThanOrEqualTo((long) NUM_SHARDS * MAX_DOCS_PER_AUTHOR));
+                }
+            }
+        );
     }
 
     public void testNestedDiversity() throws Exception {
@@ -151,19 +154,22 @@
         sampleAgg.subAggregation(terms("authors").field("author"));
         rootTerms.subAggregation(sampleAgg);
 
-        SearchResponse response = prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH).addAggregation(rootTerms).get();
-        assertNoFailures(response);
-        Terms genres = response.getAggregations().get("genres");
-        List genreBuckets = genres.getBuckets();
-        for (Terms.Bucket genreBucket : genreBuckets) {
-            Sampler sample = genreBucket.getAggregations().get("sample");
-            Terms authors = sample.getAggregations().get("authors");
-            List testBuckets = authors.getBuckets();
+        assertNoFailuresAndResponse(
+            prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH).addAggregation(rootTerms),
+            response -> {
+                Terms genres = response.getAggregations().get("genres");
+                List genreBuckets = genres.getBuckets();
+                for (Terms.Bucket genreBucket : genreBuckets) {
+                    Sampler sample = genreBucket.getAggregations().get("sample");
+                    Terms authors = sample.getAggregations().get("authors");
+                    List testBuckets = authors.getBuckets();
 
-            for (Terms.Bucket testBucket : testBuckets) {
-                assertThat(testBucket.getDocCount(), lessThanOrEqualTo((long) NUM_SHARDS * MAX_DOCS_PER_AUTHOR));
+                    for (Terms.Bucket testBucket : testBuckets) {
+                        assertThat(testBucket.getDocCount(), lessThanOrEqualTo((long) NUM_SHARDS * MAX_DOCS_PER_AUTHOR));
+                    }
+                }
             }
-        }
+        );
     }
 
     public void testNestedSamples() throws Exception {
@@ -180,22 +186,25 @@
         sampleAgg.subAggregation(terms("genres").field("genre"));
         rootSample.subAggregation(sampleAgg);
 
-        SearchResponse response = prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH).addAggregation(rootSample).get();
-        assertNoFailures(response);
-        Sampler genreSample = response.getAggregations().get("genreSample");
-        Sampler sample = genreSample.getAggregations().get("sample");
+        assertNoFailuresAndResponse(
+            prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH).addAggregation(rootSample),
+            response -> {
+                Sampler genreSample = response.getAggregations().get("genreSample");
+                Sampler sample = genreSample.getAggregations().get("sample");
 
-        Terms genres = sample.getAggregations().get("genres");
-        List testBuckets = genres.getBuckets();
-        for (Terms.Bucket testBucket : testBuckets) {
-            assertThat(testBucket.getDocCount(), lessThanOrEqualTo((long) NUM_SHARDS * MAX_DOCS_PER_GENRE));
-        }
+                Terms genres = sample.getAggregations().get("genres");
+                List testBuckets = genres.getBuckets();
+                for (Terms.Bucket testBucket : testBuckets) {
+                    assertThat(testBucket.getDocCount(), lessThanOrEqualTo((long) NUM_SHARDS * MAX_DOCS_PER_GENRE));
+                }
 
-        Terms authors = sample.getAggregations().get("authors");
-        testBuckets = authors.getBuckets();
-        for (Terms.Bucket testBucket : testBuckets) {
-            assertThat(testBucket.getDocCount(), lessThanOrEqualTo((long) NUM_SHARDS * MAX_DOCS_PER_AUTHOR));
-        }
+                Terms authors = sample.getAggregations().get("authors");
+                testBuckets = authors.getBuckets();
+                for (Terms.Bucket testBucket : testBuckets) {
+                    assertThat(testBucket.getDocCount(), lessThanOrEqualTo((long) NUM_SHARDS * MAX_DOCS_PER_AUTHOR));
+                }
+            }
+        );
     }
 
     public void testPartiallyUnmappedDiversifyField() throws Exception {
@@ -205,17 +214,19 @@
             .field("author")
             .maxDocsPerValue(1);
         sampleAgg.subAggregation(terms("authors").field("author"));
-        SearchResponse response = prepareSearch("idx_unmapped_author", "test").setSearchType(SearchType.QUERY_THEN_FETCH)
-            .setQuery(new TermQueryBuilder("genre", "fantasy"))
-            .setFrom(0)
-            .setSize(60)
-            .addAggregation(sampleAgg)
-            .get();
-        assertNoFailures(response);
-        Sampler sample = response.getAggregations().get("sample");
-        assertThat(sample.getDocCount(), greaterThan(0L));
-        Terms authors = sample.getAggregations().get("authors");
-        assertThat(authors.getBuckets().size(), greaterThan(0));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx_unmapped_author", "test").setSearchType(SearchType.QUERY_THEN_FETCH)
+                .setQuery(new TermQueryBuilder("genre", "fantasy"))
+                .setFrom(0)
+                .setSize(60)
+                .addAggregation(sampleAgg),
+            response -> {
+                Sampler sample = response.getAggregations().get("sample");
+                assertThat(sample.getDocCount(), greaterThan(0L));
+                Terms authors = sample.getAggregations().get("authors");
+                assertThat(authors.getBuckets().size(), greaterThan(0));
+            }
+        );
     }
 
     public void testWhollyUnmappedDiversifyField() throws Exception {
@@ -224,17 +235,19 @@
         DiversifiedAggregationBuilder sampleAgg = new DiversifiedAggregationBuilder("sample").shardSize(100);
         sampleAgg.field("author").maxDocsPerValue(MAX_DOCS_PER_AUTHOR).executionHint(randomExecutionHint());
         sampleAgg.subAggregation(terms("authors").field("author"));
-        SearchResponse response = prepareSearch("idx_unmapped", "idx_unmapped_author").setSearchType(SearchType.QUERY_THEN_FETCH)
-            .setQuery(new TermQueryBuilder("genre", "fantasy"))
-            .setFrom(0)
-            .setSize(60)
-            .addAggregation(sampleAgg)
-            .get();
-        assertNoFailures(response);
-        Sampler sample = response.getAggregations().get("sample");
-        assertThat(sample.getDocCount(), equalTo(0L));
-        Terms authors = sample.getAggregations().get("authors");
-        assertNull(authors);
+        assertNoFailuresAndResponse(
+            prepareSearch("idx_unmapped", "idx_unmapped_author").setSearchType(SearchType.QUERY_THEN_FETCH)
+                .setQuery(new TermQueryBuilder("genre", "fantasy"))
+                .setFrom(0)
+                .setSize(60)
+                .addAggregation(sampleAgg),
+            response -> {
+                Sampler sample = response.getAggregations().get("sample");
+                assertThat(sample.getDocCount(), equalTo(0L));
+                Terms authors = sample.getAggregations().get("authors");
+                assertNull(authors);
+            }
+        );
     }
 
     public void testRidiculousSizeDiversity() throws Exception {
@@ -242,24 +255,23 @@
         DiversifiedAggregationBuilder sampleAgg = new DiversifiedAggregationBuilder("sample").shardSize(Integer.MAX_VALUE);
         sampleAgg.field("author").maxDocsPerValue(MAX_DOCS_PER_AUTHOR).executionHint(randomExecutionHint());
         sampleAgg.subAggregation(terms("authors").field("author"));
-        SearchResponse response = prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH)
-            .setQuery(new TermQueryBuilder("genre", "fantasy"))
-            .setFrom(0)
-            .setSize(60)
-            .addAggregation(sampleAgg)
-            .get();
-        assertNoFailures(response);
+        assertNoFailures(
+            prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH)
+                .setQuery(new TermQueryBuilder("genre", "fantasy"))
+                .setFrom(0)
+                .setSize(60)
+                .addAggregation(sampleAgg)
+        );
 
         sampleAgg = new DiversifiedAggregationBuilder("sample").shardSize(100);
         sampleAgg.field("author").maxDocsPerValue(Integer.MAX_VALUE).executionHint(randomExecutionHint());
         sampleAgg.subAggregation(terms("authors").field("author"));
-        response = prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH)
-            .setQuery(new TermQueryBuilder("genre", "fantasy"))
-            .setFrom(0)
-            .setSize(60)
-            .addAggregation(sampleAgg)
-            .get();
-        assertNoFailures(response);
+        assertNoFailures(
+            prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH)
+                .setQuery(new TermQueryBuilder("genre", "fantasy"))
+                .setFrom(0)
+                .setSize(60)
+                .addAggregation(sampleAgg)
+        );
     }
-
 }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java
index 0381a5521dea0..2477d61b9e608 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java
@@ -9,7 +9,6 @@
 
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.index.IndexRequestBuilder;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.core.Strings;
 import org.elasticsearch.index.fielddata.ScriptDocValues;
@@ -55,6 +54,7 @@
 import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
@@ -275,105 +275,116 @@ public void testMultiValueFieldWithPartitionedFiltering() throws Exception {
 
     private void runTestFieldWithPartitionedFiltering(String field) throws Exception {
         // Find total number of unique terms
-        SearchResponse allResponse = prepareSearch("idx").addAggregation(
-            new TermsAggregationBuilder("terms").field(field).size(10000).collectMode(randomFrom(SubAggCollectionMode.values()))
-        ).get();
-        assertNoFailures(allResponse);
-        DoubleTerms terms = allResponse.getAggregations().get("terms");
-        assertThat(terms, notNullValue());
-        assertThat(terms.getName(), equalTo("terms"));
-        int expectedCardinality = terms.getBuckets().size();
+        int[] expectedCardinality = new int[1];
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                new TermsAggregationBuilder("terms").field(field).size(10000).collectMode(randomFrom(SubAggCollectionMode.values()))
+            ),
+            response -> {
+                assertNoFailures(response);
+                DoubleTerms terms = response.getAggregations().get("terms");
+                assertThat(terms, notNullValue());
+                assertThat(terms.getName(), equalTo("terms"));
+                expectedCardinality[0] = terms.getBuckets().size();
+            }
+        );
 
         // Gather terms using partitioned aggregations
         final int numPartitions = randomIntBetween(2, 4);
         Set foundTerms = new HashSet<>();
         for (int partition = 0; partition < numPartitions; partition++) {
-            SearchResponse response = prepareSearch("idx").addAggregation(
-                new TermsAggregationBuilder("terms").field(field)
-                    .includeExclude(new IncludeExclude(partition, numPartitions))
-                    .collectMode(randomFrom(SubAggCollectionMode.values()))
-            ).get();
-            assertNoFailures(response);
-            terms = response.getAggregations().get("terms");
-            assertThat(terms, notNullValue());
-            assertThat(terms.getName(), equalTo("terms"));
-            for (DoubleTerms.Bucket bucket : terms.getBuckets()) {
-                assertTrue(foundTerms.add(bucket.getKeyAsNumber()));
-                assertThat(bucket.getKeyAsNumber(), instanceOf(Double.class));
-            }
+            assertNoFailuresAndResponse(
+                prepareSearch("idx").addAggregation(
+                    new TermsAggregationBuilder("terms").field(field)
+                        .includeExclude(new IncludeExclude(partition, numPartitions))
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                ),
+                response -> {
+                    DoubleTerms terms = response.getAggregations().get("terms");
+                    assertThat(terms, notNullValue());
+                    assertThat(terms.getName(), equalTo("terms"));
+                    for (DoubleTerms.Bucket bucket : terms.getBuckets()) {
+                        assertTrue(foundTerms.add(bucket.getKeyAsNumber()));
+                        assertThat(bucket.getKeyAsNumber(), instanceOf(Double.class));
+                    }
+                }
+            );
         }
-        assertEquals(expectedCardinality, foundTerms.size());
+        assertEquals(expectedCardinality[0], foundTerms.size());
     }
 
     public void testSingleValuedFieldWithValueScript() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME)
-                .collectMode(randomFrom(SubAggCollectionMode.values()))
-                .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap()))
-        ).get();
-
-        assertNoFailures(response);
-
-        DoubleTerms terms = response.getAggregations().get("terms");
-        assertThat(terms, notNullValue());
-        assertThat(terms.getName(), equalTo("terms"));
-        assertThat(terms.getBuckets().size(), equalTo(5));
-
-        for (int i = 0; i < 5; i++) {
-            DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (i + 1d));
-            assertThat(bucket, notNullValue());
-            assertThat(bucket.getKeyAsString(), equalTo("" + (i + 1d)));
-            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i + 1));
-            assertThat(bucket.getDocCount(), equalTo(1L));
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME)
+                    .collectMode(randomFrom(SubAggCollectionMode.values()))
+                    .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap()))
+            ),
+            response -> {
+                DoubleTerms terms = response.getAggregations().get("terms");
+                assertThat(terms, notNullValue());
+                assertThat(terms.getName(), equalTo("terms"));
+                assertThat(terms.getBuckets().size(), equalTo(5));
+
+                for (int i = 0; i < 5; i++) {
+                    DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (i + 1d));
+                    assertThat(bucket, notNullValue());
+                    assertThat(bucket.getKeyAsString(), equalTo("" + (i + 1d)));
+                    assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i + 1));
+                    assertThat(bucket.getDocCount(), equalTo(1L));
+                }
+            }
+        );
     }
 
     public void testMultiValuedFieldWithValueScript() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            new TermsAggregationBuilder("terms").field(MULTI_VALUED_FIELD_NAME)
-                .collectMode(randomFrom(SubAggCollectionMode.values()))
-                .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap()))
-        ).get();
-
-        assertNoFailures(response);
-
-        DoubleTerms terms = response.getAggregations().get("terms");
-        assertThat(terms, notNullValue());
-        assertThat(terms.getName(), equalTo("terms"));
-        assertThat(terms.getBuckets().size(), equalTo(6));
-
-        for (int i = 0; i < 6; i++) {
-            DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (i + 1d));
-            assertThat(bucket, notNullValue());
-            assertThat(bucket.getKeyAsString(), equalTo("" + (i + 1d)));
-            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i + 1));
-            if (i == 0 || i == 5) {
-                assertThat(bucket.getDocCount(), equalTo(1L));
-            } else {
-                assertThat(bucket.getDocCount(), equalTo(2L));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                new TermsAggregationBuilder("terms").field(MULTI_VALUED_FIELD_NAME)
+                    .collectMode(randomFrom(SubAggCollectionMode.values()))
+                    .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap()))
+            ),
+            response -> {
+                DoubleTerms terms = response.getAggregations().get("terms");
+                assertThat(terms, notNullValue());
+                assertThat(terms.getName(), equalTo("terms"));
+                assertThat(terms.getBuckets().size(), equalTo(6));
+
+                for (int i = 0; i < 6; i++) {
+                    DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (i + 1d));
+                    assertThat(bucket, notNullValue());
+                    assertThat(bucket.getKeyAsString(), equalTo("" + (i + 1d)));
+                    assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i + 1));
+                    if (i == 0 || i == 5) {
+                        assertThat(bucket.getDocCount(), equalTo(1L));
+                    } else {
+                        assertThat(bucket.getDocCount(), equalTo(2L));
+                    }
+                }
             }
-        }
+        );
     }
 
     public void testMultiValuedFieldWithValueScriptNotUnique() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            new TermsAggregationBuilder("terms").field(MULTI_VALUED_FIELD_NAME)
-                .collectMode(randomFrom(SubAggCollectionMode.values()))
-                .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "(long) (_value / 1000 + 1)", Collections.emptyMap()))
-        ).get();
-
-        assertNoFailures(response);
-
-        DoubleTerms terms = response.getAggregations().get("terms");
-        assertThat(terms, notNullValue());
-        assertThat(terms.getName(), equalTo("terms"));
-        assertThat(terms.getBuckets().size(), equalTo(1));
-
-        DoubleTerms.Bucket bucket = terms.getBucketByKey("1.0");
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKeyAsString(), equalTo("1.0"));
-        assertThat(bucket.getKeyAsNumber().intValue(), equalTo(1));
-        assertThat(bucket.getDocCount(), equalTo(5L));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                new TermsAggregationBuilder("terms").field(MULTI_VALUED_FIELD_NAME)
+                    .collectMode(randomFrom(SubAggCollectionMode.values()))
+                    .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "(long) (_value / 1000 + 1)", Collections.emptyMap()))
+            ),
+            response -> {
+                DoubleTerms terms = response.getAggregations().get("terms");
+                assertThat(terms, notNullValue());
+                assertThat(terms.getName(), equalTo("terms"));
+                assertThat(terms.getBuckets().size(), equalTo(1));
+
+                DoubleTerms.Bucket bucket = terms.getBucketByKey("1.0");
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKeyAsString(), equalTo("1.0"));
+                assertThat(bucket.getKeyAsNumber().intValue(), equalTo(1));
+                assertThat(bucket.getDocCount(), equalTo(5L));
+            }
+        );
     }
 
     /*
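One wrinkle of moving assertions into a lambda shows up in runTestFieldWithPartitionedFiltering above: Java lambdas may only capture effectively final locals, so the old int expectedCardinality local becomes a one-element int[] that the callback writes through. Reduced to its essentials (surrounding builder details elided):

    int[] expectedCardinality = new int[1];   // mutable box; the reference itself stays effectively final
    assertNoFailuresAndResponse(prepareSearch("idx").addAggregation(termsAgg), response -> {
        DoubleTerms terms = response.getAggregations().get("terms");
        expectedCardinality[0] = terms.getBuckets().size();   // the write escapes the lambda via the array
    });
    // safe to read here: the callback has completed by the time the helper returns
    assertEquals(expectedCardinality[0], foundTerms.size());

An AtomicInteger would work equally well; the array is simply the lighter idiom. Note also that the assertNoFailures(response) left inside that first callback is redundant, since the helper already performs the failure check.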
@@ -394,239 +405,251 @@
      */
 
     public void testScriptSingleValue() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values()))
-                .userValueTypeHint(ValueType.DOUBLE)
-                .script(
-                    new Script(
-                        ScriptType.INLINE,
-                        CustomScriptPlugin.NAME,
-                        "doc['" + MULTI_VALUED_FIELD_NAME + "'].value",
-                        Collections.emptyMap()
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values()))
+                    .userValueTypeHint(ValueType.DOUBLE)
+                    .script(
+                        new Script(
+                            ScriptType.INLINE,
+                            CustomScriptPlugin.NAME,
+                            "doc['" + MULTI_VALUED_FIELD_NAME + "'].value",
+                            Collections.emptyMap()
+                        )
                     )
-                )
-        ).get();
-
-        assertNoFailures(response);
-
-        DoubleTerms terms = response.getAggregations().get("terms");
-        assertThat(terms, notNullValue());
-        assertThat(terms.getName(), equalTo("terms"));
-        assertThat(terms.getBuckets().size(), equalTo(5));
-
-        for (int i = 0; i < 5; i++) {
-            DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (double) i);
-            assertThat(bucket, notNullValue());
-            assertThat(bucket.getKeyAsString(), equalTo("" + (double) i));
-            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
-            assertThat(bucket.getDocCount(), equalTo(1L));
-        }
+            ),
+            response -> {
+                DoubleTerms terms = response.getAggregations().get("terms");
+                assertThat(terms, notNullValue());
+                assertThat(terms.getName(), equalTo("terms"));
+                assertThat(terms.getBuckets().size(), equalTo(5));
+
+                for (int i = 0; i < 5; i++) {
+                    DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+                    assertThat(bucket, notNullValue());
+                    assertThat(bucket.getKeyAsString(), equalTo("" + (double) i));
+                    assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+                    assertThat(bucket.getDocCount(), equalTo(1L));
+                }
+            }
+        );
     }
 
     public void testScriptMultiValued() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values()))
-                .userValueTypeHint(ValueType.DOUBLE)
-                .script(
-                    new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['" + MULTI_VALUED_FIELD_NAME + "']", Collections.emptyMap())
-                )
-        ).get();
-
-        assertNoFailures(response);
-
-        DoubleTerms terms = response.getAggregations().get("terms");
-        assertThat(terms, notNullValue());
-        assertThat(terms.getName(), equalTo("terms"));
-        assertThat(terms.getBuckets().size(), equalTo(6));
-
-        for (int i = 0; i < 6; i++) {
-            DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (double) i);
-            assertThat(bucket, notNullValue());
-            assertThat(bucket.getKeyAsString(), equalTo("" + (double) i));
-            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
-            if (i == 0 || i == 5) {
-                assertThat(bucket.getDocCount(), equalTo(1L));
-            } else {
-                assertThat(bucket.getDocCount(), equalTo(2L));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values()))
+                    .userValueTypeHint(ValueType.DOUBLE)
+                    .script(
+                        new Script(
+                            ScriptType.INLINE,
+                            CustomScriptPlugin.NAME,
+                            "doc['" + MULTI_VALUED_FIELD_NAME + "']",
+                            Collections.emptyMap()
+                        )
+                    )
+            ),
+            response -> {
+                DoubleTerms terms = response.getAggregations().get("terms");
+                assertThat(terms, notNullValue());
+                assertThat(terms.getName(), equalTo("terms"));
+                assertThat(terms.getBuckets().size(), equalTo(6));
+
+                for (int i = 0; i < 6; i++) {
+                    DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+                    assertThat(bucket, notNullValue());
+                    assertThat(bucket.getKeyAsString(), equalTo("" + (double) i));
+                    assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+                    if (i == 0 || i == 5) {
+                        assertThat(bucket.getDocCount(), equalTo(1L));
+                    } else {
+                        assertThat(bucket.getDocCount(), equalTo(2L));
+                    }
+                }
             }
-        }
+        );
     }
 
     public void testPartiallyUnmapped() throws Exception {
-        SearchResponse response = prepareSearch("idx_unmapped", "idx").addAggregation(
-            new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))
-        ).get();
-
-        assertNoFailures(response);
-
-        DoubleTerms terms = response.getAggregations().get("terms");
-        assertThat(terms, notNullValue());
-        assertThat(terms.getName(), equalTo("terms"));
-        assertThat(terms.getBuckets().size(), equalTo(5));
-
-        for (int i = 0; i < 5; i++) {
-            DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (double) i);
-            assertThat(bucket, notNullValue());
-            assertThat(bucket.getKeyAsString(), equalTo("" + (double) i));
-            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
-            assertThat(bucket.getDocCount(), equalTo(1L));
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx_unmapped", "idx").addAggregation(
+                new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))
+            ),
+            response -> {
+                DoubleTerms terms = response.getAggregations().get("terms");
+                assertThat(terms, notNullValue());
+                assertThat(terms.getName(), equalTo("terms"));
+                assertThat(terms.getBuckets().size(), equalTo(5));
+
+                for (int i = 0; i < 5; i++) {
+                    DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+                    assertThat(bucket, notNullValue());
+                    assertThat(bucket.getKeyAsString(), equalTo("" + (double) i));
+                    assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+                    assertThat(bucket.getDocCount(), equalTo(1L));
+                }
+            }
+        );
     }
 
     public void testPartiallyUnmappedWithFormat() throws Exception {
-        SearchResponse response = prepareSearch("idx_unmapped", "idx").addAggregation(
-            new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME)
-                .collectMode(randomFrom(SubAggCollectionMode.values()))
-                .format("0000.00")
-        ).get();
-
-        assertNoFailures(response);
-
-        DoubleTerms terms = response.getAggregations().get("terms");
-        assertThat(terms, notNullValue());
-        assertThat(terms.getName(), equalTo("terms"));
-        assertThat(terms.getBuckets().size(), equalTo(5));
-
-        for (int i = 0; i < 5; i++) {
-            String key = Strings.format("%07.2f", (double) i);
-            DoubleTerms.Bucket bucket = terms.getBucketByKey(key);
-            assertThat(bucket, notNullValue());
-            assertThat(bucket.getKeyAsString(), equalTo(key));
-            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
-            assertThat(bucket.getDocCount(), equalTo(1L));
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx_unmapped", "idx").addAggregation(
+                new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME)
+                    .collectMode(randomFrom(SubAggCollectionMode.values()))
+                    .format("0000.00")
+            ),
+            response -> {
+                DoubleTerms terms = response.getAggregations().get("terms");
+                assertThat(terms, notNullValue());
+                assertThat(terms.getName(), equalTo("terms"));
+                assertThat(terms.getBuckets().size(), equalTo(5));
+
+                for (int i = 0; i < 5; i++) {
+                    String key = Strings.format("%07.2f", (double) i);
+                    DoubleTerms.Bucket bucket = terms.getBucketByKey(key);
+                    assertThat(bucket, notNullValue());
+                    assertThat(bucket.getKeyAsString(), equalTo(key));
+                    assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+                    assertThat(bucket.getDocCount(), equalTo(1L));
+                }
+            }
+        );
     }
 
     public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscWithSubTermsAgg() throws Exception {
         boolean asc = true;
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME)
-                .collectMode(randomFrom(SubAggCollectionMode.values()))
-                .order(BucketOrder.aggregation("avg_i", asc))
-                .subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME))
-                .subAggregation(
-                    new TermsAggregationBuilder("subTerms").field(MULTI_VALUED_FIELD_NAME)
-                        .collectMode(randomFrom(SubAggCollectionMode.values()))
-                )
-        ).get();
-
-        assertNoFailures(response);
-
-        DoubleTerms terms = response.getAggregations().get("terms");
-        assertThat(terms, notNullValue());
-        assertThat(terms.getName(), equalTo("terms"));
-        assertThat(terms.getBuckets().size(), equalTo(5));
-
-        for (int i = 0; i < 5; i++) {
-            DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (double) i);
-            assertThat(bucket, notNullValue());
-            assertThat(bucket.getKeyAsString(), equalTo("" + (double) i));
-            assertThat(bucket.getDocCount(), equalTo(1L));
-
-            Avg avg = bucket.getAggregations().get("avg_i");
-            assertThat(avg, notNullValue());
-            assertThat(avg.getValue(), equalTo((double) i));
-
-            DoubleTerms subTermsAgg = bucket.getAggregations().get("subTerms");
-            assertThat(subTermsAgg, notNullValue());
-            assertThat(subTermsAgg.getBuckets().size(), equalTo(2));
-            double j = i;
-            for (DoubleTerms.Bucket subBucket : subTermsAgg.getBuckets()) {
-                assertThat(subBucket, notNullValue());
-                assertThat(subBucket.getKeyAsString(), equalTo(String.valueOf(j)));
-                assertThat(subBucket.getDocCount(), equalTo(1L));
-                j++;
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME)
+                    .collectMode(randomFrom(SubAggCollectionMode.values()))
+                    .order(BucketOrder.aggregation("avg_i", asc))
+                    .subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME))
+                    .subAggregation(
+                        new TermsAggregationBuilder("subTerms").field(MULTI_VALUED_FIELD_NAME)
+                            .collectMode(randomFrom(SubAggCollectionMode.values()))
+                    )
+            ),
+            response -> {
+                DoubleTerms terms = response.getAggregations().get("terms");
+                assertThat(terms, notNullValue());
+                assertThat(terms.getName(), equalTo("terms"));
+                assertThat(terms.getBuckets().size(), equalTo(5));
+
+                for (int i = 0; i < 5; i++) {
+                    DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+                    assertThat(bucket, notNullValue());
+                    assertThat(bucket.getKeyAsString(), equalTo("" + (double) i));
+                    assertThat(bucket.getDocCount(), equalTo(1L));
+
+                    Avg avg = bucket.getAggregations().get("avg_i");
+                    assertThat(avg, notNullValue());
+                    assertThat(avg.getValue(), equalTo((double) i));
+
+                    DoubleTerms subTermsAgg = bucket.getAggregations().get("subTerms");
+                    assertThat(subTermsAgg, notNullValue());
+                    assertThat(subTermsAgg.getBuckets().size(), equalTo(2));
+                    double j = i;
+                    for (DoubleTerms.Bucket subBucket : subTermsAgg.getBuckets()) {
+                        assertThat(subBucket, notNullValue());
+                        assertThat(subBucket.getKeyAsString(), equalTo(String.valueOf(j)));
+                        assertThat(subBucket.getDocCount(), equalTo(1L));
+                        j++;
+                    }
+                }
             }
-        }
+        );
     }
 
     public void testSingleValuedFieldOrderedBySingleBucketSubAggregationAsc() throws Exception {
         boolean asc = randomBoolean();
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            new TermsAggregationBuilder("num_tags").field("num_tag")
-                .collectMode(randomFrom(SubAggCollectionMode.values()))
-                .order(BucketOrder.aggregation("filter", asc))
-                .subAggregation(filter("filter", QueryBuilders.matchAllQuery()))
-        ).get();
-
-        assertNoFailures(response);
-
-        DoubleTerms tags = response.getAggregations().get("num_tags");
-        assertThat(tags, notNullValue());
-        assertThat(tags.getName(), equalTo("num_tags"));
-        assertThat(tags.getBuckets().size(), equalTo(2));
-
-        Iterator iters = tags.getBuckets().iterator();
-
-        DoubleTerms.Bucket tag = iters.next();
-        assertThat(tag, notNullValue());
-        assertThat(tag.getKeyAsString(), equalTo(asc ? "0.0" : "1.0"));
-        assertThat(tag.getDocCount(), equalTo(asc ? 2L : 3L));
-        Filter filter = tag.getAggregations().get("filter");
-        assertThat(filter, notNullValue());
-        assertThat(filter.getDocCount(), equalTo(asc ? 2L : 3L));
-
-        tag = iters.next();
-        assertThat(tag, notNullValue());
-        assertThat(tag.getKeyAsString(), equalTo(asc ? "1.0" : "0.0"));
-        assertThat(tag.getDocCount(), equalTo(asc ? 3L : 2L));
-        filter = tag.getAggregations().get("filter");
-        assertThat(filter, notNullValue());
-        assertThat(filter.getDocCount(), equalTo(asc ? 3L : 2L));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                new TermsAggregationBuilder("num_tags").field("num_tag")
+                    .collectMode(randomFrom(SubAggCollectionMode.values()))
+                    .order(BucketOrder.aggregation("filter", asc))
+                    .subAggregation(filter("filter", QueryBuilders.matchAllQuery()))
+            ),
+            response -> {
+                DoubleTerms tags = response.getAggregations().get("num_tags");
+                assertThat(tags, notNullValue());
+                assertThat(tags.getName(), equalTo("num_tags"));
+                assertThat(tags.getBuckets().size(), equalTo(2));
+
+                Iterator iters = tags.getBuckets().iterator();
+
+                DoubleTerms.Bucket tag = iters.next();
+                assertThat(tag, notNullValue());
+                assertThat(tag.getKeyAsString(), equalTo(asc ? "0.0" : "1.0"));
+                assertThat(tag.getDocCount(), equalTo(asc ? 2L : 3L));
+                Filter filter = tag.getAggregations().get("filter");
+                assertThat(filter, notNullValue());
+                assertThat(filter.getDocCount(), equalTo(asc ? 2L : 3L));
+
+                tag = iters.next();
+                assertThat(tag, notNullValue());
+                assertThat(tag.getKeyAsString(), equalTo(asc ? "1.0" : "0.0"));
+                assertThat(tag.getDocCount(), equalTo(asc ? 3L : 2L));
+                filter = tag.getAggregations().get("filter");
+                assertThat(filter, notNullValue());
+                assertThat(filter.getDocCount(), equalTo(asc ? 3L : 2L));
+            }
+        );
     }
 
     public void testSingleValuedFieldOrderedBySubAggregationAscMultiHierarchyLevels() throws Exception {
         boolean asc = randomBoolean();
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            new TermsAggregationBuilder("tags").field("num_tag")
-                .collectMode(randomFrom(SubAggCollectionMode.values()))
-                .order(BucketOrder.aggregation("filter1>filter2>max", asc))
-                .subAggregation(
-                    filter("filter1", QueryBuilders.matchAllQuery()).subAggregation(
-                        filter("filter2", QueryBuilders.matchAllQuery()).subAggregation(max("max").field(SINGLE_VALUED_FIELD_NAME))
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                new TermsAggregationBuilder("tags").field("num_tag")
+                    .collectMode(randomFrom(SubAggCollectionMode.values()))
+                    .order(BucketOrder.aggregation("filter1>filter2>max", asc))
+                    .subAggregation(
+                        filter("filter1", QueryBuilders.matchAllQuery()).subAggregation(
+                            filter("filter2", QueryBuilders.matchAllQuery()).subAggregation(max("max").field(SINGLE_VALUED_FIELD_NAME))
+                        )
                     )
-                )
-        ).get();
-
-        assertNoFailures(response);
-
-        DoubleTerms tags = response.getAggregations().get("tags");
-        assertThat(tags, notNullValue());
-        assertThat(tags.getName(), equalTo("tags"));
-        assertThat(tags.getBuckets().size(), equalTo(2));
-
-        Iterator iters = tags.getBuckets().iterator();
-
-        // the max for "1" is 2
-        // the max for "0" is 4
-
-        DoubleTerms.Bucket tag = iters.next();
-        assertThat(tag, notNullValue());
-        assertThat(tag.getKeyAsString(), equalTo(asc ? "1.0" : "0.0"));
-        assertThat(tag.getDocCount(), equalTo(asc ? 3L : 2L));
-        Filter filter1 = tag.getAggregations().get("filter1");
-        assertThat(filter1, notNullValue());
-        assertThat(filter1.getDocCount(), equalTo(asc ? 3L : 2L));
-        Filter filter2 = filter1.getAggregations().get("filter2");
-        assertThat(filter2, notNullValue());
-        assertThat(filter2.getDocCount(), equalTo(asc ? 3L : 2L));
-        Max max = filter2.getAggregations().get("max");
-        assertThat(max, notNullValue());
-        assertThat(max.value(), equalTo(asc ? 2.0 : 4.0));
-
-        tag = iters.next();
-        assertThat(tag, notNullValue());
-        assertThat(tag.getKeyAsString(), equalTo(asc ? "0.0" : "1.0"));
-        assertThat(tag.getDocCount(), equalTo(asc ? 2L : 3L));
-        filter1 = tag.getAggregations().get("filter1");
-        assertThat(filter1, notNullValue());
-        assertThat(filter1.getDocCount(), equalTo(asc ? 2L : 3L));
-        filter2 = filter1.getAggregations().get("filter2");
-        assertThat(filter2, notNullValue());
-        assertThat(filter2.getDocCount(), equalTo(asc ? 2L : 3L));
-        max = filter2.getAggregations().get("max");
-        assertThat(max, notNullValue());
-        assertThat(max.value(), equalTo(asc ? 4.0 : 2.0));
+            ),
+            response -> {
+                DoubleTerms tags = response.getAggregations().get("tags");
+                assertThat(tags, notNullValue());
+                assertThat(tags.getName(), equalTo("tags"));
+                assertThat(tags.getBuckets().size(), equalTo(2));
+
+                Iterator iters = tags.getBuckets().iterator();
+
+                // the max for "1" is 2
+                // the max for "0" is 4
+
+                DoubleTerms.Bucket tag = iters.next();
+                assertThat(tag, notNullValue());
+                assertThat(tag.getKeyAsString(), equalTo(asc ? "1.0" : "0.0"));
+                assertThat(tag.getDocCount(), equalTo(asc ? 3L : 2L));
+                Filter filter1 = tag.getAggregations().get("filter1");
+                assertThat(filter1, notNullValue());
+                assertThat(filter1.getDocCount(), equalTo(asc ? 3L : 2L));
+                Filter filter2 = filter1.getAggregations().get("filter2");
+                assertThat(filter2, notNullValue());
+                assertThat(filter2.getDocCount(), equalTo(asc ? 3L : 2L));
+                Max max = filter2.getAggregations().get("max");
+                assertThat(max, notNullValue());
+                assertThat(max.value(), equalTo(asc ? 2.0 : 4.0));
+
+                tag = iters.next();
+                assertThat(tag, notNullValue());
+                assertThat(tag.getKeyAsString(), equalTo(asc ? "0.0" : "1.0"));
+                assertThat(tag.getDocCount(), equalTo(asc ? 2L : 3L));
+                filter1 = tag.getAggregations().get("filter1");
+                assertThat(filter1, notNullValue());
+                assertThat(filter1.getDocCount(), equalTo(asc ? 2L : 3L));
+                filter2 = filter1.getAggregations().get("filter2");
+                assertThat(filter2, notNullValue());
+                assertThat(filter2.getDocCount(), equalTo(asc ? 2L : 3L));
+                max = filter2.getAggregations().get("max");
+                assertThat(max, notNullValue());
+                assertThat(max.value(), equalTo(asc ? 4.0 : 2.0));
+            }
+        );
     }
 
     public void testSingleValuedFieldOrderedByMissingSubAggregation() throws Exception {
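The ordering tests above lean on the aggregation path syntax accepted by BucketOrder.aggregation: '>' steps down through single-bucket sub-aggregations, while '.' selects one value from a multi-value aggregation. Both forms appear verbatim in this diff:

    // Path through two filter aggs to a max metric (single-bucket hops use '>'):
    .order(BucketOrder.aggregation("filter1>filter2>max", asc))
    // Path to one value of a multi-value stats agg (property access uses '.'):
    .order(BucketOrder.aggregation("stats.avg", asc))

The tests then verify that bucket iteration order flips with the randomized asc flag.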
@@ -710,87 +733,89 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithoutMetric
 
     public void testSingleValuedFieldOrderedByMultiValueSubAggregationAsc() throws Exception {
         boolean asc = true;
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME)
-                .collectMode(randomFrom(SubAggCollectionMode.values()))
-                .order(BucketOrder.aggregation("stats.avg", asc))
-                .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))
-        ).get();
-
-        assertNoFailures(response);
-
-        DoubleTerms terms = response.getAggregations().get("terms");
-        assertThat(terms, notNullValue());
-        assertThat(terms.getName(), equalTo("terms"));
-        assertThat(terms.getBuckets().size(), equalTo(5));
-
-        for (int i = 0; i < 5; i++) {
-            DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (double) i);
-            assertThat(bucket, notNullValue());
-            assertThat(bucket.getKeyAsString(), equalTo("" + (double) i));
-            assertThat(bucket.getDocCount(), equalTo(1L));
-
-            Stats stats = bucket.getAggregations().get("stats");
-            assertThat(stats, notNullValue());
-            assertThat(stats.getMax(), equalTo((double) i));
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME)
+                    .collectMode(randomFrom(SubAggCollectionMode.values()))
+                    .order(BucketOrder.aggregation("stats.avg", asc))
+                    .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))
+            ),
+            response -> {
+                DoubleTerms terms = response.getAggregations().get("terms");
+                assertThat(terms, notNullValue());
+                assertThat(terms.getName(), equalTo("terms"));
+                assertThat(terms.getBuckets().size(), equalTo(5));
+
+                for (int i = 0; i < 5; i++) {
+                    DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+                    assertThat(bucket, notNullValue());
+                    assertThat(bucket.getKeyAsString(), equalTo("" + (double) i));
+                    assertThat(bucket.getDocCount(), equalTo(1L));
+
+                    Stats stats = bucket.getAggregations().get("stats");
+                    assertThat(stats, notNullValue());
+                    assertThat(stats.getMax(), equalTo((double) i));
+                }
+            }
+        );
     }
 
     public void testSingleValuedFieldOrderedByMultiValueSubAggregationDesc() throws Exception {
         boolean asc = false;
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME)
-                .collectMode(randomFrom(SubAggCollectionMode.values()))
-                .order(BucketOrder.aggregation("stats.avg", asc))
-                .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))
-        ).get();
-
-        assertNoFailures(response);
-
-        DoubleTerms terms = response.getAggregations().get("terms");
-        assertThat(terms, notNullValue());
-        assertThat(terms.getName(), equalTo("terms"));
-        assertThat(terms.getBuckets().size(), equalTo(5));
-
-        for (int i = 4; i >= 0; i--) {
-            DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (double) i);
-            assertThat(bucket, notNullValue());
-            assertThat(bucket.getKeyAsString(), equalTo("" + (double) i));
-            assertThat(bucket.getDocCount(), equalTo(1L));
-
-            Stats stats = bucket.getAggregations().get("stats");
-            assertThat(stats, notNullValue());
-            assertThat(stats.getMax(), equalTo((double) i));
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME)
+                    .collectMode(randomFrom(SubAggCollectionMode.values()))
+                    .order(BucketOrder.aggregation("stats.avg", asc))
+                    .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))
+            ),
+            response -> {
+                DoubleTerms terms = response.getAggregations().get("terms");
+                assertThat(terms, notNullValue());
+                assertThat(terms.getName(), equalTo("terms"));
+                assertThat(terms.getBuckets().size(), equalTo(5));
+
+                for (int i = 4; i >= 0; i--) {
+                    DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+                    assertThat(bucket, notNullValue());
+                    assertThat(bucket.getKeyAsString(), equalTo("" + (double) i));
+                    assertThat(bucket.getDocCount(), equalTo(1L));
+
+                    Stats stats = bucket.getAggregations().get("stats");
+                    assertThat(stats, notNullValue());
+                    assertThat(stats.getMax(), equalTo((double) i));
+                }
+            }
+        );
     }
 
     public void testSingleValuedFieldOrderedByMultiValueExtendedStatsAsc() throws Exception {
         boolean asc = true;
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME)
-                .collectMode(randomFrom(SubAggCollectionMode.values()))
-                .order(BucketOrder.aggregation("stats.variance", asc))
-                .subAggregation(extendedStats("stats").field(SINGLE_VALUED_FIELD_NAME))
-        ).get();
-
-        assertNoFailures(response);
-
-        DoubleTerms terms = response.getAggregations().get("terms");
-        assertThat(terms, notNullValue());
-        assertThat(terms.getName(), equalTo("terms"));
-        assertThat(terms.getBuckets().size(), equalTo(5));
-
-        for (int i = 0; i < 5; i++) {
-            DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (double) i);
-            assertThat(bucket, notNullValue());
-            assertThat(bucket.getKeyAsString(), equalTo("" + (double) i));
-            assertThat(bucket.getDocCount(), equalTo(1L));
-
-            ExtendedStats stats = bucket.getAggregations().get("stats");
-            assertThat(stats, notNullValue());
-            assertThat(stats.getMax(), equalTo((double) i));
-        }
-
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME)
+                    .collectMode(randomFrom(SubAggCollectionMode.values()))
+                    .order(BucketOrder.aggregation("stats.variance", asc))
+                    .subAggregation(extendedStats("stats").field(SINGLE_VALUED_FIELD_NAME))
+            ),
+            response -> {
+                DoubleTerms terms = response.getAggregations().get("terms");
+                assertThat(terms, notNullValue());
+                assertThat(terms.getName(), equalTo("terms"));
+                assertThat(terms.getBuckets().size(), equalTo(5));
+
+                for (int i = 0; i < 5; i++) {
+                    DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+                    assertThat(bucket, notNullValue());
+                    assertThat(bucket.getKeyAsString(), equalTo("" + (double) i));
+                    assertThat(bucket.getDocCount(), equalTo(1L));
+
+                    ExtendedStats stats = bucket.getAggregations().get("stats");
+                    assertThat(stats, notNullValue());
+                    assertThat(stats.getMax(), equalTo((double) i));
+                }
+            }
+        );
     }
 
     public void testScriptScore() {
@@ -808,28 +833,28 @@ public void testScriptScore() {
             Collections.emptyMap()
         );
 
-        SearchResponse response = prepareSearch("idx").setQuery(functionScoreQuery(scriptFunction(scoringScript)))
-            .addAggregation(
-                new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values()))
-                    .userValueTypeHint(ValueType.DOUBLE)
-                    .script(aggregationScript)
-            )
-            .get();
-
-        assertNoFailures(response);
-
-        DoubleTerms terms = response.getAggregations().get("terms");
-        assertThat(terms, notNullValue());
-        assertThat(terms.getName(), equalTo("terms"));
-        assertThat(terms.getBuckets().size(), equalTo(3));
-
-        for (int i = 0; i < 3; i++) {
-            DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (double) i);
-            assertThat(bucket, notNullValue());
-            assertThat(bucket.getKeyAsString(), equalTo("" + (double) i));
-            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
-            assertThat(bucket.getDocCount(), equalTo(i == 1 ? 3L : 1L));
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setQuery(functionScoreQuery(scriptFunction(scoringScript)))
+                .addAggregation(
+                    new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .userValueTypeHint(ValueType.DOUBLE)
+                        .script(aggregationScript)
+                ),
+            response -> {
+                DoubleTerms terms = response.getAggregations().get("terms");
+                assertThat(terms, notNullValue());
+                assertThat(terms.getName(), equalTo("terms"));
+                assertThat(terms.getBuckets().size(), equalTo(3));
+
+                for (int i = 0; i < 3; i++) {
+                    DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+                    assertThat(bucket, notNullValue());
+                    assertThat(bucket.getKeyAsString(), equalTo("" + (double) i));
+                    assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+                    assertThat(bucket.getDocCount(), equalTo(i == 1 ? 3L : 1L));
+                }
+            }
+        );
     }
 
     public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndTermsDesc() throws Exception {
@@ -873,34 +898,35 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound
 
     private void assertMultiSortResponse(double[] expectedKeys, BucketOrder... order) {
-        SearchResponse response = prepareSearch("sort_idx").addAggregation(
-            new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME)
-                .collectMode(randomFrom(SubAggCollectionMode.values()))
-                .order(BucketOrder.compound(order))
-                .subAggregation(avg("avg_l").field("l"))
-                .subAggregation(sum("sum_d").field("d"))
-        ).get();
-
-        assertNoFailures(response);
-
-        DoubleTerms terms = response.getAggregations().get("terms");
-        assertThat(terms, notNullValue());
-        assertThat(terms.getName(), equalTo("terms"));
-        assertThat(terms.getBuckets().size(), equalTo(expectedKeys.length));
-
-        int i = 0;
-        for (DoubleTerms.Bucket bucket : terms.getBuckets()) {
-            assertThat(bucket, notNullValue());
-            assertThat(bucket.getKeyAsString(), equalTo(String.valueOf(expectedKeys[i])));
-            assertThat(bucket.getDocCount(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("_count")));
-            Avg avg = bucket.getAggregations().get("avg_l");
-            assertThat(avg, notNullValue());
-            assertThat(avg.getValue(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("avg_l")));
-            Sum sum = bucket.getAggregations().get("sum_d");
-            assertThat(sum, notNullValue());
-            assertThat(sum.value(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("sum_d")));
-            i++;
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("sort_idx").addAggregation(
+                new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME)
+                    .collectMode(randomFrom(SubAggCollectionMode.values()))
+                    .order(BucketOrder.compound(order))
+                    .subAggregation(avg("avg_l").field("l"))
+                    .subAggregation(sum("sum_d").field("d"))
+            ),
+            response -> {
+                DoubleTerms terms = response.getAggregations().get("terms");
+                assertThat(terms, notNullValue());
+                assertThat(terms.getName(), equalTo("terms"));
+                assertThat(terms.getBuckets().size(), equalTo(expectedKeys.length));
+
+                int i = 0;
+                for (DoubleTerms.Bucket bucket : terms.getBuckets()) {
+                    assertThat(bucket, notNullValue());
+                    assertThat(bucket.getKeyAsString(), equalTo(String.valueOf(expectedKeys[i])));
+                    assertThat(bucket.getDocCount(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("_count")));
+                    Avg avg = bucket.getAggregations().get("avg_l");
+                    assertThat(avg, notNullValue());
+                    assertThat(avg.getValue(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("avg_l")));
+                    Sum sum = bucket.getAggregations().get("sum_d");
+                    assertThat(sum, notNullValue());
+                    assertThat(sum.value(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("sum_d")));
+                    i++;
+                }
+            }
+        );
     }
 
     public void testOtherDocCount() {
@@ -933,13 +959,13 @@ public void testScriptCaching() throws Exception {
         );
 
         // Test that a request using a nondeterministic script does not get cached
-        SearchResponse r = prepareSearch("cache_test_idx").setSize(0)
-            .addAggregation(
-                new TermsAggregationBuilder("terms").field("d")
-                    .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap()))
-            )
-            .get();
-        assertNoFailures(r);
+        assertNoFailures(
+            prepareSearch("cache_test_idx").setSize(0)
+                .addAggregation(
+                    new TermsAggregationBuilder("terms").field("d")
+                        .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap()))
+                )
+        );
 
         assertThat(
             indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
@@ -951,13 +977,13 @@
         );
 
         // Test that a request using a deterministic script gets cached
-        r = prepareSearch("cache_test_idx").setSize(0)
-            .addAggregation(
-                new TermsAggregationBuilder("terms").field("d")
-                    .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap()))
-            )
-            .get();
-        assertNoFailures(r);
+        assertNoFailures(
+            prepareSearch("cache_test_idx").setSize(0)
+                .addAggregation(
+                    new TermsAggregationBuilder("terms").field("d")
+                        .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap()))
+                )
+        );
 
         assertThat(
             indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
@@ -969,8 +995,7 @@
         );
 
         // Ensure that non-scripted requests are cached as normal
-        r = prepareSearch("cache_test_idx").setSize(0).addAggregation(new TermsAggregationBuilder("terms").field("d")).get();
-        assertNoFailures(r);
+        assertNoFailures(prepareSearch("cache_test_idx").setSize(0).addAggregation(new TermsAggregationBuilder("terms").field("d")));
 
         assertThat(
             indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
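testScriptCaching, just above, pairs each search with assertions on the index's request-cache statistics: a search whose script is nondeterministic (Math.random()) must not populate the cache, a deterministic one (_value + 1) must, and an unscripted search is cached as usual. The hunks only show the hit-count side of the pattern; the expected values sit outside the diff context, so the matcher below is a placeholder, not the real expectation:

    // Shape of the caching check (placeholder matcher; actual values are outside the hunks shown).
    assertNoFailures(
        prepareSearch("cache_test_idx").setSize(0)
            .addAggregation(
                new TermsAggregationBuilder("terms").field("d")
                    .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap()))
            )
    );
    assertThat(
        indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
        equalTo(0L) // placeholder value
    );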
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/FilterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/FilterIT.java
index 5971e287882f2..b2098aee48b10 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/FilterIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/FilterIT.java
@@ -9,7 +9,6 @@
 
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.index.IndexRequestBuilder;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.index.query.BoolQueryBuilder;
 import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.search.aggregations.InternalAggregation;
@@ -28,7 +27,7 @@
 import static org.elasticsearch.search.aggregations.AggregationBuilders.avg;
 import static org.elasticsearch.search.aggregations.AggregationBuilders.filter;
 import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
@@ -79,70 +78,66 @@ public void setupSuiteScopeCluster() throws Exception {
     }
 
     public void testSimple() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(filter("tag1", termQuery("tag", "tag1"))).get();
-
-        assertNoFailures(response);
-
-        Filter filter = response.getAggregations().get("tag1");
-        assertThat(filter, notNullValue());
-        assertThat(filter.getName(), equalTo("tag1"));
-        assertThat(filter.getDocCount(), equalTo((long) numTag1Docs));
+        assertNoFailuresAndResponse(prepareSearch("idx").addAggregation(filter("tag1", termQuery("tag", "tag1"))), response -> {
+            Filter filter = response.getAggregations().get("tag1");
+            assertThat(filter, notNullValue());
+            assertThat(filter.getName(), equalTo("tag1"));
+            assertThat(filter.getDocCount(), equalTo((long) numTag1Docs));
+        });
     }
 
     // See NullPointer issue when filters are empty:
     // https://github.com/elastic/elasticsearch/issues/8438
     public void testEmptyFilterDeclarations() throws Exception {
         QueryBuilder emptyFilter = new BoolQueryBuilder();
-        SearchResponse response = prepareSearch("idx").addAggregation(filter("tag1", emptyFilter)).get();
-
-        assertNoFailures(response);
-
-        Filter filter = response.getAggregations().get("tag1");
-        assertThat(filter, notNullValue());
-        assertThat(filter.getDocCount(), equalTo((long) numDocs));
+        assertNoFailuresAndResponse(prepareSearch("idx").addAggregation(filter("tag1", emptyFilter)), response -> {
+            Filter filter = response.getAggregations().get("tag1");
+            assertThat(filter, notNullValue());
+            assertThat(filter.getDocCount(), equalTo((long) numDocs));
+        });
     }
 
     public void testWithSubAggregation() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            filter("tag1", termQuery("tag", "tag1")).subAggregation(avg("avg_value").field("value"))
-        ).get();
-
-        assertNoFailures(response);
-
-        Filter filter = response.getAggregations().get("tag1");
-        assertThat(filter, notNullValue());
-        assertThat(filter.getName(), equalTo("tag1"));
-        assertThat(filter.getDocCount(), equalTo((long) numTag1Docs));
-        assertThat((long) ((InternalAggregation) filter).getProperty("_count"), equalTo((long) numTag1Docs));
-
-        long sum = 0;
-        for (int i = 0; i < numTag1Docs; ++i) {
-            sum += i + 1;
-        }
-        assertThat(filter.getAggregations().asList().isEmpty(), is(false));
-        Avg avgValue = filter.getAggregations().get("avg_value");
-        assertThat(avgValue, notNullValue());
-        assertThat(avgValue.getName(), equalTo("avg_value"));
-        assertThat(avgValue.getValue(), equalTo((double) sum / numTag1Docs));
-        assertThat((double) ((InternalAggregation) filter).getProperty("avg_value.value"), equalTo((double) sum / numTag1Docs));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(filter("tag1", termQuery("tag", "tag1")).subAggregation(avg("avg_value").field("value"))),
+            response -> {
+                Filter filter = response.getAggregations().get("tag1");
+                assertThat(filter, notNullValue());
+                assertThat(filter.getName(), equalTo("tag1"));
+                assertThat(filter.getDocCount(), equalTo((long) numTag1Docs));
+                assertThat((long) ((InternalAggregation) filter).getProperty("_count"), equalTo((long) numTag1Docs));
+
+                long sum = 0;
+                for (int i = 0; i < numTag1Docs; ++i) {
+                    sum += i + 1;
+                }
+                assertThat(filter.getAggregations().asList().isEmpty(), is(false));
+                Avg avgValue = filter.getAggregations().get("avg_value");
+                assertThat(avgValue, notNullValue());
+                assertThat(avgValue.getName(), equalTo("avg_value"));
+                assertThat(avgValue.getValue(), equalTo((double) sum / numTag1Docs));
+                assertThat((double) ((InternalAggregation) filter).getProperty("avg_value.value"), equalTo((double) sum / numTag1Docs));
+            }
+        );
     }
 
     public void testAsSubAggregation() {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            histogram("histo").field("value").interval(2L).subAggregation(filter("filter", matchAllQuery()))
-        ).get();
-
-        assertNoFailures(response);
-
-        Histogram histo = response.getAggregations().get("histo");
-        assertThat(histo, notNullValue());
-        assertThat(histo.getBuckets().size(), greaterThanOrEqualTo(1));
-
-        for (Histogram.Bucket bucket : histo.getBuckets()) {
-            Filter filter = bucket.getAggregations().get("filter");
-            assertThat(filter, notNullValue());
-            assertEquals(bucket.getDocCount(), filter.getDocCount());
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                histogram("histo").field("value").interval(2L).subAggregation(filter("filter", matchAllQuery()))
+            ),
+            response -> {
+                Histogram histo = response.getAggregations().get("histo");
+                assertThat(histo, notNullValue());
+                assertThat(histo.getBuckets().size(), greaterThanOrEqualTo(1));
+
+                for (Histogram.Bucket bucket : histo.getBuckets()) {
+                    Filter filter = bucket.getAggregations().get("filter");
+                    assertThat(filter, notNullValue());
+                    assertEquals(bucket.getDocCount(), filter.getDocCount());
+                }
+            }
+        );
     }
 
     public void testWithContextBasedSubAggregation() throws Exception {
@@ -160,19 +155,23 @@
     }
 
     public void testEmptyAggregation() throws Exception {
-        SearchResponse searchResponse = prepareSearch("empty_bucket_idx").setQuery(matchAllQuery())
-            .addAggregation(histogram("histo").field("value").interval(1L).minDocCount(0).subAggregation(filter("filter", matchAllQuery())))
-            .get();
-
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L));
-        Histogram histo = searchResponse.getAggregations().get("histo");
-        assertThat(histo, Matchers.notNullValue());
-        Histogram.Bucket bucket = histo.getBuckets().get(1);
-        assertThat(bucket, Matchers.notNullValue());
-
-        Filter filter = bucket.getAggregations().get("filter");
-        assertThat(filter, Matchers.notNullValue());
-        assertThat(filter.getName(), equalTo("filter"));
-        assertThat(filter.getDocCount(), is(0L));
+        assertNoFailuresAndResponse(
+            prepareSearch("empty_bucket_idx").setQuery(matchAllQuery())
+                .addAggregation(
+                    histogram("histo").field("value").interval(1L).minDocCount(0).subAggregation(filter("filter", matchAllQuery()))
+                ),
+            response -> {
+                assertThat(response.getHits().getTotalHits().value, equalTo(2L));
+                Histogram histo = response.getAggregations().get("histo");
+                assertThat(histo, Matchers.notNullValue());
+                Histogram.Bucket bucket = histo.getBuckets().get(1);
+                assertThat(bucket, Matchers.notNullValue());
+
+                Filter filter = bucket.getAggregations().get("filter");
+                assertThat(filter, Matchers.notNullValue());
+                assertThat(filter.getName(), equalTo("filter"));
+                assertThat(filter.getDocCount(), is(0L));
+            }
+        );
     }
 }
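FilterIT shows the conversion at its most compact: when the callback is short, the whole test collapses to a single statement. Before and after, reduced from testSimple above:

    // Before: the response escapes into the test body and is never released.
    SearchResponse response = prepareSearch("idx").addAggregation(filter("tag1", termQuery("tag", "tag1"))).get();
    assertNoFailures(response);
    Filter filter = response.getAggregations().get("tag1");
    assertThat(filter.getDocCount(), equalTo((long) numTag1Docs));

    // After: identical assertions, but the response never leaves the helper.
    assertNoFailuresAndResponse(prepareSearch("idx").addAggregation(filter("tag1", termQuery("tag", "tag1"))), response -> {
        Filter filter = response.getAggregations().get("tag1");
        assertThat(filter.getDocCount(), equalTo((long) numTag1Docs));
    });

The import swap at the top of the file (assertNoFailures out, assertNoFailuresAndResponse in) confirms that nothing in FilterIT still needs the bare failure check.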
a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/FiltersIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/FiltersIT.java index fa8974371a935..664644a3a2632 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/FiltersIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/FiltersIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.aggregations.InternalAggregation; @@ -34,7 +33,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.avg; import static org.elasticsearch.search.aggregations.AggregationBuilders.filters; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -98,121 +97,125 @@ public void setupSuiteScopeCluster() throws Exception { } public void testSimple() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - filters( - "tags", - randomOrder(new KeyedFilter("tag1", termQuery("tag", "tag1")), new KeyedFilter("tag2", termQuery("tag", "tag2"))) - ) - ).get(); - - assertNoFailures(response); - - Filters filters = response.getAggregations().get("tags"); - assertThat(filters, notNullValue()); - assertThat(filters.getName(), equalTo("tags")); - - assertThat(filters.getBuckets().size(), equalTo(2)); - - Filters.Bucket bucket = filters.getBucketByKey("tag1"); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs)); - - bucket = filters.getBucketByKey("tag2"); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numTag2Docs)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + filters( + "tags", + randomOrder(new KeyedFilter("tag1", termQuery("tag", "tag1")), new KeyedFilter("tag2", termQuery("tag", "tag2"))) + ) + ), + response -> { + Filters filters = response.getAggregations().get("tags"); + assertThat(filters, notNullValue()); + assertThat(filters.getName(), equalTo("tags")); + + assertThat(filters.getBuckets().size(), equalTo(2)); + + Filters.Bucket bucket = filters.getBucketByKey("tag1"); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs)); + + bucket = filters.getBucketByKey("tag2"); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numTag2Docs)); + } + ); } // See NullPointer issue when filters are empty: // https://github.com/elastic/elasticsearch/issues/8438 public void testEmptyFilterDeclarations() throws Exception { QueryBuilder emptyFilter = new BoolQueryBuilder(); - SearchResponse response = prepareSearch("idx").addAggregation( - filters("tags", randomOrder(new KeyedFilter("all", emptyFilter), new KeyedFilter("tag1", termQuery("tag", "tag1")))) - ).get(); - - assertNoFailures(response); - - 
Filters filters = response.getAggregations().get("tags"); - assertThat(filters, notNullValue()); - Filters.Bucket allBucket = filters.getBucketByKey("all"); - assertThat(allBucket.getDocCount(), equalTo((long) numDocs)); - - Filters.Bucket bucket = filters.getBucketByKey("tag1"); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + filters("tags", randomOrder(new KeyedFilter("all", emptyFilter), new KeyedFilter("tag1", termQuery("tag", "tag1")))) + ), + response -> { + Filters filters = response.getAggregations().get("tags"); + assertThat(filters, notNullValue()); + Filters.Bucket allBucket = filters.getBucketByKey("all"); + assertThat(allBucket.getDocCount(), equalTo((long) numDocs)); + + Filters.Bucket bucket = filters.getBucketByKey("tag1"); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs)); + } + ); } public void testWithSubAggregation() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - filters( - "tags", - randomOrder(new KeyedFilter("tag1", termQuery("tag", "tag1")), new KeyedFilter("tag2", termQuery("tag", "tag2"))) - ).subAggregation(avg("avg_value").field("value")) - ).get(); - - assertNoFailures(response); - - Filters filters = response.getAggregations().get("tags"); - assertThat(filters, notNullValue()); - assertThat(filters.getName(), equalTo("tags")); - - assertThat(filters.getBuckets().size(), equalTo(2)); - assertThat(((InternalAggregation) filters).getProperty("_bucket_count"), equalTo(2)); - Object[] propertiesKeys = (Object[]) ((InternalAggregation) filters).getProperty("_key"); - Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) filters).getProperty("_count"); - Object[] propertiesCounts = (Object[]) ((InternalAggregation) filters).getProperty("avg_value.value"); - - Filters.Bucket bucket = filters.getBucketByKey("tag1"); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs)); - long sum = 0; - for (int i = 0; i < numTag1Docs; ++i) { - sum += i + 1; - } - assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); - Avg avgValue = bucket.getAggregations().get("avg_value"); - assertThat(avgValue, notNullValue()); - assertThat(avgValue.getName(), equalTo("avg_value")); - assertThat(avgValue.getValue(), equalTo((double) sum / numTag1Docs)); - assertThat((String) propertiesKeys[0], equalTo("tag1")); - assertThat((long) propertiesDocCounts[0], equalTo((long) numTag1Docs)); - assertThat((double) propertiesCounts[0], equalTo((double) sum / numTag1Docs)); - - bucket = filters.getBucketByKey("tag2"); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numTag2Docs)); - sum = 0; - for (int i = numTag1Docs; i < (numTag1Docs + numTag2Docs); ++i) { - sum += i; - } - assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); - avgValue = bucket.getAggregations().get("avg_value"); - assertThat(avgValue, notNullValue()); - assertThat(avgValue.getName(), equalTo("avg_value")); - assertThat(avgValue.getValue(), equalTo((double) sum / numTag2Docs)); - assertThat(propertiesKeys[1], equalTo("tag2")); - assertThat(propertiesDocCounts[1], equalTo((long) numTag2Docs)); - assertThat(propertiesCounts[1], equalTo((double) sum / numTag2Docs)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + filters( + "tags", + 
randomOrder(new KeyedFilter("tag1", termQuery("tag", "tag1")), new KeyedFilter("tag2", termQuery("tag", "tag2"))) + ).subAggregation(avg("avg_value").field("value")) + ), + response -> { + Filters filters = response.getAggregations().get("tags"); + assertThat(filters, notNullValue()); + assertThat(filters.getName(), equalTo("tags")); + + assertThat(filters.getBuckets().size(), equalTo(2)); + assertThat(((InternalAggregation) filters).getProperty("_bucket_count"), equalTo(2)); + Object[] propertiesKeys = (Object[]) ((InternalAggregation) filters).getProperty("_key"); + Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) filters).getProperty("_count"); + Object[] propertiesCounts = (Object[]) ((InternalAggregation) filters).getProperty("avg_value.value"); + + Filters.Bucket bucket = filters.getBucketByKey("tag1"); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs)); + long sum = 0; + for (int i = 0; i < numTag1Docs; ++i) { + sum += i + 1; + } + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + Avg avgValue = bucket.getAggregations().get("avg_value"); + assertThat(avgValue, notNullValue()); + assertThat(avgValue.getName(), equalTo("avg_value")); + assertThat(avgValue.getValue(), equalTo((double) sum / numTag1Docs)); + assertThat((String) propertiesKeys[0], equalTo("tag1")); + assertThat((long) propertiesDocCounts[0], equalTo((long) numTag1Docs)); + assertThat((double) propertiesCounts[0], equalTo((double) sum / numTag1Docs)); + + bucket = filters.getBucketByKey("tag2"); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numTag2Docs)); + sum = 0; + for (int i = numTag1Docs; i < (numTag1Docs + numTag2Docs); ++i) { + sum += i; + } + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + avgValue = bucket.getAggregations().get("avg_value"); + assertThat(avgValue, notNullValue()); + assertThat(avgValue.getName(), equalTo("avg_value")); + assertThat(avgValue.getValue(), equalTo((double) sum / numTag2Docs)); + assertThat(propertiesKeys[1], equalTo("tag2")); + assertThat(propertiesDocCounts[1], equalTo((long) numTag2Docs)); + assertThat(propertiesCounts[1], equalTo((double) sum / numTag2Docs)); + } + ); } public void testAsSubAggregation() { - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field("value").interval(2L).subAggregation(filters("filters", matchAllQuery())) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getBuckets().size(), greaterThanOrEqualTo(1)); - - for (Histogram.Bucket bucket : histo.getBuckets()) { - Filters filters = bucket.getAggregations().get("filters"); - assertThat(filters, notNullValue()); - assertThat(filters.getBuckets().size(), equalTo(1)); - Filters.Bucket filterBucket = filters.getBuckets().get(0); - assertEquals(bucket.getDocCount(), filterBucket.getDocCount()); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field("value").interval(2L).subAggregation(filters("filters", matchAllQuery())) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getBuckets().size(), greaterThanOrEqualTo(1)); + + for (Histogram.Bucket bucket : histo.getBuckets()) { + Filters filters = bucket.getAggregations().get("filters"); + assertThat(filters, notNullValue()); + 
assertThat(filters.getBuckets().size(), equalTo(1)); + Filters.Bucket filterBucket = filters.getBuckets().get(0); + assertEquals(bucket.getDocCount(), filterBucket.getDocCount()); + } + } + ); } public void testWithContextBasedSubAggregation() throws Exception { @@ -236,232 +239,238 @@ public void testWithContextBasedSubAggregation() throws Exception { } public void testEmptyAggregation() throws Exception { - SearchResponse searchResponse = prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field("value") - .interval(1L) - .minDocCount(0) - .subAggregation(filters("filters", new KeyedFilter("all", matchAllQuery()))) - ) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - Histogram histo = searchResponse.getAggregations().get("histo"); - assertThat(histo, Matchers.notNullValue()); - Histogram.Bucket bucket = histo.getBuckets().get(1); - assertThat(bucket, Matchers.notNullValue()); - - Filters filters = bucket.getAggregations().get("filters"); - assertThat(filters, notNullValue()); - Filters.Bucket all = filters.getBucketByKey("all"); - assertThat(all, Matchers.notNullValue()); - assertThat(all.getKeyAsString(), equalTo("all")); - assertThat(all.getDocCount(), is(0L)); + assertNoFailuresAndResponse( + prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(1L) + .minDocCount(0) + .subAggregation(filters("filters", new KeyedFilter("all", matchAllQuery()))) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, Matchers.notNullValue()); + Histogram.Bucket bucket = histo.getBuckets().get(1); + assertThat(bucket, Matchers.notNullValue()); + + Filters filters = bucket.getAggregations().get("filters"); + assertThat(filters, notNullValue()); + Filters.Bucket all = filters.getBucketByKey("all"); + assertThat(all, Matchers.notNullValue()); + assertThat(all.getKeyAsString(), equalTo("all")); + assertThat(all.getDocCount(), is(0L)); + } + ); } public void testSimpleNonKeyed() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation(filters("tags", termQuery("tag", "tag1"), termQuery("tag", "tag2"))) - .get(); - - assertNoFailures(response); - - Filters filters = response.getAggregations().get("tags"); - assertThat(filters, notNullValue()); - assertThat(filters.getName(), equalTo("tags")); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation(filters("tags", termQuery("tag", "tag1"), termQuery("tag", "tag2"))), + response -> { + Filters filters = response.getAggregations().get("tags"); + assertThat(filters, notNullValue()); + assertThat(filters.getName(), equalTo("tags")); - assertThat(filters.getBuckets().size(), equalTo(2)); + assertThat(filters.getBuckets().size(), equalTo(2)); - Collection<? extends Filters.Bucket> buckets = filters.getBuckets(); - Iterator<? extends Filters.Bucket> itr = buckets.iterator(); + Collection<? extends Filters.Bucket> buckets = filters.getBuckets(); + Iterator<? extends Filters.Bucket> itr = buckets.iterator(); - Filters.Bucket bucket = itr.next(); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs)); + Filters.Bucket bucket = itr.next(); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs)); - bucket = itr.next(); + 
assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numTag2Docs)); + } + ); } public void testOtherBucket() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - filters( - "tags", - randomOrder(new KeyedFilter("tag1", termQuery("tag", "tag1")), new KeyedFilter("tag2", termQuery("tag", "tag2"))) - ).otherBucket(true) - ).get(); - - assertNoFailures(response); - - Filters filters = response.getAggregations().get("tags"); - assertThat(filters, notNullValue()); - assertThat(filters.getName(), equalTo("tags")); - - assertThat(filters.getBuckets().size(), equalTo(3)); - - Filters.Bucket bucket = filters.getBucketByKey("tag1"); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs)); - - bucket = filters.getBucketByKey("tag2"); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numTag2Docs)); - - bucket = filters.getBucketByKey("_other_"); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numOtherDocs)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + filters( + "tags", + randomOrder(new KeyedFilter("tag1", termQuery("tag", "tag1")), new KeyedFilter("tag2", termQuery("tag", "tag2"))) + ).otherBucket(true) + ), + response -> { + Filters filters = response.getAggregations().get("tags"); + assertThat(filters, notNullValue()); + assertThat(filters.getName(), equalTo("tags")); + + assertThat(filters.getBuckets().size(), equalTo(3)); + + Filters.Bucket bucket = filters.getBucketByKey("tag1"); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs)); + + bucket = filters.getBucketByKey("tag2"); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numTag2Docs)); + + bucket = filters.getBucketByKey("_other_"); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numOtherDocs)); + } + ); } public void testOtherNamedBucket() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - filters( - "tags", - randomOrder(new KeyedFilter("tag1", termQuery("tag", "tag1")), new KeyedFilter("tag2", termQuery("tag", "tag2"))) - ).otherBucket(true).otherBucketKey("foobar") - ).get(); - - assertNoFailures(response); - - Filters filters = response.getAggregations().get("tags"); - assertThat(filters, notNullValue()); - assertThat(filters.getName(), equalTo("tags")); - - assertThat(filters.getBuckets().size(), equalTo(3)); - - Filters.Bucket bucket = filters.getBucketByKey("tag1"); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs)); - - bucket = filters.getBucketByKey("tag2"); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numTag2Docs)); - - bucket = filters.getBucketByKey("foobar"); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numOtherDocs)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + filters( + "tags", + randomOrder(new KeyedFilter("tag1", termQuery("tag", "tag1")), new KeyedFilter("tag2", termQuery("tag", "tag2"))) + ).otherBucket(true).otherBucketKey("foobar") + ), + response -> { + Filters filters = response.getAggregations().get("tags"); + assertThat(filters, notNullValue()); + assertThat(filters.getName(), 
equalTo("tags")); + + assertThat(filters.getBuckets().size(), equalTo(3)); + + Filters.Bucket bucket = filters.getBucketByKey("tag1"); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs)); + + bucket = filters.getBucketByKey("tag2"); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numTag2Docs)); + + bucket = filters.getBucketByKey("foobar"); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numOtherDocs)); + } + ); } public void testOtherNonKeyed() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - filters("tags", termQuery("tag", "tag1"), termQuery("tag", "tag2")).otherBucket(true) - ).get(); - - assertNoFailures(response); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation(filters("tags", termQuery("tag", "tag1"), termQuery("tag", "tag2")).otherBucket(true)), + response -> { + Filters filters = response.getAggregations().get("tags"); + assertThat(filters, notNullValue()); + assertThat(filters.getName(), equalTo("tags")); - Filters filters = response.getAggregations().get("tags"); - assertThat(filters, notNullValue()); - assertThat(filters.getName(), equalTo("tags")); + assertThat(filters.getBuckets().size(), equalTo(3)); - assertThat(filters.getBuckets().size(), equalTo(3)); + Collection<? extends Filters.Bucket> buckets = filters.getBuckets(); + Iterator<? extends Filters.Bucket> itr = buckets.iterator(); - Collection<? extends Filters.Bucket> buckets = filters.getBuckets(); - Iterator<? extends Filters.Bucket> itr = buckets.iterator(); + Filters.Bucket bucket = itr.next(); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs)); - Filters.Bucket bucket = itr.next(); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs)); + bucket = itr.next(); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numTag2Docs)); - bucket = itr.next(); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numTag2Docs)); - - bucket = itr.next(); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numOtherDocs)); + bucket = itr.next(); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numOtherDocs)); + } + ); } public void testOtherWithSubAggregation() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - filters( - "tags", - randomOrder(new KeyedFilter("tag1", termQuery("tag", "tag1")), new KeyedFilter("tag2", termQuery("tag", "tag2"))) - ).otherBucket(true).subAggregation(avg("avg_value").field("value")) - ).get(); - - assertNoFailures(response); - - Filters filters = response.getAggregations().get("tags"); - assertThat(filters, notNullValue()); - assertThat(filters.getName(), equalTo("tags")); - - assertThat(filters.getBuckets().size(), equalTo(3)); - assertThat(((InternalAggregation) filters).getProperty("_bucket_count"), equalTo(3)); - Object[] propertiesKeys = (Object[]) ((InternalAggregation) filters).getProperty("_key"); - Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) filters).getProperty("_count"); - Object[] propertiesCounts = (Object[]) ((InternalAggregation) filters).getProperty("avg_value.value"); - - Filters.Bucket bucket = filters.getBucketByKey("tag1"); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) 
numTag1Docs)); - long sum = 0; - for (int i = 0; i < numTag1Docs; ++i) { - sum += i + 1; - } - assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); - Avg avgValue = bucket.getAggregations().get("avg_value"); - assertThat(avgValue, notNullValue()); - assertThat(avgValue.getName(), equalTo("avg_value")); - assertThat(avgValue.getValue(), equalTo((double) sum / numTag1Docs)); - assertThat(propertiesKeys[0], equalTo("tag1")); - assertThat(propertiesDocCounts[0], equalTo((long) numTag1Docs)); - assertThat(propertiesCounts[0], equalTo((double) sum / numTag1Docs)); - - bucket = filters.getBucketByKey("tag2"); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numTag2Docs)); - sum = 0; - for (int i = numTag1Docs; i < (numTag1Docs + numTag2Docs); ++i) { - sum += i; - } - assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); - avgValue = bucket.getAggregations().get("avg_value"); - assertThat(avgValue, notNullValue()); - assertThat(avgValue.getName(), equalTo("avg_value")); - assertThat(avgValue.getValue(), equalTo((double) sum / numTag2Docs)); - assertThat(propertiesKeys[1], equalTo("tag2")); - assertThat(propertiesDocCounts[1], equalTo((long) numTag2Docs)); - assertThat(propertiesCounts[1], equalTo((double) sum / numTag2Docs)); - - bucket = filters.getBucketByKey("_other_"); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numOtherDocs)); - sum = 0; - for (int i = numTag1Docs + numTag2Docs; i < numDocs; ++i) { - sum += i; - } - assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); - avgValue = bucket.getAggregations().get("avg_value"); - assertThat(avgValue, notNullValue()); - assertThat(avgValue.getName(), equalTo("avg_value")); - assertThat(avgValue.getValue(), equalTo((double) sum / numOtherDocs)); - assertThat(propertiesKeys[2], equalTo("_other_")); - assertThat(propertiesDocCounts[2], equalTo((long) numOtherDocs)); - assertThat(propertiesCounts[2], equalTo((double) sum / numOtherDocs)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + filters( + "tags", + randomOrder(new KeyedFilter("tag1", termQuery("tag", "tag1")), new KeyedFilter("tag2", termQuery("tag", "tag2"))) + ).otherBucket(true).subAggregation(avg("avg_value").field("value")) + ), + response -> { + Filters filters = response.getAggregations().get("tags"); + assertThat(filters, notNullValue()); + assertThat(filters.getName(), equalTo("tags")); + + assertThat(filters.getBuckets().size(), equalTo(3)); + assertThat(((InternalAggregation) filters).getProperty("_bucket_count"), equalTo(3)); + Object[] propertiesKeys = (Object[]) ((InternalAggregation) filters).getProperty("_key"); + Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) filters).getProperty("_count"); + Object[] propertiesCounts = (Object[]) ((InternalAggregation) filters).getProperty("avg_value.value"); + + Filters.Bucket bucket = filters.getBucketByKey("tag1"); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs)); + long sum = 0; + for (int i = 0; i < numTag1Docs; ++i) { + sum += i + 1; + } + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + Avg avgValue = bucket.getAggregations().get("avg_value"); + assertThat(avgValue, notNullValue()); + assertThat(avgValue.getName(), equalTo("avg_value")); + assertThat(avgValue.getValue(), equalTo((double) sum / numTag1Docs)); + assertThat(propertiesKeys[0], equalTo("tag1")); + 
assertThat(propertiesDocCounts[0], equalTo((long) numTag1Docs)); + assertThat(propertiesCounts[0], equalTo((double) sum / numTag1Docs)); + + bucket = filters.getBucketByKey("tag2"); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numTag2Docs)); + sum = 0; + for (int i = numTag1Docs; i < (numTag1Docs + numTag2Docs); ++i) { + sum += i; + } + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + avgValue = bucket.getAggregations().get("avg_value"); + assertThat(avgValue, notNullValue()); + assertThat(avgValue.getName(), equalTo("avg_value")); + assertThat(avgValue.getValue(), equalTo((double) sum / numTag2Docs)); + assertThat(propertiesKeys[1], equalTo("tag2")); + assertThat(propertiesDocCounts[1], equalTo((long) numTag2Docs)); + assertThat(propertiesCounts[1], equalTo((double) sum / numTag2Docs)); + + bucket = filters.getBucketByKey("_other_"); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numOtherDocs)); + sum = 0; + for (int i = numTag1Docs + numTag2Docs; i < numDocs; ++i) { + sum += i; + } + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + avgValue = bucket.getAggregations().get("avg_value"); + assertThat(avgValue, notNullValue()); + assertThat(avgValue.getName(), equalTo("avg_value")); + assertThat(avgValue.getValue(), equalTo((double) sum / numOtherDocs)); + assertThat(propertiesKeys[2], equalTo("_other_")); + assertThat(propertiesDocCounts[2], equalTo((long) numOtherDocs)); + assertThat(propertiesCounts[2], equalTo((double) sum / numOtherDocs)); + } + ); } public void testEmptyAggregationWithOtherBucket() throws Exception { - SearchResponse searchResponse = prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field("value") - .interval(1L) - .minDocCount(0) - .subAggregation(filters("filters", new KeyedFilter("foo", matchAllQuery())).otherBucket(true).otherBucketKey("bar")) - ) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - Histogram histo = searchResponse.getAggregations().get("histo"); - assertThat(histo, Matchers.notNullValue()); - Histogram.Bucket bucket = histo.getBuckets().get(1); - assertThat(bucket, Matchers.notNullValue()); - - Filters filters = bucket.getAggregations().get("filters"); - assertThat(filters, notNullValue()); - - Filters.Bucket other = filters.getBucketByKey("bar"); - assertThat(other, Matchers.notNullValue()); - assertThat(other.getKeyAsString(), equalTo("bar")); - assertThat(other.getDocCount(), is(0L)); + assertNoFailuresAndResponse( + prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(1L) + .minDocCount(0) + .subAggregation(filters("filters", new KeyedFilter("foo", matchAllQuery())).otherBucket(true).otherBucketKey("bar")) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, Matchers.notNullValue()); + Histogram.Bucket bucket = histo.getBuckets().get(1); + assertThat(bucket, Matchers.notNullValue()); + + Filters filters = bucket.getAggregations().get("filters"); + assertThat(filters, notNullValue()); + + Filters.Bucket other = filters.getBucketByKey("bar"); + assertThat(other, Matchers.notNullValue()); + assertThat(other.getKeyAsString(), equalTo("bar")); + assertThat(other.getDocCount(), is(0L)); + } + ); } private static KeyedFilter[] 
randomOrder(KeyedFilter... filters) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java index 7639445f1f5ac..bb895c2564d39 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java @@ -9,7 +9,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.settings.Settings; @@ -39,7 +38,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.geoDistance; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -142,292 +141,298 @@ public void testSimple() throws Exception { for (Consumer range : ranges) { range.accept(builder); } - SearchResponse response = prepareSearch("idx").addAggregation(builder).get(); - - assertNoFailures(response); - - Range geoDist = response.getAggregations().get("amsterdam_rings"); - assertThat(geoDist, notNullValue()); - assertThat(geoDist.getName(), equalTo("amsterdam_rings")); - List buckets = geoDist.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("*-500.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(0.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(500.0)); - assertThat(bucket.getFromAsString(), equalTo("0.0")); - assertThat(bucket.getToAsString(), equalTo("500.0")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("500.0-1000.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(500.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(1000.0)); - assertThat(bucket.getFromAsString(), equalTo("500.0")); - assertThat(bucket.getToAsString(), equalTo("1000.0")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("1000.0-*")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(bucket.getFromAsString(), equalTo("1000.0")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(1L)); + assertNoFailuresAndResponse(prepareSearch("idx").addAggregation(builder), response -> { + Range geoDist = response.getAggregations().get("amsterdam_rings"); + assertThat(geoDist, notNullValue()); + assertThat(geoDist.getName(), 
equalTo("amsterdam_rings")); + List buckets = geoDist.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("*-500.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(0.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(500.0)); + assertThat(bucket.getFromAsString(), equalTo("0.0")); + assertThat(bucket.getToAsString(), equalTo("500.0")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("500.0-1000.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(500.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(1000.0)); + assertThat(bucket.getFromAsString(), equalTo("500.0")); + assertThat(bucket.getToAsString(), equalTo("1000.0")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("1000.0-*")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(bucket.getFromAsString(), equalTo("1000.0")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(1L)); + }); } public void testSimpleWithCustomKeys() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)).field("location") - .unit(DistanceUnit.KILOMETERS) - .addUnboundedTo("ring1", 500) - .addRange("ring2", 500, 1000) - .addUnboundedFrom("ring3", 1000) - ).get(); - - assertNoFailures(response); - - Range geoDist = response.getAggregations().get("amsterdam_rings"); - assertThat(geoDist, notNullValue()); - assertThat(geoDist.getName(), equalTo("amsterdam_rings")); - List buckets = geoDist.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("ring1")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(0.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(500.0)); - assertThat(bucket.getFromAsString(), equalTo("0.0")); - assertThat(bucket.getToAsString(), equalTo("500.0")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("ring2")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(500.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(1000.0)); - assertThat(bucket.getFromAsString(), equalTo("500.0")); - assertThat(bucket.getToAsString(), equalTo("1000.0")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("ring3")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(bucket.getFromAsString(), equalTo("1000.0")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(1L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + geoDistance("amsterdam_rings", new GeoPoint(52.3760, 
4.894)).field("location") + .unit(DistanceUnit.KILOMETERS) + .addUnboundedTo("ring1", 500) + .addRange("ring2", 500, 1000) + .addUnboundedFrom("ring3", 1000) + ), + response -> { + Range geoDist = response.getAggregations().get("amsterdam_rings"); + assertThat(geoDist, notNullValue()); + assertThat(geoDist.getName(), equalTo("amsterdam_rings")); + List buckets = geoDist.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("ring1")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(0.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(500.0)); + assertThat(bucket.getFromAsString(), equalTo("0.0")); + assertThat(bucket.getToAsString(), equalTo("500.0")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("ring2")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(500.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(1000.0)); + assertThat(bucket.getFromAsString(), equalTo("500.0")); + assertThat(bucket.getToAsString(), equalTo("1000.0")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("ring3")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(bucket.getFromAsString(), equalTo("1000.0")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(1L)); + } + ); } public void testUnmapped() throws Exception { clusterAdmin().prepareHealth("idx_unmapped").setWaitForYellowStatus().get(); - SearchResponse response = prepareSearch("idx_unmapped").addAggregation( - geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)).field("location") - .unit(DistanceUnit.KILOMETERS) - .addUnboundedTo(500) - .addRange(500, 1000) - .addUnboundedFrom(1000) - ).get(); - - assertNoFailures(response); - - Range geoDist = response.getAggregations().get("amsterdam_rings"); - assertThat(geoDist, notNullValue()); - assertThat(geoDist.getName(), equalTo("amsterdam_rings")); - List buckets = geoDist.getBuckets(); - assertThat(geoDist.getBuckets().size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("*-500.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(0.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(500.0)); - assertThat(bucket.getFromAsString(), equalTo("0.0")); - assertThat(bucket.getToAsString(), equalTo("500.0")); - assertThat(bucket.getDocCount(), equalTo(0L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("500.0-1000.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(500.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(1000.0)); - assertThat(bucket.getFromAsString(), equalTo("500.0")); - assertThat(bucket.getToAsString(), equalTo("1000.0")); - assertThat(bucket.getDocCount(), equalTo(0L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("1000.0-*")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000.0)); - 
assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(bucket.getFromAsString(), equalTo("1000.0")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(0L)); + assertNoFailuresAndResponse( + prepareSearch("idx_unmapped").addAggregation( + geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)).field("location") + .unit(DistanceUnit.KILOMETERS) + .addUnboundedTo(500) + .addRange(500, 1000) + .addUnboundedFrom(1000) + ), + response -> { + Range geoDist = response.getAggregations().get("amsterdam_rings"); + assertThat(geoDist, notNullValue()); + assertThat(geoDist.getName(), equalTo("amsterdam_rings")); + List buckets = geoDist.getBuckets(); + assertThat(geoDist.getBuckets().size(), equalTo(3)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("*-500.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(0.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(500.0)); + assertThat(bucket.getFromAsString(), equalTo("0.0")); + assertThat(bucket.getToAsString(), equalTo("500.0")); + assertThat(bucket.getDocCount(), equalTo(0L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("500.0-1000.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(500.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(1000.0)); + assertThat(bucket.getFromAsString(), equalTo("500.0")); + assertThat(bucket.getToAsString(), equalTo("1000.0")); + assertThat(bucket.getDocCount(), equalTo(0L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("1000.0-*")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(bucket.getFromAsString(), equalTo("1000.0")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(0L)); + } + ); } public void testPartiallyUnmapped() throws Exception { - SearchResponse response = prepareSearch("idx", "idx_unmapped").addAggregation( - geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)).field("location") - .unit(DistanceUnit.KILOMETERS) - .addUnboundedTo(500) - .addRange(500, 1000) - .addUnboundedFrom(1000) - ).get(); - - assertNoFailures(response); - - Range geoDist = response.getAggregations().get("amsterdam_rings"); - assertThat(geoDist, notNullValue()); - assertThat(geoDist.getName(), equalTo("amsterdam_rings")); - List buckets = geoDist.getBuckets(); - assertThat(geoDist.getBuckets().size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("*-500.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(0.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(500.0)); - assertThat(bucket.getFromAsString(), equalTo("0.0")); - assertThat(bucket.getToAsString(), equalTo("500.0")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("500.0-1000.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(500.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(1000.0)); - assertThat(bucket.getFromAsString(), equalTo("500.0")); - 
assertThat(bucket.getToAsString(), equalTo("1000.0")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("1000.0-*")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(bucket.getFromAsString(), equalTo("1000.0")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(1L)); + assertNoFailuresAndResponse( + prepareSearch("idx", "idx_unmapped").addAggregation( + geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)).field("location") + .unit(DistanceUnit.KILOMETERS) + .addUnboundedTo(500) + .addRange(500, 1000) + .addUnboundedFrom(1000) + ), + response -> { + Range geoDist = response.getAggregations().get("amsterdam_rings"); + assertThat(geoDist, notNullValue()); + assertThat(geoDist.getName(), equalTo("amsterdam_rings")); + List buckets = geoDist.getBuckets(); + assertThat(geoDist.getBuckets().size(), equalTo(3)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("*-500.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(0.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(500.0)); + assertThat(bucket.getFromAsString(), equalTo("0.0")); + assertThat(bucket.getToAsString(), equalTo("500.0")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("500.0-1000.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(500.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(1000.0)); + assertThat(bucket.getFromAsString(), equalTo("500.0")); + assertThat(bucket.getToAsString(), equalTo("1000.0")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("1000.0-*")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(bucket.getFromAsString(), equalTo("1000.0")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(1L)); + } + ); } public void testWithSubAggregation() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)).field("location") - .unit(DistanceUnit.KILOMETERS) - .addUnboundedTo(500) - .addRange(500, 1000) - .addUnboundedFrom(1000) - .subAggregation(terms("cities").field("city").collectMode(randomFrom(SubAggCollectionMode.values()))) - ).get(); - - assertNoFailures(response); - - Range geoDist = response.getAggregations().get("amsterdam_rings"); - assertThat(geoDist, notNullValue()); - assertThat(geoDist.getName(), equalTo("amsterdam_rings")); - List buckets = geoDist.getBuckets(); - assertThat(geoDist.getBuckets().size(), equalTo(3)); - assertThat(((InternalAggregation) geoDist).getProperty("_bucket_count"), equalTo(3)); - Object[] propertiesKeys = (Object[]) ((InternalAggregation) geoDist).getProperty("_key"); - Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) geoDist).getProperty("_count"); - Object[] propertiesCities = (Object[]) ((InternalAggregation) geoDist).getProperty("cities"); - - 
Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("*-500.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(0.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(500.0)); - assertThat(bucket.getFromAsString(), equalTo("0.0")); - assertThat(bucket.getToAsString(), equalTo("500.0")); - assertThat(bucket.getDocCount(), equalTo(2L)); - assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); - Terms cities = bucket.getAggregations().get("cities"); - assertThat(cities, Matchers.notNullValue()); - Set names = new HashSet<>(); - for (Terms.Bucket city : cities.getBuckets()) { - names.add(city.getKeyAsString()); - } - assertThat(names.contains("utrecht") && names.contains("haarlem"), is(true)); - assertThat((String) propertiesKeys[0], equalTo("*-500.0")); - assertThat((long) propertiesDocCounts[0], equalTo(2L)); - assertThat((Terms) propertiesCities[0], sameInstance(cities)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("500.0-1000.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(500.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(1000.0)); - assertThat(bucket.getFromAsString(), equalTo("500.0")); - assertThat(bucket.getToAsString(), equalTo("1000.0")); - assertThat(bucket.getDocCount(), equalTo(2L)); - assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); - cities = bucket.getAggregations().get("cities"); - assertThat(cities, Matchers.notNullValue()); - names = new HashSet<>(); - for (Terms.Bucket city : cities.getBuckets()) { - names.add(city.getKeyAsString()); - } - assertThat(names.contains("berlin") && names.contains("prague"), is(true)); - assertThat((String) propertiesKeys[1], equalTo("500.0-1000.0")); - assertThat((long) propertiesDocCounts[1], equalTo(2L)); - assertThat((Terms) propertiesCities[1], sameInstance(cities)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("1000.0-*")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(bucket.getFromAsString(), equalTo("1000.0")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(1L)); - assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); - cities = bucket.getAggregations().get("cities"); - assertThat(cities, Matchers.notNullValue()); - names = new HashSet<>(); - for (Terms.Bucket city : cities.getBuckets()) { - names.add(city.getKeyAsString()); - } - assertThat(names.contains("tel-aviv"), is(true)); - assertThat((String) propertiesKeys[2], equalTo("1000.0-*")); - assertThat((long) propertiesDocCounts[2], equalTo(1L)); - assertThat((Terms) propertiesCities[2], sameInstance(cities)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)).field("location") + .unit(DistanceUnit.KILOMETERS) + .addUnboundedTo(500) + .addRange(500, 1000) + .addUnboundedFrom(1000) + .subAggregation(terms("cities").field("city").collectMode(randomFrom(SubAggCollectionMode.values()))) + ), + response -> { + Range geoDist = response.getAggregations().get("amsterdam_rings"); + assertThat(geoDist, notNullValue()); + assertThat(geoDist.getName(), equalTo("amsterdam_rings")); + List buckets = 
geoDist.getBuckets(); + assertThat(geoDist.getBuckets().size(), equalTo(3)); + assertThat(((InternalAggregation) geoDist).getProperty("_bucket_count"), equalTo(3)); + Object[] propertiesKeys = (Object[]) ((InternalAggregation) geoDist).getProperty("_key"); + Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) geoDist).getProperty("_count"); + Object[] propertiesCities = (Object[]) ((InternalAggregation) geoDist).getProperty("cities"); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("*-500.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(0.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(500.0)); + assertThat(bucket.getFromAsString(), equalTo("0.0")); + assertThat(bucket.getToAsString(), equalTo("500.0")); + assertThat(bucket.getDocCount(), equalTo(2L)); + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + Terms cities = bucket.getAggregations().get("cities"); + assertThat(cities, Matchers.notNullValue()); + Set names = new HashSet<>(); + for (Terms.Bucket city : cities.getBuckets()) { + names.add(city.getKeyAsString()); + } + assertThat(names.contains("utrecht") && names.contains("haarlem"), is(true)); + assertThat((String) propertiesKeys[0], equalTo("*-500.0")); + assertThat((long) propertiesDocCounts[0], equalTo(2L)); + assertThat((Terms) propertiesCities[0], sameInstance(cities)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("500.0-1000.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(500.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(1000.0)); + assertThat(bucket.getFromAsString(), equalTo("500.0")); + assertThat(bucket.getToAsString(), equalTo("1000.0")); + assertThat(bucket.getDocCount(), equalTo(2L)); + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + cities = bucket.getAggregations().get("cities"); + assertThat(cities, Matchers.notNullValue()); + names = new HashSet<>(); + for (Terms.Bucket city : cities.getBuckets()) { + names.add(city.getKeyAsString()); + } + assertThat(names.contains("berlin") && names.contains("prague"), is(true)); + assertThat((String) propertiesKeys[1], equalTo("500.0-1000.0")); + assertThat((long) propertiesDocCounts[1], equalTo(2L)); + assertThat((Terms) propertiesCities[1], sameInstance(cities)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("1000.0-*")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(bucket.getFromAsString(), equalTo("1000.0")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(1L)); + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + cities = bucket.getAggregations().get("cities"); + assertThat(cities, Matchers.notNullValue()); + names = new HashSet<>(); + for (Terms.Bucket city : cities.getBuckets()) { + names.add(city.getKeyAsString()); + } + assertThat(names.contains("tel-aviv"), is(true)); + assertThat((String) propertiesKeys[2], equalTo("1000.0-*")); + assertThat((long) propertiesDocCounts[2], equalTo(1L)); + assertThat((Terms) propertiesCities[2], sameInstance(cities)); + } + ); } public void testEmptyAggregation() throws Exception { - SearchResponse searchResponse = 
prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field("value") - .interval(1L) - .minDocCount(0) - .subAggregation(geoDistance("geo_dist", new GeoPoint(52.3760, 4.894)).field("location").addRange("0-100", 0.0, 100.0)) - ) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - Histogram histo = searchResponse.getAggregations().get("histo"); - assertThat(histo, Matchers.notNullValue()); - Histogram.Bucket bucket = histo.getBuckets().get(1); - assertThat(bucket, Matchers.notNullValue()); - - Range geoDistance = bucket.getAggregations().get("geo_dist"); - // TODO: use diamond once JI-9019884 is fixed - List buckets = new ArrayList<>(geoDistance.getBuckets()); - assertThat(geoDistance, Matchers.notNullValue()); - assertThat(geoDistance.getName(), equalTo("geo_dist")); - assertThat(buckets.size(), is(1)); - assertThat((String) buckets.get(0).getKey(), equalTo("0-100")); - assertThat(((Number) buckets.get(0).getFrom()).doubleValue(), equalTo(0.0)); - assertThat(((Number) buckets.get(0).getTo()).doubleValue(), equalTo(100.0)); - assertThat(buckets.get(0).getFromAsString(), equalTo("0.0")); - assertThat(buckets.get(0).getToAsString(), equalTo("100.0")); - assertThat(buckets.get(0).getDocCount(), equalTo(0L)); + assertNoFailuresAndResponse( + prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(1L) + .minDocCount(0) + .subAggregation( + geoDistance("geo_dist", new GeoPoint(52.3760, 4.894)).field("location").addRange("0-100", 0.0, 100.0) + ) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, Matchers.notNullValue()); + Histogram.Bucket bucket = histo.getBuckets().get(1); + assertThat(bucket, Matchers.notNullValue()); + + Range geoDistance = bucket.getAggregations().get("geo_dist"); + // TODO: use diamond once JI-9019884 is fixed + List buckets = new ArrayList<>(geoDistance.getBuckets()); + assertThat(geoDistance, Matchers.notNullValue()); + assertThat(geoDistance.getName(), equalTo("geo_dist")); + assertThat(buckets.size(), is(1)); + assertThat((String) buckets.get(0).getKey(), equalTo("0-100")); + assertThat(((Number) buckets.get(0).getFrom()).doubleValue(), equalTo(0.0)); + assertThat(((Number) buckets.get(0).getTo()).doubleValue(), equalTo(100.0)); + assertThat(buckets.get(0).getFromAsString(), equalTo("0.0")); + assertThat(buckets.get(0).getToAsString(), equalTo("100.0")); + assertThat(buckets.get(0).getDocCount(), equalTo(0L)); + } + ); } public void testNoRangesInQuery() { @@ -442,49 +447,50 @@ public void testNoRangesInQuery() { } public void testMultiValues() throws Exception { - SearchResponse response = prepareSearch("idx-multi").addAggregation( - geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)).field("location") - .unit(DistanceUnit.KILOMETERS) - .distanceType(org.elasticsearch.common.geo.GeoDistance.ARC) - .addUnboundedTo(500) - .addRange(500, 1000) - .addUnboundedFrom(1000) - ).get(); - - assertNoFailures(response); - - Range geoDist = response.getAggregations().get("amsterdam_rings"); - assertThat(geoDist, notNullValue()); - assertThat(geoDist.getName(), equalTo("amsterdam_rings")); - List buckets = geoDist.getBuckets(); - assertThat(geoDist.getBuckets().size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), 
equalTo("*-500.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(0.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(500.0)); - assertThat(bucket.getFromAsString(), equalTo("0.0")); - assertThat(bucket.getToAsString(), equalTo("500.0")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("500.0-1000.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(500.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(1000.0)); - assertThat(bucket.getFromAsString(), equalTo("500.0")); - assertThat(bucket.getToAsString(), equalTo("1000.0")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("1000.0-*")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(bucket.getFromAsString(), equalTo("1000.0")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(1L)); + assertNoFailuresAndResponse( + prepareSearch("idx-multi").addAggregation( + geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)).field("location") + .unit(DistanceUnit.KILOMETERS) + .distanceType(org.elasticsearch.common.geo.GeoDistance.ARC) + .addUnboundedTo(500) + .addRange(500, 1000) + .addUnboundedFrom(1000) + ), + response -> { + Range geoDist = response.getAggregations().get("amsterdam_rings"); + assertThat(geoDist, notNullValue()); + assertThat(geoDist.getName(), equalTo("amsterdam_rings")); + List buckets = geoDist.getBuckets(); + assertThat(geoDist.getBuckets().size(), equalTo(3)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("*-500.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(0.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(500.0)); + assertThat(bucket.getFromAsString(), equalTo("0.0")); + assertThat(bucket.getToAsString(), equalTo("500.0")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("500.0-1000.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(500.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(1000.0)); + assertThat(bucket.getFromAsString(), equalTo("500.0")); + assertThat(bucket.getToAsString(), equalTo("1000.0")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("1000.0-*")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(bucket.getFromAsString(), equalTo("1000.0")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(1L)); + } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java index 1cd8d5bc2fc3d..dcb56eeb10385 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.settings.Settings; @@ -36,7 +35,7 @@ import static org.elasticsearch.geometry.utils.Geohash.stringEncode; import static org.elasticsearch.search.aggregations.AggregationBuilders.geohashGrid; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -132,47 +131,47 @@ public void setupSuiteScopeCluster() throws Exception { public void testSimple() throws Exception { for (int precision = 1; precision <= PRECISION; precision++) { - SearchResponse response = prepareSearch("idx").addAggregation(geohashGrid("geohashgrid").field("location").precision(precision)) - .get(); - - assertNoFailures(response); - - GeoGrid geoGrid = response.getAggregations().get("geohashgrid"); - List buckets = geoGrid.getBuckets(); - Object[] propertiesKeys = (Object[]) ((InternalAggregation) geoGrid).getProperty("_key"); - Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) geoGrid).getProperty("_count"); - for (int i = 0; i < buckets.size(); i++) { - GeoGrid.Bucket cell = buckets.get(i); - String geohash = cell.getKeyAsString(); - - long bucketCount = cell.getDocCount(); - int expectedBucketCount = expectedDocCountsForGeoHash.get(geohash); - assertNotSame(bucketCount, 0); - assertEquals("Geohash " + geohash + " has wrong doc count ", expectedBucketCount, bucketCount); - GeoPoint geoPoint = (GeoPoint) propertiesKeys[i]; - assertThat(stringEncode(geoPoint.lon(), geoPoint.lat(), precision), equalTo(geohash)); - assertThat((long) propertiesDocCounts[i], equalTo(bucketCount)); - } + final int finalPrecision = precision; + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation(geohashGrid("geohashgrid").field("location").precision(precision)), + response -> { + GeoGrid geoGrid = response.getAggregations().get("geohashgrid"); + List buckets = geoGrid.getBuckets(); + Object[] propertiesKeys = (Object[]) ((InternalAggregation) geoGrid).getProperty("_key"); + Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) geoGrid).getProperty("_count"); + for (int i = 0; i < buckets.size(); i++) { + GeoGrid.Bucket cell = buckets.get(i); + String geohash = cell.getKeyAsString(); + + long bucketCount = cell.getDocCount(); + int expectedBucketCount = expectedDocCountsForGeoHash.get(geohash); + assertNotSame(bucketCount, 0); + assertEquals("Geohash " + geohash + " has wrong doc count ", expectedBucketCount, bucketCount); + GeoPoint geoPoint = (GeoPoint) propertiesKeys[i]; + assertThat(stringEncode(geoPoint.lon(), geoPoint.lat(), finalPrecision), equalTo(geohash)); + assertThat((long) propertiesDocCounts[i], equalTo(bucketCount)); + } + } + ); } } public void testMultivalued() throws Exception { for (int precision = 1; precision <= PRECISION; precision++) { - SearchResponse response = 
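/*
 * The `final int finalPrecision = precision;` snapshot introduced above is forced by the new
 * callback style: Java lambdas may only capture local variables that are final or effectively
 * final, and `precision` is mutated by the enclosing for-loop. A self-contained illustration of
 * the rule (names here are hypothetical):
 *
 *     import java.util.function.IntSupplier;
 *
 *     static void captureDemo() {
 *         for (int precision = 1; precision <= 12; precision++) {
 *             // IntSupplier bad = () -> precision;     // does not compile: 'precision' is reassigned
 *             final int finalPrecision = precision;     // fresh, never-reassigned local per iteration
 *             IntSupplier ok = () -> finalPrecision;    // compiles: the lambda captures the snapshot
 *             assert ok.getAsInt() == precision;
 *         }
 *     }
 */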
prepareSearch("multi_valued_idx").addAggregation( - geohashGrid("geohashgrid").field("location").precision(precision) - ).get(); - - assertNoFailures(response); - - GeoGrid geoGrid = response.getAggregations().get("geohashgrid"); - for (GeoGrid.Bucket cell : geoGrid.getBuckets()) { - String geohash = cell.getKeyAsString(); - - long bucketCount = cell.getDocCount(); - int expectedBucketCount = multiValuedExpectedDocCountsForGeoHash.get(geohash); - assertNotSame(bucketCount, 0); - assertEquals("Geohash " + geohash + " has wrong doc count ", expectedBucketCount, bucketCount); - } + assertNoFailuresAndResponse( + prepareSearch("multi_valued_idx").addAggregation(geohashGrid("geohashgrid").field("location").precision(precision)), + response -> { + GeoGrid geoGrid = response.getAggregations().get("geohashgrid"); + for (GeoGrid.Bucket cell : geoGrid.getBuckets()) { + String geohash = cell.getKeyAsString(); + + long bucketCount = cell.getDocCount(); + int expectedBucketCount = multiValuedExpectedDocCountsForGeoHash.get(geohash); + assertNotSame(bucketCount, 0); + assertEquals("Geohash " + geohash + " has wrong doc count ", expectedBucketCount, bucketCount); + } + } + ); } } @@ -180,85 +179,85 @@ public void testFiltered() throws Exception { GeoBoundingBoxQueryBuilder bbox = new GeoBoundingBoxQueryBuilder("location"); bbox.setCorners(smallestGeoHash).queryName("bbox"); for (int precision = 1; precision <= PRECISION; precision++) { - SearchResponse response = prepareSearch("idx").addAggregation( - AggregationBuilders.filter("filtered", bbox) - .subAggregation(geohashGrid("geohashgrid").field("location").precision(precision)) - ).get(); - - assertNoFailures(response); - - Filter filter = response.getAggregations().get("filtered"); - - GeoGrid geoGrid = filter.getAggregations().get("geohashgrid"); - for (GeoGrid.Bucket cell : geoGrid.getBuckets()) { - String geohash = cell.getKeyAsString(); - long bucketCount = cell.getDocCount(); - int expectedBucketCount = expectedDocCountsForGeoHash.get(geohash); - assertNotSame(bucketCount, 0); - assertTrue("Buckets must be filtered", geohash.startsWith(smallestGeoHash)); - assertEquals("Geohash " + geohash + " has wrong doc count ", expectedBucketCount, bucketCount); - - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + AggregationBuilders.filter("filtered", bbox) + .subAggregation(geohashGrid("geohashgrid").field("location").precision(precision)) + ), + response -> { + Filter filter = response.getAggregations().get("filtered"); + + GeoGrid geoGrid = filter.getAggregations().get("geohashgrid"); + for (GeoGrid.Bucket cell : geoGrid.getBuckets()) { + String geohash = cell.getKeyAsString(); + long bucketCount = cell.getDocCount(); + int expectedBucketCount = expectedDocCountsForGeoHash.get(geohash); + assertNotSame(bucketCount, 0); + assertTrue("Buckets must be filtered", geohash.startsWith(smallestGeoHash)); + assertEquals("Geohash " + geohash + " has wrong doc count ", expectedBucketCount, bucketCount); + } + } + ); } } public void testUnmapped() throws Exception { for (int precision = 1; precision <= PRECISION; precision++) { - SearchResponse response = prepareSearch("idx_unmapped").addAggregation( - geohashGrid("geohashgrid").field("location").precision(precision) - ).get(); - - assertNoFailures(response); - - GeoGrid geoGrid = response.getAggregations().get("geohashgrid"); - assertThat(geoGrid.getBuckets().size(), equalTo(0)); + assertNoFailuresAndResponse( + 
prepareSearch("idx_unmapped").addAggregation(geohashGrid("geohashgrid").field("location").precision(precision)), + response -> { + GeoGrid geoGrid = response.getAggregations().get("geohashgrid"); + assertThat(geoGrid.getBuckets().size(), equalTo(0)); + } + ); } } public void testPartiallyUnmapped() throws Exception { for (int precision = 1; precision <= PRECISION; precision++) { - SearchResponse response = prepareSearch("idx", "idx_unmapped").addAggregation( - geohashGrid("geohashgrid").field("location").precision(precision) - ).get(); - - assertNoFailures(response); - - GeoGrid geoGrid = response.getAggregations().get("geohashgrid"); - for (GeoGrid.Bucket cell : geoGrid.getBuckets()) { - String geohash = cell.getKeyAsString(); - - long bucketCount = cell.getDocCount(); - int expectedBucketCount = expectedDocCountsForGeoHash.get(geohash); - assertNotSame(bucketCount, 0); - assertEquals("Geohash " + geohash + " has wrong doc count ", expectedBucketCount, bucketCount); - } + assertNoFailuresAndResponse( + prepareSearch("idx", "idx_unmapped").addAggregation(geohashGrid("geohashgrid").field("location").precision(precision)), + response -> { + GeoGrid geoGrid = response.getAggregations().get("geohashgrid"); + for (GeoGrid.Bucket cell : geoGrid.getBuckets()) { + String geohash = cell.getKeyAsString(); + + long bucketCount = cell.getDocCount(); + int expectedBucketCount = expectedDocCountsForGeoHash.get(geohash); + assertNotSame(bucketCount, 0); + assertEquals("Geohash " + geohash + " has wrong doc count ", expectedBucketCount, bucketCount); + } + } + ); } } public void testTopMatch() throws Exception { for (int precision = 1; precision <= PRECISION; precision++) { - SearchResponse response = prepareSearch("idx").addAggregation( - geohashGrid("geohashgrid").field("location").size(1).shardSize(100).precision(precision) - ).get(); - - assertNoFailures(response); - - GeoGrid geoGrid = response.getAggregations().get("geohashgrid"); - // Check we only have one bucket with the best match for that resolution - assertThat(geoGrid.getBuckets().size(), equalTo(1)); - for (GeoGrid.Bucket cell : geoGrid.getBuckets()) { - String geohash = cell.getKeyAsString(); - long bucketCount = cell.getDocCount(); - int expectedBucketCount = 0; - for (var entry : expectedDocCountsForGeoHash.entrySet()) { - if (entry.getKey().length() == precision) { - expectedBucketCount = Math.max(expectedBucketCount, entry.getValue()); + final int finalPrecision = precision; + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + geohashGrid("geohashgrid").field("location").size(1).shardSize(100).precision(precision) + ), + response -> { + GeoGrid geoGrid = response.getAggregations().get("geohashgrid"); + // Check we only have one bucket with the best match for that resolution + assertThat(geoGrid.getBuckets().size(), equalTo(1)); + for (GeoGrid.Bucket cell : geoGrid.getBuckets()) { + String geohash = cell.getKeyAsString(); + long bucketCount = cell.getDocCount(); + int expectedBucketCount = 0; + for (var entry : expectedDocCountsForGeoHash.entrySet()) { + if (entry.getKey().length() == finalPrecision) { + expectedBucketCount = Math.max(expectedBucketCount, entry.getValue()); + } + } + assertNotSame(bucketCount, 0); + assertEquals("Geohash " + geohash + " has wrong doc count ", expectedBucketCount, bucketCount); } } - assertNotSame(bucketCount, 0); - assertEquals("Geohash " + geohash + " has wrong doc count ", expectedBucketCount, bucketCount); - } + ); } } diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GlobalIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GlobalIT.java index 347b2324027c0..3ed86b2efc03e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GlobalIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GlobalIT.java @@ -9,7 +9,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.global.Global; @@ -21,7 +20,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.global; import static org.elasticsearch.search.aggregations.AggregationBuilders.stats; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -60,32 +59,32 @@ public void setupSuiteScopeCluster() throws Exception { } public void testWithStatsSubAggregator() throws Exception { - SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.termQuery("tag", "tag1")) - .addAggregation(global("global").subAggregation(stats("value_stats").field("value"))) - .get(); - - assertNoFailures(response); - - Global global = response.getAggregations().get("global"); - assertThat(global, notNullValue()); - assertThat(global.getName(), equalTo("global")); - assertThat(global.getDocCount(), equalTo((long) numDocs)); - assertThat((long) ((InternalAggregation) global).getProperty("_count"), equalTo((long) numDocs)); - assertThat(global.getAggregations().asList().isEmpty(), is(false)); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(QueryBuilders.termQuery("tag", "tag1")) + .addAggregation(global("global").subAggregation(stats("value_stats").field("value"))), + response -> { + Global global = response.getAggregations().get("global"); + assertThat(global, notNullValue()); + assertThat(global.getName(), equalTo("global")); + assertThat(global.getDocCount(), equalTo((long) numDocs)); + assertThat((long) ((InternalAggregation) global).getProperty("_count"), equalTo((long) numDocs)); + assertThat(global.getAggregations().asList().isEmpty(), is(false)); - Stats stats = global.getAggregations().get("value_stats"); - assertThat((Stats) ((InternalAggregation) global).getProperty("value_stats"), sameInstance(stats)); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("value_stats")); - long sum = 0; - for (int i = 0; i < numDocs; ++i) { - sum += i + 1; - } - assertThat(stats.getAvg(), equalTo((double) sum / numDocs)); - assertThat(stats.getMin(), equalTo(1.0)); - assertThat(stats.getMax(), equalTo((double) numDocs)); - assertThat(stats.getCount(), equalTo((long) numDocs)); - assertThat(stats.getSum(), equalTo((double) sum)); + Stats stats = global.getAggregations().get("value_stats"); + assertThat((Stats) ((InternalAggregation) global).getProperty("value_stats"), sameInstance(stats)); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("value_stats")); + long sum = 0; + for (int i = 0; i 
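/*
 * Context for the stats assertions in this hunk: the suite indexes docs carrying exactly the
 * values 1..numDocs, so the loop that accumulates `sum += i + 1` is just the arithmetic series,
 * and the expected stats have closed forms: sum = numDocs * (numDocs + 1) / 2,
 * avg = (numDocs + 1) / 2.0, min = 1, max = numDocs. The loop could equivalently be written as:
 *
 *     long sum = (long) numDocs * (numDocs + 1) / 2;   // Gauss sum of 1..numDocs
 */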
< numDocs; ++i) { + sum += i + 1; + } + assertThat(stats.getAvg(), equalTo((double) sum / numDocs)); + assertThat(stats.getMin(), equalTo(1.0)); + assertThat(stats.getMax(), equalTo((double) numDocs)); + assertThat(stats.getCount(), equalTo((long) numDocs)); + assertThat(stats.getSum(), equalTo((double) sum)); + } + ); } public void testNonTopLevel() throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java index 07b678e89c024..0c65c92c71da4 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; @@ -52,6 +51,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -234,50 +234,52 @@ private void getMultiSortDocs(List builders) throws IOExcep } public void testSingleValuedField() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)) - .get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numValueBuckets)); - - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numValueBuckets)); + + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); + } + } + ); } public void singleValuedField_withOffset() throws Exception { int interval1 = 10; int offset = 5; - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval1).offset(offset) - ).get(); - - // 
from setup we have between 6 and 20 documents, each with value 1 in test field - int expectedNumberOfBuckets = (offset >= (numDocs % interval + 1)) ? numValueBuckets : numValueBuckets + 1; - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(expectedNumberOfBuckets)); - - // first bucket should start at -5, contain 4 documents - Histogram.Bucket bucket = histo.getBuckets().get(0); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(-5L)); - assertThat(bucket.getDocCount(), equalTo(4L)); - - // last bucket should have (numDocs % interval + 1) docs - bucket = histo.getBuckets().get(0); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(numDocs % interval1 + 5L)); - assertThat(bucket.getDocCount(), equalTo((numDocs % interval) + 1L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval1).offset(offset)), + response -> { + + // from setup we have between 6 and 20 documents, each with value 1 in test field + int expectedNumberOfBuckets = (offset >= (numDocs % interval + 1)) ? numValueBuckets : numValueBuckets + 1; + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(expectedNumberOfBuckets)); + + // first bucket should start at -5, contain 4 documents + Histogram.Bucket bucket = histo.getBuckets().get(0); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(-5L)); + assertThat(bucket.getDocCount(), equalTo(4L)); + + // last bucket should have (numDocs % interval + 1) docs + bucket = histo.getBuckets().get(0); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(numDocs % interval1 + 5L)); + assertThat(bucket.getDocCount(), equalTo((numDocs % interval) + 1L)); + } + ); } /** @@ -286,352 +288,365 @@ public void singleValuedField_withOffset() throws Exception { */ public void testSingleValuedFieldWithRandomOffset() throws Exception { int offset = randomIntBetween(2, interval); - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).offset(offset) - ).get(); - assertNoFailures(response); - // shifting by offset>2 creates new extra bucket [0,offset-1] - // if offset is >= number of values in original last bucket, that effect is canceled - int expectedNumberOfBuckets = (offset >= (numDocs % interval + 1)) ? 
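/*
 * The ternary here encodes a boundary effect worth spelling out: values 1..numDocs leave
 * (numDocs % interval + 1) values in the last unshifted bucket. Shifting the grid by `offset`
 * adds a bucket on the low end, but once offset >= that tail size the old top two buckets merge,
 * so the total count is unchanged. With hypothetical values numDocs = 13 and interval = 10
 * (tail = 4): offset = 5 gives [-5,5) -> {1..4} and [5,15) -> {5..13}, still 2 buckets;
 * offset = 3 gives [-7,3) -> {1,2}, [3,13) -> {3..12}, [13,23) -> {13}, one bucket more.
 */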
numValueBuckets : numValueBuckets + 1; - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(expectedNumberOfBuckets)); - - long docsCounted = 0; - for (int i = 0; i < expectedNumberOfBuckets; ++i) { - Histogram.Bucket bucket = histo.getBuckets().get(i); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) ((i - 1) * interval + offset))); - if (i == 0) { - // first bucket - long expectedFirstBucketCount = offset - 1; - assertThat(bucket.getDocCount(), equalTo(expectedFirstBucketCount)); - docsCounted += expectedFirstBucketCount; - } else if (i < expectedNumberOfBuckets - 1) { - assertThat(bucket.getDocCount(), equalTo((long) interval)); - docsCounted += interval; - } else { - assertThat(bucket.getDocCount(), equalTo((long) numDocs - docsCounted)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).offset(offset)), + response -> { + // shifting by offset>2 creates new extra bucket [0,offset-1] + // if offset is >= number of values in original last bucket, that effect is canceled + int expectedNumberOfBuckets = (offset >= (numDocs % interval + 1)) ? numValueBuckets : numValueBuckets + 1; + + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(expectedNumberOfBuckets)); + + long docsCounted = 0; + for (int i = 0; i < expectedNumberOfBuckets; ++i) { + Histogram.Bucket bucket = histo.getBuckets().get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) ((i - 1) * interval + offset))); + if (i == 0) { + // first bucket + long expectedFirstBucketCount = offset - 1; + assertThat(bucket.getDocCount(), equalTo(expectedFirstBucketCount)); + docsCounted += expectedFirstBucketCount; + } else if (i < expectedNumberOfBuckets - 1) { + assertThat(bucket.getDocCount(), equalTo((long) interval)); + docsCounted += interval; + } else { + assertThat(bucket.getDocCount(), equalTo((long) numDocs - docsCounted)); + } + } } - } + ); } public void testSingleValuedFieldOrderedByKeyAsc() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(BucketOrder.key(true)) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); - - List buckets = new ArrayList<>(histo.getBuckets()); - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(BucketOrder.key(true)) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), 
equalTo(numValueBuckets)); + + List buckets = new ArrayList<>(histo.getBuckets()); + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); + } + } + ); } public void testsingleValuedFieldOrderedByKeyDesc() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(BucketOrder.key(false)) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); - - List buckets = new ArrayList<>(histo.getBuckets()); - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(numValueBuckets - i - 1); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(BucketOrder.key(false)) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); + + List buckets = new ArrayList<>(histo.getBuckets()); + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(numValueBuckets - i - 1); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); + } + } + ); } public void testSingleValuedFieldOrderedByCountAsc() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(BucketOrder.count(true)) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); - - Set buckets = new HashSet<>(); - List histoBuckets = new ArrayList<>(histo.getBuckets()); - long previousCount = Long.MIN_VALUE; - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = histoBuckets.get(i); - assertThat(bucket, notNullValue()); - long key = ((Number) bucket.getKey()).longValue(); - assertEquals(0, key % interval); - assertTrue(buckets.add(key)); - assertThat(bucket.getDocCount(), equalTo(valueCounts[(int) (key / interval)])); - assertThat(bucket.getDocCount(), greaterThanOrEqualTo(previousCount)); - previousCount = bucket.getDocCount(); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(BucketOrder.count(true)) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); + + Set buckets = new HashSet<>(); + List histoBuckets = new 
ArrayList<>(histo.getBuckets()); + long previousCount = Long.MIN_VALUE; + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = histoBuckets.get(i); + assertThat(bucket, notNullValue()); + long key = ((Number) bucket.getKey()).longValue(); + assertEquals(0, key % interval); + assertTrue(buckets.add(key)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[(int) (key / interval)])); + assertThat(bucket.getDocCount(), greaterThanOrEqualTo(previousCount)); + previousCount = bucket.getDocCount(); + } + } + ); } public void testSingleValuedFieldOrderedByCountDesc() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(BucketOrder.count(false)) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); - - Set buckets = new HashSet<>(); - List histoBuckets = new ArrayList<>(histo.getBuckets()); - long previousCount = Long.MAX_VALUE; - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = histoBuckets.get(i); - assertThat(bucket, notNullValue()); - long key = ((Number) bucket.getKey()).longValue(); - assertEquals(0, key % interval); - assertTrue(buckets.add(key)); - assertThat(bucket.getDocCount(), equalTo(valueCounts[(int) (key / interval)])); - assertThat(bucket.getDocCount(), lessThanOrEqualTo(previousCount)); - previousCount = bucket.getDocCount(); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(BucketOrder.count(false)) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); + + Set buckets = new HashSet<>(); + List histoBuckets = new ArrayList<>(histo.getBuckets()); + long previousCount = Long.MAX_VALUE; + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = histoBuckets.get(i); + assertThat(bucket, notNullValue()); + long key = ((Number) bucket.getKey()).longValue(); + assertEquals(0, key % interval); + assertTrue(buckets.add(key)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[(int) (key / interval)])); + assertThat(bucket.getDocCount(), lessThanOrEqualTo(previousCount)); + previousCount = bucket.getDocCount(); + } + } + ); } public void testSingleValuedFieldWithSubAggregation() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); - assertThat(((InternalAggregation) histo).getProperty("_bucket_count"), equalTo(numValueBuckets)); - Object[] propertiesKeys = (Object[]) ((InternalAggregation) histo).getProperty("_key"); - Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) histo).getProperty("_count"); - Object[] propertiesCounts = (Object[]) ((InternalAggregation) histo).getProperty("sum.value"); - - List buckets = new 
ArrayList<>(histo.getBuckets()); - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); - assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - long s = 0; - for (int j = 0; j < numDocs; ++j) { - if ((j + 1) / interval == i) { - s += j + 1; + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); + assertThat(((InternalAggregation) histo).getProperty("_bucket_count"), equalTo(numValueBuckets)); + Object[] propertiesKeys = (Object[]) ((InternalAggregation) histo).getProperty("_key"); + Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) histo).getProperty("_count"); + Object[] propertiesCounts = (Object[]) ((InternalAggregation) histo).getProperty("sum.value"); + + List buckets = new ArrayList<>(histo.getBuckets()); + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + long s = 0; + for (int j = 0; j < numDocs; ++j) { + if ((j + 1) / interval == i) { + s += j + 1; + } + } + assertThat(sum.value(), equalTo((double) s)); + assertEquals(propertiesKeys[i], (double) i * interval); + assertThat(propertiesDocCounts[i], equalTo(valueCounts[i])); + assertThat(propertiesCounts[i], equalTo((double) s)); } } - assertThat(sum.value(), equalTo((double) s)); - assertEquals(propertiesKeys[i], (double) i * interval); - assertThat(propertiesDocCounts[i], equalTo(valueCounts[i])); - assertThat(propertiesCounts[i], equalTo((double) s)); - } + ); } public void testSingleValuedFieldOrderedBySubAggregationAsc() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .order(BucketOrder.aggregation("sum", true)) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); - - Set visited = new HashSet<>(); - double previousSum = Double.NEGATIVE_INFINITY; - List buckets = new ArrayList<>(histo.getBuckets()); - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - long key = ((Number) bucket.getKey()).longValue(); - assertTrue(visited.add(key)); - int b = (int) (key / interval); - assertThat(bucket.getDocCount(), equalTo(valueCounts[b])); - assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); - Sum sum 
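/*
 * The getProperty("_key"), getProperty("_count") and getProperty("sum.value") calls kept in the
 * hunk above read whole columns off the aggregation tree: "_key" and "_count" yield one entry
 * per bucket in bucket order, and a dotted path such as "sum.value" descends into each bucket's
 * named sub-aggregation and extracts that metric, which is why propertiesCounts[i] is compared
 * against the sum recomputed for bucket i. Usage as in the test, for the first bucket:
 *
 *     double firstBucketSum = (double) ((Object[]) ((InternalAggregation) histo).getProperty("sum.value"))[0];
 */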
= bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - long s = 0; - for (int j = 0; j < numDocs; ++j) { - if ((j + 1) / interval == b) { - s += j + 1; + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .order(BucketOrder.aggregation("sum", true)) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); + + Set visited = new HashSet<>(); + double previousSum = Double.NEGATIVE_INFINITY; + List buckets = new ArrayList<>(histo.getBuckets()); + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + long key = ((Number) bucket.getKey()).longValue(); + assertTrue(visited.add(key)); + int b = (int) (key / interval); + assertThat(bucket.getDocCount(), equalTo(valueCounts[b])); + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + long s = 0; + for (int j = 0; j < numDocs; ++j) { + if ((j + 1) / interval == b) { + s += j + 1; + } + } + assertThat(sum.value(), equalTo((double) s)); + assertThat(sum.value(), greaterThanOrEqualTo(previousSum)); + previousSum = s; } } - assertThat(sum.value(), equalTo((double) s)); - assertThat(sum.value(), greaterThanOrEqualTo(previousSum)); - previousSum = s; - } + ); } public void testSingleValuedFieldOrderedBySubAggregationDesc() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .order(BucketOrder.aggregation("sum", false)) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); - - Set visited = new HashSet<>(); - double previousSum = Double.POSITIVE_INFINITY; - List buckets = new ArrayList<>(histo.getBuckets()); - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - long key = ((Number) bucket.getKey()).longValue(); - assertTrue(visited.add(key)); - int b = (int) (key / interval); - assertThat(bucket.getDocCount(), equalTo(valueCounts[b])); - assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - long s = 0; - for (int j = 0; j < numDocs; ++j) { - if ((j + 1) / interval == b) { - s += j + 1; + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .order(BucketOrder.aggregation("sum", false)) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); + + Set visited = new HashSet<>(); + double previousSum = Double.POSITIVE_INFINITY; + List buckets = new 
ArrayList<>(histo.getBuckets()); + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + long key = ((Number) bucket.getKey()).longValue(); + assertTrue(visited.add(key)); + int b = (int) (key / interval); + assertThat(bucket.getDocCount(), equalTo(valueCounts[b])); + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + long s = 0; + for (int j = 0; j < numDocs; ++j) { + if ((j + 1) / interval == b) { + s += j + 1; + } + } + assertThat(sum.value(), equalTo((double) s)); + assertThat(sum.value(), lessThanOrEqualTo(previousSum)); + previousSum = s; } } - assertThat(sum.value(), equalTo((double) s)); - assertThat(sum.value(), lessThanOrEqualTo(previousSum)); - previousSum = s; - } + ); } public void testSingleValuedFieldOrderedByMultiValuedSubAggregationDesc() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .order(BucketOrder.aggregation("stats.sum", false)) - .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); - - Set visited = new HashSet<>(); - double previousSum = Double.POSITIVE_INFINITY; - - List buckets = new ArrayList<>(histo.getBuckets()); - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - long key = ((Number) bucket.getKey()).longValue(); - assertTrue(visited.add(key)); - int b = (int) (key / interval); - assertThat(bucket.getDocCount(), equalTo(valueCounts[b])); - assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); - Stats stats = bucket.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - long s = 0; - for (int j = 0; j < numDocs; ++j) { - if ((j + 1) / interval == b) { - s += j + 1; + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .order(BucketOrder.aggregation("stats.sum", false)) + .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); + + Set visited = new HashSet<>(); + double previousSum = Double.POSITIVE_INFINITY; + + List buckets = new ArrayList<>(histo.getBuckets()); + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + long key = ((Number) bucket.getKey()).longValue(); + assertTrue(visited.add(key)); + int b = (int) (key / interval); + assertThat(bucket.getDocCount(), equalTo(valueCounts[b])); + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + Stats stats = bucket.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + long s = 0; + for (int j = 0; j < numDocs; ++j) { + if ((j + 1) / interval == b) { + s += j + 1; + } + } + assertThat(stats.getSum(), equalTo((double) s)); + assertThat(stats.getSum(), lessThanOrEqualTo(previousSum)); + previousSum = s; } } - 
assertThat(stats.getSum(), equalTo((double) s)); - assertThat(stats.getSum(), lessThanOrEqualTo(previousSum)); - previousSum = s; - } + ); } public void testSingleValuedFieldOrderedBySubAggregationDescDeepOrderPath() throws Exception { boolean asc = randomBoolean(); - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .order(BucketOrder.aggregation("filter>max", asc)) - .subAggregation(filter("filter", matchAllQuery()).subAggregation(max("max").field(SINGLE_VALUED_FIELD_NAME))) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); - - Set visited = new HashSet<>(); - double prevMax = asc ? Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY; - List buckets = new ArrayList<>(histo.getBuckets()); - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - long key = ((Number) bucket.getKey()).longValue(); - assertTrue(visited.add(key)); - int b = (int) (key / interval); - assertThat(bucket.getDocCount(), equalTo(valueCounts[b])); - assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); - Filter filter = bucket.getAggregations().get("filter"); - assertThat(filter, notNullValue()); - assertThat(bucket.getDocCount(), equalTo(filter.getDocCount())); - Max max = filter.getAggregations().get("max"); - assertThat(max, Matchers.notNullValue()); - assertThat(max.value(), asc ? greaterThanOrEqualTo(prevMax) : lessThanOrEqualTo(prevMax)); - prevMax = max.value(); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .order(BucketOrder.aggregation("filter>max", asc)) + .subAggregation(filter("filter", matchAllQuery()).subAggregation(max("max").field(SINGLE_VALUED_FIELD_NAME))) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); + + Set visited = new HashSet<>(); + double prevMax = asc ? Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY; + List buckets = new ArrayList<>(histo.getBuckets()); + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + long key = ((Number) bucket.getKey()).longValue(); + assertTrue(visited.add(key)); + int b = (int) (key / interval); + assertThat(bucket.getDocCount(), equalTo(valueCounts[b])); + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + Filter filter = bucket.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(bucket.getDocCount(), equalTo(filter.getDocCount())); + Max max = filter.getAggregations().get("max"); + assertThat(max, Matchers.notNullValue()); + assertThat(max.value(), asc ? 
greaterThanOrEqualTo(prevMax) : lessThanOrEqualTo(prevMax)); + prevMax = max.value(); + } + } + ); } public void testSingleValuedFieldOrderedByTieBreaker() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .order(BucketOrder.aggregation("max_constant", randomBoolean())) - .subAggregation(max("max_constant").field("constant")) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); - - List buckets = new ArrayList<>(histo.getBuckets()); - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .order(BucketOrder.aggregation("max_constant", randomBoolean())) + .subAggregation(max("max_constant").field("constant")) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); + + List buckets = new ArrayList<>(histo.getBuckets()); + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); + } + } + ); } public void testSingleValuedFieldOrderedByIllegalAgg() throws Exception { @@ -662,243 +677,249 @@ public void testSingleValuedFieldOrderedByIllegalAgg() throws Exception { } public void testSingleValuedFieldWithValueScript() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", emptyMap())) - .interval(interval) - ).get(); - - assertNoFailures(response); - - final int numBuckets = (numDocs + 1) / interval - 2 / interval + 1; - final long[] counts = new long[(numDocs + 1) / interval + 1]; - for (int i = 0; i < numDocs; ++i) { - ++counts[(i + 2) / interval]; - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", emptyMap())) + .interval(interval) + ), + response -> { + final int numBuckets = (numDocs + 1) / interval - 2 / interval + 1; + final long[] counts = new long[(numDocs + 1) / interval + 1]; + for (int i = 0; i < numDocs; ++i) { + ++counts[(i + 2) / interval]; + } - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numBuckets)); - - for (int i = 0; i < numBuckets; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - int key = ((2 / interval) + i) * interval; - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) key)); 
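/*
 * The arithmetic in testSingleValuedFieldWithValueScript follows from the `_value + 1` script:
 * indexed values 1..numDocs become 2..numDocs + 1, so the first populated key is
 * (2 / interval) * interval and the bucket count is (numDocs + 1) / interval - 2 / interval + 1
 * (integer division throughout). With hypothetical values numDocs = 13 and interval = 10, the
 * shifted values 2..14 fall into [0,10) and [10,20), and the formula gives 14 / 10 - 0 + 1 = 2.
 */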
- assertThat(bucket.getDocCount(), equalTo(counts[key / interval])); - } + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numBuckets)); + + for (int i = 0; i < numBuckets; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + int key = ((2 / interval) + i) * interval; + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) key)); + assertThat(bucket.getDocCount(), equalTo(counts[key / interval])); + } + } + ); } public void testMultiValuedField() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation(histogram("histo").field(MULTI_VALUED_FIELD_NAME).interval(interval)) - .get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numValuesBuckets)); - - for (int i = 0; i < numValuesBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - assertThat(bucket.getDocCount(), equalTo(valuesCounts[i])); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation(histogram("histo").field(MULTI_VALUED_FIELD_NAME).interval(interval)), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numValuesBuckets)); + + for (int i = 0; i < numValuesBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valuesCounts[i])); + } + } + ); } public void testMultiValuedFieldOrderedByKeyDesc() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field(MULTI_VALUED_FIELD_NAME).interval(interval).order(BucketOrder.key(false)) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(numValuesBuckets)); - - List buckets = new ArrayList<>(histo.getBuckets()); - for (int i = 0; i < numValuesBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(numValuesBuckets - i - 1); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - assertThat(bucket.getDocCount(), equalTo(valuesCounts[i])); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field(MULTI_VALUED_FIELD_NAME).interval(interval).order(BucketOrder.key(false)) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(numValuesBuckets)); + + List buckets = new ArrayList<>(histo.getBuckets()); + for (int i = 0; i < numValuesBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(numValuesBuckets - i - 1); + assertThat(bucket, notNullValue()); + 
assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valuesCounts[i])); + } + } + ); } public void testMultiValuedFieldWithValueScript() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field(MULTI_VALUED_FIELD_NAME) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", emptyMap())) - .interval(interval) - ).get(); - - assertNoFailures(response); - - final int numBuckets = (numDocs + 2) / interval - 2 / interval + 1; - final long[] counts = new long[(numDocs + 2) / interval + 1]; - for (int i = 0; i < numDocs; ++i) { - final int bucket1 = (i + 2) / interval; - final int bucket2 = (i + 3) / interval; - ++counts[bucket1]; - if (bucket1 != bucket2) { - ++counts[bucket2]; - } - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field(MULTI_VALUED_FIELD_NAME) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", emptyMap())) + .interval(interval) + ), + response -> { + final int numBuckets = (numDocs + 2) / interval - 2 / interval + 1; + final long[] counts = new long[(numDocs + 2) / interval + 1]; + for (int i = 0; i < numDocs; ++i) { + final int bucket1 = (i + 2) / interval; + final int bucket2 = (i + 3) / interval; + ++counts[bucket1]; + if (bucket1 != bucket2) { + ++counts[bucket2]; + } + } - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numBuckets)); - - for (int i = 0; i < numBuckets; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - int key = ((2 / interval) + i) * interval; - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) key)); - assertThat(bucket.getDocCount(), equalTo(counts[key / interval])); - } + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numBuckets)); + + for (int i = 0; i < numBuckets; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + int key = ((2 / interval) + i) * interval; + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) key)); + assertThat(bucket.getDocCount(), equalTo(counts[key / interval])); + } + } + ); } public void testScriptSingleValue() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['l_value'].value", emptyMap())) - .interval(interval) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numValueBuckets)); - - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['l_value'].value", 
emptyMap())) + .interval(interval) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numValueBuckets)); + + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); + } + } + ); } public void testScriptMultiValued() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['l_values']", emptyMap())) - .interval(interval) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numValuesBuckets)); - - for (int i = 0; i < numValuesBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - assertThat(bucket.getDocCount(), equalTo(valuesCounts[i])); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['l_values']", emptyMap())) + .interval(interval) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numValuesBuckets)); + + for (int i = 0; i < numValuesBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valuesCounts[i])); + } + } + ); } public void testUnmapped() throws Exception { - SearchResponse response = prepareSearch("idx_unmapped").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(0)); + assertNoFailuresAndResponse( + prepareSearch("idx_unmapped").addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(0)); + } + ); } public void testPartiallyUnmapped() throws Exception { - SearchResponse response = prepareSearch("idx", "idx_unmapped").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numValueBuckets)); - - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = 
buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); - } + assertNoFailuresAndResponse( + prepareSearch("idx", "idx_unmapped").addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numValueBuckets)); + + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); + } + } + ); } public void testPartiallyUnmappedWithExtendedBounds() throws Exception { - SearchResponse response = prepareSearch("idx", "idx_unmapped").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .extendedBounds(-1 * 2 * interval, valueCounts.length * interval) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numValueBuckets + 3)); - - Histogram.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) -1 * 2 * interval)); - assertThat(bucket.getDocCount(), equalTo(0L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) -1 * interval)); - assertThat(bucket.getDocCount(), equalTo(0L)); - - for (int i = 2; i < numValueBuckets + 2; ++i) { - bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) (i - 2) * interval)); - assertThat(bucket.getDocCount(), equalTo(valueCounts[i - 2])); - } + assertNoFailuresAndResponse( + prepareSearch("idx", "idx_unmapped").addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .extendedBounds(-1 * 2 * interval, valueCounts.length * interval) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numValueBuckets + 3)); + + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) -1 * 2 * interval)); + assertThat(bucket.getDocCount(), equalTo(0L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) -1 * interval)); + assertThat(bucket.getDocCount(), equalTo(0L)); + + for (int i = 2; i < numValueBuckets + 2; ++i) { + bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) (i - 2) * interval)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[i - 2])); + } + } + ); } public void testEmptyAggregation() throws Exception { - SearchResponse searchResponse = prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) - 
.addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .interval(1L) - .minDocCount(0) - .subAggregation(histogram("sub_histo").field(SINGLE_VALUED_FIELD_NAME).interval(1L)) - ) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - Histogram histo = searchResponse.getAggregations().get("histo"); - assertThat(histo, Matchers.notNullValue()); - List buckets = histo.getBuckets(); - Histogram.Bucket bucket = buckets.get(1); - assertThat(bucket, Matchers.notNullValue()); - - histo = bucket.getAggregations().get("sub_histo"); - assertThat(histo, Matchers.notNullValue()); - assertThat(histo.getName(), equalTo("sub_histo")); - assertThat(histo.getBuckets().isEmpty(), is(true)); + assertNoFailuresAndResponse( + prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(1L) + .minDocCount(0) + .subAggregation(histogram("sub_histo").field(SINGLE_VALUED_FIELD_NAME).interval(1L)) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, Matchers.notNullValue()); + List buckets = histo.getBuckets(); + Histogram.Bucket bucket = buckets.get(1); + assertThat(bucket, Matchers.notNullValue()); + + histo = bucket.getAggregations().get("sub_histo"); + assertThat(histo, Matchers.notNullValue()); + assertThat(histo.getName(), equalTo("sub_histo")); + assertThat(histo.getBuckets().isEmpty(), is(true)); + } + ); } public void testSingleValuedFieldWithExtendedBounds() throws Exception { @@ -934,18 +955,35 @@ public void testSingleValuedFieldWithExtendedBounds() throws Exception { int bucketsCount = numValueBuckets + addedBucketsLeft + addedBucketsRight; long[] extendedValueCounts = new long[bucketsCount]; System.arraycopy(valueCounts, 0, extendedValueCounts, addedBucketsLeft, valueCounts.length); - - SearchResponse response = null; + final long startKey = Math.min(boundsMinKey, 0); try { - response = prepareSearch("idx").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0).extendedBounds(boundsMin, boundsMax) - ).get(); - - if (invalidBoundsError) { - fail("Expected an exception to be thrown when bounds.min is greater than bounds.max"); - return; - } - + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .minDocCount(0) + .extendedBounds(boundsMin, boundsMax) + ), + response -> { + if (invalidBoundsError) { + fail("Expected an exception to be thrown when bounds.min is greater than bounds.max"); + } + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(bucketsCount)); + + long key = startKey; + for (int i = 0; i < bucketsCount; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(extendedValueCounts[i])); + key += interval; + } + } + ); } catch (IllegalArgumentException e) { if (invalidBoundsError) { // expected @@ -954,22 +992,6 @@ public void testSingleValuedFieldWithExtendedBounds() throws Exception { throw e; } } - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - 
assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(bucketsCount)); - - long key = Math.min(boundsMinKey, 0); - for (int i = 0; i < bucketsCount; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(extendedValueCounts[i])); - key += interval; - } } public void testEmptyWithExtendedBounds() throws Exception { @@ -1005,47 +1027,42 @@ public void testEmptyWithExtendedBounds() throws Exception { int bucketsCount = (int) ((boundsMaxKey - boundsMinKey) / interval) + 1; long[] extendedValueCounts = new long[valueCounts.length + addedBucketsLeft + addedBucketsRight]; System.arraycopy(valueCounts, 0, extendedValueCounts, addedBucketsLeft, valueCounts.length); - - SearchResponse response = null; + final long startKey = boundsMinKey; try { - response = prepareSearch("idx").setQuery(QueryBuilders.termQuery("foo", "bar")) - .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .minDocCount(0) - .extendedBounds(boundsMin, boundsMax) - ) - .get(); - - if (invalidBoundsError) { - fail("Expected an exception to be thrown when bounds.min is greater than bounds.max"); - return; - } - + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(QueryBuilders.termQuery("foo", "bar")) + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .minDocCount(0) + .extendedBounds(boundsMin, boundsMax) + ), + response -> { + if (invalidBoundsError) { + fail("Expected an exception to be thrown when bounds.min is greater than bounds.max"); + } + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(bucketsCount)); + + long key = startKey; + for (int i = 0; i < bucketsCount; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(0L)); + key += interval; + } + } + ); } catch (IllegalArgumentException e) { - if (invalidBoundsError) { - // expected - return; - } else { + if (invalidBoundsError == false) { throw e; } } - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(bucketsCount)); - - long key = boundsMinKey; - for (int i = 0; i < bucketsCount; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(0L)); - key += interval; - } } /** @@ -1069,16 +1086,18 @@ public void testDecimalIntervalAndOffset() throws Exception { client().prepareIndex("decimal_values").setId("2").setSource("d", 0.1) ); - SearchResponse r = prepareSearch("decimal_values").addAggregation(histogram("histo").field("d").interval(0.7).offset(0.05)).get(); - assertNoFailures(r); - - Histogram histogram = r.getAggregations().get("histo"); - List buckets = histogram.getBuckets(); - assertEquals(2, buckets.size()); - assertEquals(-0.65, (double) buckets.get(0).getKey(), 0.01d); - 
assertEquals(1, buckets.get(0).getDocCount()); - assertEquals(0.05, (double) buckets.get(1).getKey(), 0.01d); - assertEquals(1, buckets.get(1).getDocCount()); + assertNoFailuresAndResponse( + prepareSearch("decimal_values").addAggregation(histogram("histo").field("d").interval(0.7).offset(0.05)), + response -> { + Histogram histogram = response.getAggregations().get("histo"); + List buckets = histogram.getBuckets(); + assertEquals(2, buckets.size()); + assertEquals(-0.65, (double) buckets.get(0).getKey(), 0.01d); + assertEquals(1, buckets.get(0).getDocCount()); + assertEquals(0.05, (double) buckets.get(1).getKey(), 0.01d); + assertEquals(1, buckets.get(1).getDocCount()); + } + ); } /** @@ -1107,15 +1126,15 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - histogram("histo").field("d") - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", emptyMap())) - .interval(0.7) - .offset(0.05) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + histogram("histo").field("d") + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", emptyMap())) + .interval(0.7) + .offset(0.05) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -1127,15 +1146,15 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a deterministic script gets cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - histogram("histo").field("d") - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", emptyMap())) - .interval(0.7) - .offset(0.05) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + histogram("histo").field("d") + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", emptyMap())) + .interval(0.7) + .offset(0.05) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -1147,8 +1166,9 @@ public void testScriptCaching() throws Exception { ); // Ensure that non-scripted requests are cached as normal - r = prepareSearch("cache_test_idx").setSize(0).addAggregation(histogram("histo").field("d").interval(0.7).offset(0.05)).get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0).addAggregation(histogram("histo").field("d").interval(0.7).offset(0.05)) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -1227,64 +1247,67 @@ public void testHardBounds() throws Exception { client().prepareIndex("test").setId("3").setSource("d", 0.1) ); - SearchResponse r = prepareSearch("test").addAggregation( - histogram("histo").field("d").interval(0.1).hardBounds(new DoubleBounds(0.0, null)) - ).get(); - assertNoFailures(r); - - Histogram histogram = r.getAggregations().get("histo"); - List buckets = histogram.getBuckets(); - assertEquals(5, buckets.size()); - assertEquals(0.1, (double) buckets.get(0).getKey(), 0.01d); - assertEquals(0.5, (double) buckets.get(4).getKey(), 0.01d); - - r = prepareSearch("test").addAggregation(histogram("histo").field("d").interval(0.1).hardBounds(new DoubleBounds(null, 
0.0))).get(); - assertNoFailures(r); - - histogram = r.getAggregations().get("histo"); - buckets = histogram.getBuckets(); - assertEquals(1, buckets.size()); - assertEquals(-0.6, (double) buckets.get(0).getKey(), 0.01d); - - r = prepareSearch("test").addAggregation(histogram("histo").field("d").interval(0.1).hardBounds(new DoubleBounds(0.0, 0.3))).get(); - assertNoFailures(r); - - histogram = r.getAggregations().get("histo"); - buckets = histogram.getBuckets(); - assertEquals(1, buckets.size()); - assertEquals(0.1, (double) buckets.get(0).getKey(), 0.01d); + assertNoFailuresAndResponse( + prepareSearch("test").addAggregation(histogram("histo").field("d").interval(0.1).hardBounds(new DoubleBounds(0.0, null))), + response -> { + Histogram histogram = response.getAggregations().get("histo"); + List buckets = histogram.getBuckets(); + assertEquals(5, buckets.size()); + assertEquals(0.1, (double) buckets.get(0).getKey(), 0.01d); + assertEquals(0.5, (double) buckets.get(4).getKey(), 0.01d); + } + ); + assertNoFailuresAndResponse( + prepareSearch("test").addAggregation(histogram("histo").field("d").interval(0.1).hardBounds(new DoubleBounds(null, 0.0))), + response -> { + Histogram histogram = response.getAggregations().get("histo"); + List buckets = histogram.getBuckets(); + assertEquals(1, buckets.size()); + assertEquals(-0.6, (double) buckets.get(0).getKey(), 0.01d); + } + ); + assertNoFailuresAndResponse( + prepareSearch("test").addAggregation(histogram("histo").field("d").interval(0.1).hardBounds(new DoubleBounds(0.0, 0.3))), + response -> { + Histogram histogram = response.getAggregations().get("histo"); + List buckets = histogram.getBuckets(); + assertEquals(1, buckets.size()); + assertEquals(0.1, (double) buckets.get(0).getKey(), 0.01d); + } + ); } private void assertMultiSortResponse(long[] expectedKeys, BucketOrder... 
order) { - SearchResponse response = prepareSearch("sort_idx").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .interval(1) - .order(BucketOrder.compound(order)) - .subAggregation(avg("avg_l").field("l")) - .subAggregation(sum("sum_d").field("d")) - ).get(); - - assertNoFailures(response); - - Histogram histogram = response.getAggregations().get("histo"); - assertThat(histogram, notNullValue()); - assertThat(histogram.getName(), equalTo("histo")); - assertThat(histogram.getBuckets().size(), equalTo(expectedKeys.length)); - - int i = 0; - for (Histogram.Bucket bucket : histogram.getBuckets()) { - assertThat(bucket, notNullValue()); - assertThat(key(bucket), equalTo(expectedKeys[i])); - assertThat(bucket.getDocCount(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("_count"))); - Avg avg = bucket.getAggregations().get("avg_l"); - assertThat(avg, notNullValue()); - assertThat(avg.getValue(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("avg_l"))); - Sum sum = bucket.getAggregations().get("sum_d"); - assertThat(sum, notNullValue()); - assertThat(sum.value(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("sum_d"))); - i++; - } + assertNoFailuresAndResponse( + prepareSearch("sort_idx").addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(1) + .order(BucketOrder.compound(order)) + .subAggregation(avg("avg_l").field("l")) + .subAggregation(sum("sum_d").field("d")) + ), + response -> { + Histogram histogram = response.getAggregations().get("histo"); + assertThat(histogram, notNullValue()); + assertThat(histogram.getName(), equalTo("histo")); + assertThat(histogram.getBuckets().size(), equalTo(expectedKeys.length)); + + int i = 0; + for (Histogram.Bucket bucket : histogram.getBuckets()) { + assertThat(bucket, notNullValue()); + assertThat(key(bucket), equalTo(expectedKeys[i])); + assertThat(bucket.getDocCount(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("_count"))); + Avg avg = bucket.getAggregations().get("avg_l"); + assertThat(avg, notNullValue()); + assertThat(avg.getValue(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("avg_l"))); + Sum sum = bucket.getAggregations().get("sum_d"); + assertThat(sum, notNullValue()); + assertThat(sum.value(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("sum_d"))); + i++; + } + } + ); } private long key(Histogram.Bucket bucket) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/IpRangeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/IpRangeIT.java index 8e4c503b89bb5..449d0626ade3a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/IpRangeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/IpRangeIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.MockScriptPlugin; @@ -25,7 +24,7 @@ import java.util.function.Function; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; 
import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; @@ -64,152 +63,167 @@ public void setupSuiteScopeCluster() throws Exception { } public void testSingleValuedField() { - SearchResponse rsp = prepareSearch("idx").addAggregation( - AggregationBuilders.ipRange("my_range") - .field("ip") - .addUnboundedTo("192.168.1.0") - .addRange("192.168.1.0", "192.168.1.10") - .addUnboundedFrom("192.168.1.10") - ).get(); - assertNoFailures(rsp); - Range range = rsp.getAggregations().get("my_range"); - assertEquals(3, range.getBuckets().size()); - - Range.Bucket bucket1 = range.getBuckets().get(0); - assertNull(bucket1.getFrom()); - assertEquals("192.168.1.0", bucket1.getTo()); - assertEquals("*-192.168.1.0", bucket1.getKey()); - assertEquals(0, bucket1.getDocCount()); - - Range.Bucket bucket2 = range.getBuckets().get(1); - assertEquals("192.168.1.0", bucket2.getFrom()); - assertEquals("192.168.1.10", bucket2.getTo()); - assertEquals("192.168.1.0-192.168.1.10", bucket2.getKey()); - assertEquals(1, bucket2.getDocCount()); - - Range.Bucket bucket3 = range.getBuckets().get(2); - assertEquals("192.168.1.10", bucket3.getFrom()); - assertNull(bucket3.getTo()); - assertEquals("192.168.1.10-*", bucket3.getKey()); - assertEquals(2, bucket3.getDocCount()); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + AggregationBuilders.ipRange("my_range") + .field("ip") + .addUnboundedTo("192.168.1.0") + .addRange("192.168.1.0", "192.168.1.10") + .addUnboundedFrom("192.168.1.10") + ), + response -> { + Range range = response.getAggregations().get("my_range"); + assertEquals(3, range.getBuckets().size()); + + Range.Bucket bucket1 = range.getBuckets().get(0); + assertNull(bucket1.getFrom()); + assertEquals("192.168.1.0", bucket1.getTo()); + assertEquals("*-192.168.1.0", bucket1.getKey()); + assertEquals(0, bucket1.getDocCount()); + + Range.Bucket bucket2 = range.getBuckets().get(1); + assertEquals("192.168.1.0", bucket2.getFrom()); + assertEquals("192.168.1.10", bucket2.getTo()); + assertEquals("192.168.1.0-192.168.1.10", bucket2.getKey()); + assertEquals(1, bucket2.getDocCount()); + + Range.Bucket bucket3 = range.getBuckets().get(2); + assertEquals("192.168.1.10", bucket3.getFrom()); + assertNull(bucket3.getTo()); + assertEquals("192.168.1.10-*", bucket3.getKey()); + assertEquals(2, bucket3.getDocCount()); + } + ); } public void testMultiValuedField() { - SearchResponse rsp = prepareSearch("idx").addAggregation( - AggregationBuilders.ipRange("my_range") - .field("ips") - .addUnboundedTo("192.168.1.0") - .addRange("192.168.1.0", "192.168.1.10") - .addUnboundedFrom("192.168.1.10") - ).get(); - assertNoFailures(rsp); - Range range = rsp.getAggregations().get("my_range"); - assertEquals(3, range.getBuckets().size()); - - Range.Bucket bucket1 = range.getBuckets().get(0); - assertNull(bucket1.getFrom()); - assertEquals("192.168.1.0", bucket1.getTo()); - assertEquals("*-192.168.1.0", bucket1.getKey()); - assertEquals(1, bucket1.getDocCount()); - - Range.Bucket bucket2 = range.getBuckets().get(1); - assertEquals("192.168.1.0", bucket2.getFrom()); - assertEquals("192.168.1.10", bucket2.getTo()); - assertEquals("192.168.1.0-192.168.1.10", bucket2.getKey()); - assertEquals(1, bucket2.getDocCount()); - - Range.Bucket bucket3 = range.getBuckets().get(2); - assertEquals("192.168.1.10", bucket3.getFrom()); - assertNull(bucket3.getTo()); - assertEquals("192.168.1.10-*", bucket3.getKey()); - assertEquals(2, bucket3.getDocCount()); + assertNoFailuresAndResponse( + 
prepareSearch("idx").addAggregation( + AggregationBuilders.ipRange("my_range") + .field("ips") + .addUnboundedTo("192.168.1.0") + .addRange("192.168.1.0", "192.168.1.10") + .addUnboundedFrom("192.168.1.10") + ), + response -> { + Range range = response.getAggregations().get("my_range"); + assertEquals(3, range.getBuckets().size()); + + Range.Bucket bucket1 = range.getBuckets().get(0); + assertNull(bucket1.getFrom()); + assertEquals("192.168.1.0", bucket1.getTo()); + assertEquals("*-192.168.1.0", bucket1.getKey()); + assertEquals(1, bucket1.getDocCount()); + + Range.Bucket bucket2 = range.getBuckets().get(1); + assertEquals("192.168.1.0", bucket2.getFrom()); + assertEquals("192.168.1.10", bucket2.getTo()); + assertEquals("192.168.1.0-192.168.1.10", bucket2.getKey()); + assertEquals(1, bucket2.getDocCount()); + + Range.Bucket bucket3 = range.getBuckets().get(2); + assertEquals("192.168.1.10", bucket3.getFrom()); + assertNull(bucket3.getTo()); + assertEquals("192.168.1.10-*", bucket3.getKey()); + assertEquals(2, bucket3.getDocCount()); + } + ); } public void testIpMask() { - SearchResponse rsp = prepareSearch("idx").addAggregation( - AggregationBuilders.ipRange("my_range") - .field("ips") - .addMaskRange("::/0") - .addMaskRange("0.0.0.0/0") - .addMaskRange("2001:db8::/64") - ).get(); - assertNoFailures(rsp); - Range range = rsp.getAggregations().get("my_range"); - assertEquals(3, range.getBuckets().size()); - - Range.Bucket bucket1 = range.getBuckets().get(0); - assertEquals("::/0", bucket1.getKey()); - assertEquals(3, bucket1.getDocCount()); - - Range.Bucket bucket2 = range.getBuckets().get(1); - assertEquals("0.0.0.0/0", bucket2.getKey()); - assertEquals(2, bucket2.getDocCount()); - - Range.Bucket bucket3 = range.getBuckets().get(2); - assertEquals("2001:db8::/64", bucket3.getKey()); - assertEquals(1, bucket3.getDocCount()); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + AggregationBuilders.ipRange("my_range") + .field("ips") + .addMaskRange("::/0") + .addMaskRange("0.0.0.0/0") + .addMaskRange("2001:db8::/64") + ), + response -> { + Range range = response.getAggregations().get("my_range"); + assertEquals(3, range.getBuckets().size()); + + Range.Bucket bucket1 = range.getBuckets().get(0); + assertEquals("::/0", bucket1.getKey()); + assertEquals(3, bucket1.getDocCount()); + + Range.Bucket bucket2 = range.getBuckets().get(1); + assertEquals("0.0.0.0/0", bucket2.getKey()); + assertEquals(2, bucket2.getDocCount()); + + Range.Bucket bucket3 = range.getBuckets().get(2); + assertEquals("2001:db8::/64", bucket3.getKey()); + assertEquals(1, bucket3.getDocCount()); + } + ); } public void testPartiallyUnmapped() { - SearchResponse rsp = prepareSearch("idx", "idx_unmapped").addAggregation( - AggregationBuilders.ipRange("my_range") - .field("ip") - .addUnboundedTo("192.168.1.0") - .addRange("192.168.1.0", "192.168.1.10") - .addUnboundedFrom("192.168.1.10") - ).get(); - assertNoFailures(rsp); - Range range = rsp.getAggregations().get("my_range"); - assertEquals(3, range.getBuckets().size()); - - Range.Bucket bucket1 = range.getBuckets().get(0); - assertNull(bucket1.getFrom()); - assertEquals("192.168.1.0", bucket1.getTo()); - assertEquals("*-192.168.1.0", bucket1.getKey()); - assertEquals(0, bucket1.getDocCount()); - - Range.Bucket bucket2 = range.getBuckets().get(1); - assertEquals("192.168.1.0", bucket2.getFrom()); - assertEquals("192.168.1.10", bucket2.getTo()); - assertEquals("192.168.1.0-192.168.1.10", bucket2.getKey()); - assertEquals(1, bucket2.getDocCount()); - - 
Range.Bucket bucket3 = range.getBuckets().get(2); - assertEquals("192.168.1.10", bucket3.getFrom()); - assertNull(bucket3.getTo()); - assertEquals("192.168.1.10-*", bucket3.getKey()); - assertEquals(2, bucket3.getDocCount()); + assertNoFailuresAndResponse( + prepareSearch("idx", "idx_unmapped").addAggregation( + AggregationBuilders.ipRange("my_range") + .field("ip") + .addUnboundedTo("192.168.1.0") + .addRange("192.168.1.0", "192.168.1.10") + .addUnboundedFrom("192.168.1.10") + ), + response -> { + Range range = response.getAggregations().get("my_range"); + assertEquals(3, range.getBuckets().size()); + + Range.Bucket bucket1 = range.getBuckets().get(0); + assertNull(bucket1.getFrom()); + assertEquals("192.168.1.0", bucket1.getTo()); + assertEquals("*-192.168.1.0", bucket1.getKey()); + assertEquals(0, bucket1.getDocCount()); + + Range.Bucket bucket2 = range.getBuckets().get(1); + assertEquals("192.168.1.0", bucket2.getFrom()); + assertEquals("192.168.1.10", bucket2.getTo()); + assertEquals("192.168.1.0-192.168.1.10", bucket2.getKey()); + assertEquals(1, bucket2.getDocCount()); + + Range.Bucket bucket3 = range.getBuckets().get(2); + assertEquals("192.168.1.10", bucket3.getFrom()); + assertNull(bucket3.getTo()); + assertEquals("192.168.1.10-*", bucket3.getKey()); + assertEquals(2, bucket3.getDocCount()); + } + ); } public void testUnmapped() { - SearchResponse rsp = prepareSearch("idx_unmapped").addAggregation( - AggregationBuilders.ipRange("my_range") - .field("ip") - .addUnboundedTo("192.168.1.0") - .addRange("192.168.1.0", "192.168.1.10") - .addUnboundedFrom("192.168.1.10") - ).get(); - assertNoFailures(rsp); - Range range = rsp.getAggregations().get("my_range"); - assertEquals(3, range.getBuckets().size()); - - Range.Bucket bucket1 = range.getBuckets().get(0); - assertNull(bucket1.getFrom()); - assertEquals("192.168.1.0", bucket1.getTo()); - assertEquals("*-192.168.1.0", bucket1.getKey()); - assertEquals(0, bucket1.getDocCount()); - - Range.Bucket bucket2 = range.getBuckets().get(1); - assertEquals("192.168.1.0", bucket2.getFrom()); - assertEquals("192.168.1.10", bucket2.getTo()); - assertEquals("192.168.1.0-192.168.1.10", bucket2.getKey()); - assertEquals(0, bucket2.getDocCount()); - - Range.Bucket bucket3 = range.getBuckets().get(2); - assertEquals("192.168.1.10", bucket3.getFrom()); - assertNull(bucket3.getTo()); - assertEquals("192.168.1.10-*", bucket3.getKey()); - assertEquals(0, bucket3.getDocCount()); + assertNoFailuresAndResponse( + prepareSearch("idx_unmapped").addAggregation( + AggregationBuilders.ipRange("my_range") + .field("ip") + .addUnboundedTo("192.168.1.0") + .addRange("192.168.1.0", "192.168.1.10") + .addUnboundedFrom("192.168.1.10") + ), + response -> { + Range range = response.getAggregations().get("my_range"); + assertEquals(3, range.getBuckets().size()); + + Range.Bucket bucket1 = range.getBuckets().get(0); + assertNull(bucket1.getFrom()); + assertEquals("192.168.1.0", bucket1.getTo()); + assertEquals("*-192.168.1.0", bucket1.getKey()); + assertEquals(0, bucket1.getDocCount()); + + Range.Bucket bucket2 = range.getBuckets().get(1); + assertEquals("192.168.1.0", bucket2.getFrom()); + assertEquals("192.168.1.10", bucket2.getTo()); + assertEquals("192.168.1.0-192.168.1.10", bucket2.getKey()); + assertEquals(0, bucket2.getDocCount()); + + Range.Bucket bucket3 = range.getBuckets().get(2); + assertEquals("192.168.1.10", bucket3.getFrom()); + assertNull(bucket3.getTo()); + assertEquals("192.168.1.10-*", bucket3.getKey()); + assertEquals(0, bucket3.getDocCount()); + } + ); } 
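+    // Every conversion in this file follows the same shape; a minimal sketch:
+    //
+    //     assertNoFailuresAndResponse(
+    //         prepareSearch("idx").addAggregation(...),
+    //         response -> { /* assertions against the response */ }
+    //     );
+    //
+    // The helper runs the search, asserts that no shards failed, and hands the
+    // SearchResponse to the consumer, so the tests above no longer call .get()
+    // or keep a reference to the response themselves.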
public void testRejectsScript() { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/IpTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/IpTermsIT.java index d50ea294287eb..91702bd6d8159 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/IpTermsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/IpTermsIT.java @@ -7,7 +7,6 @@ */ package org.elasticsearch.search.aggregations.bucket; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; @@ -22,7 +21,7 @@ import java.util.function.Function; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; public class IpTermsIT extends AbstractTermsTestCase { @@ -61,22 +60,25 @@ public void testScriptValue() throws Exception { ); Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['ip'].value", Collections.emptyMap()); - SearchResponse response = prepareSearch("index").addAggregation( - new TermsAggregationBuilder("my_terms").script(script).executionHint(randomExecutionHint()) - ).get(); - assertNoFailures(response); - StringTerms terms = response.getAggregations().get("my_terms"); - assertEquals(2, terms.getBuckets().size()); - - StringTerms.Bucket bucket1 = terms.getBuckets().get(0); - assertEquals(2, bucket1.getDocCount()); - assertEquals("192.168.1.7", bucket1.getKey()); - assertEquals("192.168.1.7", bucket1.getKeyAsString()); - - StringTerms.Bucket bucket2 = terms.getBuckets().get(1); - assertEquals(1, bucket2.getDocCount()); - assertEquals("2001:db8::2:1", bucket2.getKey()); - assertEquals("2001:db8::2:1", bucket2.getKeyAsString()); + assertNoFailuresAndResponse( + prepareSearch("index").addAggregation( + new TermsAggregationBuilder("my_terms").script(script).executionHint(randomExecutionHint()) + ), + response -> { + StringTerms terms = response.getAggregations().get("my_terms"); + assertEquals(2, terms.getBuckets().size()); + + StringTerms.Bucket bucket1 = terms.getBuckets().get(0); + assertEquals(2, bucket1.getDocCount()); + assertEquals("192.168.1.7", bucket1.getKey()); + assertEquals("192.168.1.7", bucket1.getKeyAsString()); + + StringTerms.Bucket bucket2 = terms.getBuckets().get(1); + assertEquals(1, bucket2.getDocCount()); + assertEquals("2001:db8::2:1", bucket2.getKey()); + assertEquals("2001:db8::2:1", bucket2.getKeyAsString()); + } + ); } public void testScriptValues() throws Exception { @@ -89,22 +91,25 @@ public void testScriptValues() throws Exception { ); Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['ip']", Collections.emptyMap()); - SearchResponse response = prepareSearch("index").addAggregation( - new TermsAggregationBuilder("my_terms").script(script).executionHint(randomExecutionHint()) - ).get(); - assertNoFailures(response); - StringTerms terms = response.getAggregations().get("my_terms"); - assertEquals(2, terms.getBuckets().size()); - - StringTerms.Bucket bucket1 = terms.getBuckets().get(0); - assertEquals(2, bucket1.getDocCount()); - assertEquals("192.168.1.7", bucket1.getKey()); - assertEquals("192.168.1.7", bucket1.getKeyAsString()); - - 
StringTerms.Bucket bucket2 = terms.getBuckets().get(1); - assertEquals(1, bucket2.getDocCount()); - assertEquals("2001:db8::2:1", bucket2.getKey()); - assertEquals("2001:db8::2:1", bucket2.getKeyAsString()); + assertNoFailuresAndResponse( + prepareSearch("index").addAggregation( + new TermsAggregationBuilder("my_terms").script(script).executionHint(randomExecutionHint()) + ), + response -> { + StringTerms terms = response.getAggregations().get("my_terms"); + assertEquals(2, terms.getBuckets().size()); + + StringTerms.Bucket bucket1 = terms.getBuckets().get(0); + assertEquals(2, bucket1.getDocCount()); + assertEquals("192.168.1.7", bucket1.getKey()); + assertEquals("192.168.1.7", bucket1.getKeyAsString()); + + StringTerms.Bucket bucket2 = terms.getBuckets().get(1); + assertEquals(1, bucket2.getDocCount()); + assertEquals("2001:db8::2:1", bucket2.getKey()); + assertEquals("2001:db8::2:1", bucket2.getKeyAsString()); + } + ); } public void testMissingValue() throws Exception { @@ -116,22 +121,24 @@ public void testMissingValue() throws Exception { client().prepareIndex("index").setId("3").setSource("ip", "127.0.0.1"), client().prepareIndex("index").setId("4").setSource("not_ip", "something") ); - SearchResponse response = prepareSearch("index").addAggregation( - new TermsAggregationBuilder("my_terms").field("ip").missing("127.0.0.1").executionHint(randomExecutionHint()) - ).get(); - - assertNoFailures(response); - StringTerms terms = response.getAggregations().get("my_terms"); - assertEquals(2, terms.getBuckets().size()); - - StringTerms.Bucket bucket1 = terms.getBuckets().get(0); - assertEquals(2, bucket1.getDocCount()); - assertEquals("127.0.0.1", bucket1.getKey()); - assertEquals("127.0.0.1", bucket1.getKeyAsString()); - - StringTerms.Bucket bucket2 = terms.getBuckets().get(1); - assertEquals(2, bucket2.getDocCount()); - assertEquals("192.168.1.7", bucket2.getKey()); - assertEquals("192.168.1.7", bucket2.getKeyAsString()); + assertNoFailuresAndResponse( + prepareSearch("index").addAggregation( + new TermsAggregationBuilder("my_terms").field("ip").missing("127.0.0.1").executionHint(randomExecutionHint()) + ), + response -> { + StringTerms terms = response.getAggregations().get("my_terms"); + assertEquals(2, terms.getBuckets().size()); + + StringTerms.Bucket bucket1 = terms.getBuckets().get(0); + assertEquals(2, bucket1.getDocCount()); + assertEquals("127.0.0.1", bucket1.getKey()); + assertEquals("127.0.0.1", bucket1.getKeyAsString()); + + StringTerms.Bucket bucket2 = terms.getBuckets().get(1); + assertEquals(2, bucket2.getDocCount()); + assertEquals("192.168.1.7", bucket2.getKey()); + assertEquals("192.168.1.7", bucket2.getKeyAsString()); + } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java index 6c3d1c44aafed..e734047172305 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java @@ -9,7 +9,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Strings; import org.elasticsearch.index.fielddata.ScriptDocValues; @@ -54,6 +53,7 @@ import static 
org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -263,108 +263,119 @@ public void testMultiValueFieldWithPartitionedFiltering() throws Exception { private void runTestFieldWithPartitionedFiltering(String field) throws Exception { // Find total number of unique terms + // one-element array so the lambda below can write the result (captured locals must be effectively final) - SearchResponse allResponse = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").field(field).collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - assertNoFailures(allResponse); - LongTerms terms = allResponse.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - int expectedCardinality = terms.getBuckets().size(); + int[] expectedCardinality = new int[1]; + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").field(field).collectMode(randomFrom(SubAggCollectionMode.values())) + ), + response -> { + LongTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + expectedCardinality[0] = terms.getBuckets().size(); + } + ); // Gather terms using partitioned aggregations final int numPartitions = randomIntBetween(2, 4); Set foundTerms = new HashSet<>(); for (int partition = 0; partition < numPartitions; partition++) { - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").field(field) - .includeExclude(new IncludeExclude(partition, numPartitions)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - assertNoFailures(response); - terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - for (LongTerms.Bucket bucket : terms.getBuckets()) { - assertFalse(foundTerms.contains(bucket.getKeyAsNumber())); - foundTerms.add(bucket.getKeyAsNumber()); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").field(field) + .includeExclude(new IncludeExclude(partition, numPartitions)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + response -> { + LongTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + for (LongTerms.Bucket bucket : terms.getBuckets()) { + assertFalse(foundTerms.contains(bucket.getKeyAsNumber())); + foundTerms.add(bucket.getKeyAsNumber()); + } + } + ); } - assertEquals(expectedCardinality, foundTerms.size()); + assertEquals(expectedCardinality[0], foundTerms.size()); } public void testSingleValuedFieldWithValueScript() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap())) - ).get(); - - assertNoFailures(response); - - // Scripts force the results to 
doubles - DoubleTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - for (int i = 0; i < 5; i++) { - DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (i + 1d)); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("" + (i + 1d))); - assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i + 1)); - assertThat(bucket.getDocCount(), equalTo(1L)); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap())) + ), + response -> { + // Scripts force the results to doubles + DoubleTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + for (int i = 0; i < 5; i++) { + DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (i + 1d)); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("" + (i + 1d))); + assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i + 1)); + assertThat(bucket.getDocCount(), equalTo(1L)); + } + } + ); } public void testMultiValuedFieldWithValueScript() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").field(MULTI_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value - 1", Collections.emptyMap())) - ).get(); - - assertNoFailures(response); - - // Scripts force the results to doubles - DoubleTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(6)); - - for (int i = 0; i < 6; i++) { - DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (i - 1d)); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("" + (i - 1d))); - assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i - 1)); - if (i == 0 || i == 5) { - assertThat(bucket.getDocCount(), equalTo(1L)); - } else { - assertThat(bucket.getDocCount(), equalTo(2L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").field(MULTI_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value - 1", Collections.emptyMap())) + ), + response -> { + // Scripts force the results to doubles + DoubleTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(6)); + + for (int i = 0; i < 6; i++) { + DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (i - 1d)); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("" + (i - 1d))); + assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i - 1)); + if (i == 0 || i == 5) { + assertThat(bucket.getDocCount(), equalTo(1L)); + } else { + assertThat(bucket.getDocCount(), equalTo(2L)); + } + } } - } + ); } public void testMultiValuedFieldWithValueScriptNotUnique() throws 
Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").field(MULTI_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "floor(_value / 1000 + 1)", Collections.emptyMap())) - ).get(); - - assertNoFailures(response); - - // The script always converts long to double - DoubleTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(1)); - - DoubleTerms.Bucket bucket = terms.getBucketByKey("1.0"); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("1.0")); - assertThat(bucket.getKeyAsNumber().intValue(), equalTo(1)); - assertThat(bucket.getDocCount(), equalTo(5L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").field(MULTI_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "floor(_value / 1000 + 1)", Collections.emptyMap())) + ), + response -> { + // The script always converts long to double + DoubleTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(1)); + + DoubleTerms.Bucket bucket = terms.getBucketByKey("1.0"); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("1.0")); + assertThat(bucket.getKeyAsNumber().intValue(), equalTo(1)); + assertThat(bucket.getDocCount(), equalTo(5L)); + } + ); } /* @@ -392,27 +403,28 @@ public void testScriptSingleValue() throws Exception { Collections.emptyMap() ); - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values())) - .userValueTypeHint(ValueType.LONG) - .script(script) - ).get(); - - assertNoFailures(response); - - LongTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - for (int i = 0; i < 5; i++) { - LongTerms.Bucket bucket = terms.getBucketByKey("" + i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("" + i)); - assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i)); - assertThat(bucket.getKeyAsNumber(), instanceOf(Long.class)); - assertThat(bucket.getDocCount(), equalTo(1L)); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values())) + .userValueTypeHint(ValueType.LONG) + .script(script) + ), + response -> { + LongTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + for (int i = 0; i < 5; i++) { + LongTerms.Bucket bucket = terms.getBucketByKey("" + i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("" + i)); + assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i)); + assertThat(bucket.getKeyAsNumber(), instanceOf(Long.class)); + assertThat(bucket.getDocCount(), equalTo(1L)); + } + } + ); } public void testScriptMultiValued() throws Exception { @@ -423,207 
+435,213 @@ public void testScriptMultiValued() throws Exception { Collections.emptyMap() ); - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values())) - .userValueTypeHint(ValueType.LONG) - .script(script) - ).get(); - - assertNoFailures(response); - - LongTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(6)); - - for (int i = 0; i < 6; i++) { - LongTerms.Bucket bucket = terms.getBucketByKey("" + i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("" + i)); - assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i)); - if (i == 0 || i == 5) { - assertThat(bucket.getDocCount(), equalTo(1L)); - } else { - assertThat(bucket.getDocCount(), equalTo(2L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values())) + .userValueTypeHint(ValueType.LONG) + .script(script) + ), + response -> { + LongTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(6)); + + for (int i = 0; i < 6; i++) { + LongTerms.Bucket bucket = terms.getBucketByKey("" + i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("" + i)); + assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i)); + if (i == 0 || i == 5) { + assertThat(bucket.getDocCount(), equalTo(1L)); + } else { + assertThat(bucket.getDocCount(), equalTo(2L)); + } + } } - } + ); } public void testPartiallyUnmapped() throws Exception { - SearchResponse response = prepareSearch("idx_unmapped", "idx").addAggregation( - new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(response); - - LongTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - for (int i = 0; i < 5; i++) { - LongTerms.Bucket bucket = terms.getBucketByKey("" + i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("" + i)); - assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i)); - assertThat(bucket.getDocCount(), equalTo(1L)); - } + assertNoFailuresAndResponse( + prepareSearch("idx_unmapped", "idx").addAggregation( + new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) + ), + response -> { + LongTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + for (int i = 0; i < 5; i++) { + LongTerms.Bucket bucket = terms.getBucketByKey("" + i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("" + i)); + assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + } + } + ); } public void testPartiallyUnmappedWithFormat() throws Exception { - SearchResponse response = prepareSearch("idx_unmapped", "idx").addAggregation( - new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) - 
.collectMode(randomFrom(SubAggCollectionMode.values())) - .format("0000") - ).get(); - - assertNoFailures(response); - - LongTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - for (int i = 0; i < 5; i++) { - String key = Strings.format("%04d", i); - LongTerms.Bucket bucket = terms.getBucketByKey(key); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(key)); - assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i)); - assertThat(bucket.getDocCount(), equalTo(1L)); - } + assertNoFailuresAndResponse( + prepareSearch("idx_unmapped", "idx").addAggregation( + new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .format("0000") + ), + response -> { + LongTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + for (int i = 0; i < 5; i++) { + String key = Strings.format("%04d", i); + LongTerms.Bucket bucket = terms.getBucketByKey(key); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(key)); + assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + } + } + ); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscWithTermsSubAgg() throws Exception { boolean asc = true; - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("avg_i", asc)) - .subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME)) - .subAggregation( - new TermsAggregationBuilder("subTerms").field(MULTI_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ) - ).get(); - - assertNoFailures(response); - - LongTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - for (int i = 0; i < 5; i++) { - LongTerms.Bucket bucket = terms.getBucketByKey("" + i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("" + i)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - Avg avg = bucket.getAggregations().get("avg_i"); - assertThat(avg, notNullValue()); - assertThat(avg.getValue(), equalTo((double) i)); - - LongTerms subTermsAgg = bucket.getAggregations().get("subTerms"); - assertThat(subTermsAgg, notNullValue()); - assertThat(subTermsAgg.getBuckets().size(), equalTo(2)); - int j = i; - for (LongTerms.Bucket subBucket : subTermsAgg.getBuckets()) { - assertThat(subBucket, notNullValue()); - assertThat(subBucket.getKeyAsString(), equalTo(String.valueOf(j))); - assertThat(subBucket.getDocCount(), equalTo(1L)); - j++; + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("avg_i", asc)) + .subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME)) + .subAggregation( + new TermsAggregationBuilder("subTerms").field(MULTI_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + ), + 
response -> { + LongTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + for (int i = 0; i < 5; i++) { + LongTerms.Bucket bucket = terms.getBucketByKey("" + i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("" + i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + + Avg avg = bucket.getAggregations().get("avg_i"); + assertThat(avg, notNullValue()); + assertThat(avg.getValue(), equalTo((double) i)); + + LongTerms subTermsAgg = bucket.getAggregations().get("subTerms"); + assertThat(subTermsAgg, notNullValue()); + assertThat(subTermsAgg.getBuckets().size(), equalTo(2)); + int j = i; + for (LongTerms.Bucket subBucket : subTermsAgg.getBuckets()) { + assertThat(subBucket, notNullValue()); + assertThat(subBucket.getKeyAsString(), equalTo(String.valueOf(j))); + assertThat(subBucket.getDocCount(), equalTo(1L)); + j++; + } + } } - } + ); } public void testSingleValuedFieldOrderedBySingleBucketSubAggregationAsc() throws Exception { boolean asc = randomBoolean(); - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("num_tags").field("num_tag") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("filter", asc)) - .subAggregation(filter("filter", QueryBuilders.matchAllQuery())) - ).get(); - - assertNoFailures(response); - - LongTerms tags = response.getAggregations().get("num_tags"); - assertThat(tags, notNullValue()); - assertThat(tags.getName(), equalTo("num_tags")); - assertThat(tags.getBuckets().size(), equalTo(2)); - - Iterator iters = tags.getBuckets().iterator(); - - LongTerms.Bucket tag = iters.next(); - assertThat(tag, notNullValue()); - assertThat(tag.getKeyAsString(), equalTo(asc ? "0" : "1")); - assertThat(tag.getDocCount(), equalTo(asc ? 2L : 3L)); - Filter filter = tag.getAggregations().get("filter"); - assertThat(filter, notNullValue()); - assertThat(filter.getDocCount(), equalTo(asc ? 2L : 3L)); - - tag = iters.next(); - assertThat(tag, notNullValue()); - assertThat(tag.getKeyAsString(), equalTo(asc ? "1" : "0")); - assertThat(tag.getDocCount(), equalTo(asc ? 3L : 2L)); - filter = tag.getAggregations().get("filter"); - assertThat(filter, notNullValue()); - assertThat(filter.getDocCount(), equalTo(asc ? 3L : 2L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("num_tags").field("num_tag") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("filter", asc)) + .subAggregation(filter("filter", QueryBuilders.matchAllQuery())) + ), + response -> { + LongTerms tags = response.getAggregations().get("num_tags"); + assertThat(tags, notNullValue()); + assertThat(tags.getName(), equalTo("num_tags")); + assertThat(tags.getBuckets().size(), equalTo(2)); + + Iterator iters = tags.getBuckets().iterator(); + + LongTerms.Bucket tag = iters.next(); + assertThat(tag, notNullValue()); + assertThat(tag.getKeyAsString(), equalTo(asc ? "0" : "1")); + assertThat(tag.getDocCount(), equalTo(asc ? 2L : 3L)); + Filter filter = tag.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(asc ? 2L : 3L)); + + tag = iters.next(); + assertThat(tag, notNullValue()); + assertThat(tag.getKeyAsString(), equalTo(asc ? "1" : "0")); + assertThat(tag.getDocCount(), equalTo(asc ? 
3L : 2L)); + filter = tag.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(asc ? 3L : 2L)); + } + ); } public void testSingleValuedFieldOrderedBySubAggregationAscMultiHierarchyLevels() throws Exception { boolean asc = randomBoolean(); - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("tags").field("num_tag") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("filter1>filter2>max", asc)) - .subAggregation( - filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( - filter("filter2", QueryBuilders.matchAllQuery()).subAggregation(max("max").field(SINGLE_VALUED_FIELD_NAME)) + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("tags").field("num_tag") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("filter1>filter2>max", asc)) + .subAggregation( + filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( + filter("filter2", QueryBuilders.matchAllQuery()).subAggregation(max("max").field(SINGLE_VALUED_FIELD_NAME)) + ) ) - ) - ).get(); - - assertNoFailures(response); - - LongTerms tags = response.getAggregations().get("tags"); - assertThat(tags, notNullValue()); - assertThat(tags.getName(), equalTo("tags")); - assertThat(tags.getBuckets().size(), equalTo(2)); - - Iterator iters = tags.getBuckets().iterator(); - - // the max for "1" is 2 - // the max for "0" is 4 - - LongTerms.Bucket tag = iters.next(); - assertThat(tag, notNullValue()); - assertThat(tag.getKeyAsString(), equalTo(asc ? "1" : "0")); - assertThat(tag.getDocCount(), equalTo(asc ? 3L : 2L)); - Filter filter1 = tag.getAggregations().get("filter1"); - assertThat(filter1, notNullValue()); - assertThat(filter1.getDocCount(), equalTo(asc ? 3L : 2L)); - Filter filter2 = filter1.getAggregations().get("filter2"); - assertThat(filter2, notNullValue()); - assertThat(filter2.getDocCount(), equalTo(asc ? 3L : 2L)); - Max max = filter2.getAggregations().get("max"); - assertThat(max, notNullValue()); - assertThat(max.value(), equalTo(asc ? 2.0 : 4.0)); - - tag = iters.next(); - assertThat(tag, notNullValue()); - assertThat(tag.getKeyAsString(), equalTo(asc ? "0" : "1")); - assertThat(tag.getDocCount(), equalTo(asc ? 2L : 3L)); - filter1 = tag.getAggregations().get("filter1"); - assertThat(filter1, notNullValue()); - assertThat(filter1.getDocCount(), equalTo(asc ? 2L : 3L)); - filter2 = filter1.getAggregations().get("filter2"); - assertThat(filter2, notNullValue()); - assertThat(filter2.getDocCount(), equalTo(asc ? 2L : 3L)); - max = filter2.getAggregations().get("max"); - assertThat(max, notNullValue()); - assertThat(max.value(), equalTo(asc ? 4.0 : 2.0)); + ), + response -> { + LongTerms tags = response.getAggregations().get("tags"); + assertThat(tags, notNullValue()); + assertThat(tags.getName(), equalTo("tags")); + assertThat(tags.getBuckets().size(), equalTo(2)); + + Iterator iters = tags.getBuckets().iterator(); + + // the max for "1" is 2 + // the max for "0" is 4 + + LongTerms.Bucket tag = iters.next(); + assertThat(tag, notNullValue()); + assertThat(tag.getKeyAsString(), equalTo(asc ? "1" : "0")); + assertThat(tag.getDocCount(), equalTo(asc ? 3L : 2L)); + Filter filter1 = tag.getAggregations().get("filter1"); + assertThat(filter1, notNullValue()); + assertThat(filter1.getDocCount(), equalTo(asc ? 
3L : 2L)); + Filter filter2 = filter1.getAggregations().get("filter2"); + assertThat(filter2, notNullValue()); + assertThat(filter2.getDocCount(), equalTo(asc ? 3L : 2L)); + Max max = filter2.getAggregations().get("max"); + assertThat(max, notNullValue()); + assertThat(max.value(), equalTo(asc ? 2.0 : 4.0)); + + tag = iters.next(); + assertThat(tag, notNullValue()); + assertThat(tag.getKeyAsString(), equalTo(asc ? "0" : "1")); + assertThat(tag.getDocCount(), equalTo(asc ? 2L : 3L)); + filter1 = tag.getAggregations().get("filter1"); + assertThat(filter1, notNullValue()); + assertThat(filter1.getDocCount(), equalTo(asc ? 2L : 3L)); + filter2 = filter1.getAggregations().get("filter2"); + assertThat(filter2, notNullValue()); + assertThat(filter2.getDocCount(), equalTo(asc ? 2L : 3L)); + max = filter2.getAggregations().get("max"); + assertThat(max, notNullValue()); + assertThat(max.value(), equalTo(asc ? 4.0 : 2.0)); + } + ); } public void testSingleValuedFieldOrderedByMissingSubAggregation() throws Exception { @@ -707,89 +725,89 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithoutMetric public void testSingleValuedFieldOrderedByMultiValueSubAggregationAsc() throws Exception { boolean asc = true; - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("stats.avg", asc)) - .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) - ).get(); - - assertNoFailures(response); - - LongTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - for (int i = 0; i < 5; i++) { - LongTerms.Bucket bucket = terms.getBucketByKey("" + i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("" + i)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - Stats stats = bucket.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getMax(), equalTo((double) i)); - } - + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("stats.avg", asc)) + .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) + ), + response -> { + LongTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + for (int i = 0; i < 5; i++) { + LongTerms.Bucket bucket = terms.getBucketByKey("" + i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("" + i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + + Stats stats = bucket.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getMax(), equalTo((double) i)); + } + } + ); } public void testSingleValuedFieldOrderedByMultiValueSubAggregationDesc() throws Exception { boolean asc = false; - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("stats.avg", asc)) - .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) - ).get(); - - 
assertNoFailures(response); - - LongTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - for (int i = 4; i >= 0; i--) { - LongTerms.Bucket bucket = terms.getBucketByKey("" + i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("" + i)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - Stats stats = bucket.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getMax(), equalTo((double) i)); - } - + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("stats.avg", asc)) + .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) + ), + response -> { + LongTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + for (int i = 4; i >= 0; i--) { + LongTerms.Bucket bucket = terms.getBucketByKey("" + i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("" + i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + + Stats stats = bucket.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getMax(), equalTo((double) i)); + } + } + ); } public void testSingleValuedFieldOrderedByMultiValueExtendedStatsAsc() throws Exception { boolean asc = true; - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("stats.variance", asc)) - .subAggregation(extendedStats("stats").field(SINGLE_VALUED_FIELD_NAME)) - ).get(); - - assertNoFailures(response); - - LongTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - for (int i = 0; i < 5; i++) { - LongTerms.Bucket bucket = terms.getBucketByKey("" + i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("" + i)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - ExtendedStats stats = bucket.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getMax(), equalTo((double) i)); - } - + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("stats.variance", asc)) + .subAggregation(extendedStats("stats").field(SINGLE_VALUED_FIELD_NAME)) + ), + response -> { + LongTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + for (int i = 0; i < 5; i++) { + LongTerms.Bucket bucket = terms.getBucketByKey("" + i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("" + i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + + ExtendedStats stats = bucket.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getMax(), 
equalTo((double) i)); + } + } + ); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndTermsDesc() throws Exception { @@ -833,34 +851,35 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound } private void assertMultiSortResponse(long[] expectedKeys, BucketOrder... order) { - SearchResponse response = prepareSearch("sort_idx").addAggregation( - new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.compound(order)) - .subAggregation(avg("avg_l").field("l")) - .subAggregation(sum("sum_d").field("d")) - ).get(); - - assertNoFailures(response); - - LongTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(expectedKeys.length)); - - int i = 0; - for (LongTerms.Bucket bucket : terms.getBuckets()) { - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(String.valueOf(expectedKeys[i]))); - assertThat(bucket.getDocCount(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("_count"))); - Avg avg = bucket.getAggregations().get("avg_l"); - assertThat(avg, notNullValue()); - assertThat(avg.getValue(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("avg_l"))); - Sum sum = bucket.getAggregations().get("sum_d"); - assertThat(sum, notNullValue()); - assertThat(sum.value(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("sum_d"))); - i++; - } + assertNoFailuresAndResponse( + prepareSearch("sort_idx").addAggregation( + new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.compound(order)) + .subAggregation(avg("avg_l").field("l")) + .subAggregation(sum("sum_d").field("d")) + ), + response -> { + LongTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(expectedKeys.length)); + + int i = 0; + for (LongTerms.Bucket bucket : terms.getBuckets()) { + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(String.valueOf(expectedKeys[i]))); + assertThat(bucket.getDocCount(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("_count"))); + Avg avg = bucket.getAggregations().get("avg_l"); + assertThat(avg, notNullValue()); + assertThat(avg.getValue(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("avg_l"))); + Sum sum = bucket.getAggregations().get("sum_d"); + assertThat(sum, notNullValue()); + assertThat(sum.value(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("sum_d"))); + i++; + } + } + ); } public void testOtherDocCount() { @@ -893,13 +912,13 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - new TermsAggregationBuilder("terms").field("d") - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + new TermsAggregationBuilder("terms").field("d") + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap())) + ) + ); 
assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -911,13 +930,13 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a deterministic script gets cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - new TermsAggregationBuilder("terms").field("d") - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + new TermsAggregationBuilder("terms").field("d") + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -929,8 +948,7 @@ public void testScriptCaching() throws Exception { ); // Ensure that non-scripted requests are cached as normal - r = prepareSearch("cache_test_idx").setSize(0).addAggregation(new TermsAggregationBuilder("terms").field("d")).get(); - assertNoFailures(r); + assertNoFailures(prepareSearch("cache_test_idx").setSize(0).addAggregation(new TermsAggregationBuilder("terms").field("d"))); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java index 36ba2a988668a..6404b06365967 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java @@ -11,8 +11,6 @@ import com.carrotsearch.randomizedtesting.generators.RandomStrings; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.query.QueryBuilder; @@ -49,6 +47,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; @ESIntegTestCase.SuiteScopeTestCase @@ -306,41 +305,47 @@ private void testMinDocCountOnTerms(String field, Script script, BucketOrder ord private void testMinDocCountOnTerms(String field, Script script, BucketOrder order, String include, boolean retry) throws Exception { // all terms - final SearchResponse allTermsResponse = prepareSearch("idx").setSize(0) - .setQuery(QUERY) - .addAggregation( - script.apply(terms("terms"), field) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .executionHint(randomExecutionHint()) - .order(order) - .size(cardinality + randomInt(10)) - .minDocCount(0) - ) - .get(); - assertAllSuccessful(allTermsResponse); - - final Terms allTerms = allTermsResponse.getAggregations().get("terms"); - assertEquals(cardinality, allTerms.getBuckets().size()); - - 
for (long minDocCount = 0; minDocCount < 20; ++minDocCount) { - final int size = randomIntBetween(1, cardinality + 2); - final SearchRequest request = prepareSearch("idx").setSize(0) + assertResponse( + prepareSearch("idx").setSize(0) .setQuery(QUERY) .addAggregation( script.apply(terms("terms"), field) .collectMode(randomFrom(SubAggCollectionMode.values())) .executionHint(randomExecutionHint()) .order(order) - .size(size) - .includeExclude(include == null ? null : new IncludeExclude(include, null, null, null)) - .shardSize(cardinality + randomInt(10)) - .minDocCount(minDocCount) - ) - .request(); - final SearchResponse response = client().search(request).get(); - assertAllSuccessful(response); - assertSubset(allTerms, (Terms) response.getAggregations().get("terms"), minDocCount, size, include); - } + .size(cardinality + randomInt(10)) + .minDocCount(0) + ), + allTermsResponse -> { + assertAllSuccessful(allTermsResponse); + + final Terms allTerms = allTermsResponse.getAggregations().get("terms"); + assertEquals(cardinality, allTerms.getBuckets().size()); + + for (long minDocCount = 0; minDocCount < 20; ++minDocCount) { + final int size = randomIntBetween(1, cardinality + 2); + final long finalMinDocCount = minDocCount; + assertResponse( + prepareSearch("idx").setSize(0) + .setQuery(QUERY) + .addAggregation( + script.apply(terms("terms"), field) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .executionHint(randomExecutionHint()) + .order(order) + .size(size) + .includeExclude(include == null ? null : new IncludeExclude(include, null, null, null)) + .shardSize(cardinality + randomInt(10)) + .minDocCount(minDocCount) + ), + response -> { + assertAllSuccessful(response); + assertSubset(allTerms, (Terms) response.getAggregations().get("terms"), finalMinDocCount, size, include); + } + ); + } + } + ); } public void testHistogramCountAsc() throws Exception { @@ -377,38 +382,52 @@ public void testDateHistogramKeyDesc() throws Exception { private void testMinDocCountOnHistogram(BucketOrder order) throws Exception { final int interval = randomIntBetween(1, 3); - final SearchResponse allResponse = prepareSearch("idx").setSize(0) - .setQuery(QUERY) - .addAggregation(histogram("histo").field("d").interval(interval).order(order).minDocCount(0)) - .get(); - - final Histogram allHisto = allResponse.getAggregations().get("histo"); - - for (long minDocCount = 0; minDocCount < 50; ++minDocCount) { - final SearchResponse response = prepareSearch("idx").setSize(0) + assertResponse( + prepareSearch("idx").setSize(0) .setQuery(QUERY) - .addAggregation(histogram("histo").field("d").interval(interval).order(order).minDocCount(minDocCount)) - .get(); - assertSubset(allHisto, (Histogram) response.getAggregations().get("histo"), minDocCount); - } + .addAggregation(histogram("histo").field("d").interval(interval).order(order).minDocCount(0)), + allResponse -> { + final Histogram allHisto = allResponse.getAggregations().get("histo"); + for (long minDocCount = 0; minDocCount < 50; ++minDocCount) { + final long finalMinDocCount = minDocCount; + assertResponse( + prepareSearch("idx").setSize(0) + .setQuery(QUERY) + .addAggregation(histogram("histo").field("d").interval(interval).order(order).minDocCount(minDocCount)), + response -> { + assertSubset(allHisto, response.getAggregations().get("histo"), finalMinDocCount); + } + ); + } + } + ); } private void testMinDocCountOnDateHistogram(BucketOrder order) throws Exception { - final SearchResponse allResponse = prepareSearch("idx").setSize(0) - .setQuery(QUERY) - 
.addAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).order(order).minDocCount(0)) - .get(); - - final Histogram allHisto = allResponse.getAggregations().get("histo"); - - for (long minDocCount = 0; minDocCount < 50; ++minDocCount) { - final SearchResponse response = prepareSearch("idx").setSize(0) + assertResponse( + prepareSearch("idx").setSize(0) .setQuery(QUERY) - .addAggregation( - dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).order(order).minDocCount(minDocCount) - ) - .get(); - assertSubset(allHisto, response.getAggregations().get("histo"), minDocCount); - } + .addAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).order(order).minDocCount(0)), + allResponse -> { + final Histogram allHisto = allResponse.getAggregations().get("histo"); + + for (long minDocCount = 0; minDocCount < 50; ++minDocCount) { + final long finalMinDocCount = minDocCount; + assertResponse( + prepareSearch("idx").setSize(0) + .setQuery(QUERY) + .addAggregation( + dateHistogram("histo").field("date") + .fixedInterval(DateHistogramInterval.DAY) + .order(order) + .minDocCount(minDocCount) + ), + response -> { + assertSubset(allHisto, response.getAggregations().get("histo"), finalMinDocCount); + } + ); + } + } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NaNSortingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NaNSortingIT.java index eb2ad6de7789e..23908daf32607 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NaNSortingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NaNSortingIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.bucket; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.util.Comparators; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; @@ -28,7 +27,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.core.IsNull.notNullValue; @@ -145,16 +144,18 @@ private void assertCorrectlySorted(Histogram histo, boolean asc, SubAggregation public void testTerms(String fieldName) { final boolean asc = randomBoolean(); SubAggregation agg = randomFrom(SubAggregation.values()); - SearchResponse response = prepareSearch("idx").addAggregation( - terms("terms").field(fieldName) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(agg.builder()) - .order(BucketOrder.aggregation(agg.sortKey(), asc)) - ).get(); - - assertNoFailures(response); - final Terms terms = response.getAggregations().get("terms"); - assertCorrectlySorted(terms, asc, agg); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("terms").field(fieldName) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(agg.builder()) + .order(BucketOrder.aggregation(agg.sortKey(), asc)) 
+ ), + response -> { + final Terms terms = response.getAggregations().get("terms"); + assertCorrectlySorted(terms, asc, agg); + } + ); } public void testStringTerms() { @@ -172,16 +173,17 @@ public void testDoubleTerms() { public void testLongHistogram() { final boolean asc = randomBoolean(); SubAggregation agg = randomFrom(SubAggregation.values()); - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field("long_value") - .interval(randomIntBetween(1, 2)) - .subAggregation(agg.builder()) - .order(BucketOrder.aggregation(agg.sortKey(), asc)) - ).get(); - - assertNoFailures(response); - final Histogram histo = response.getAggregations().get("histo"); - assertCorrectlySorted(histo, asc, agg); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field("long_value") + .interval(randomIntBetween(1, 2)) + .subAggregation(agg.builder()) + .order(BucketOrder.aggregation(agg.sortKey(), asc)) + ), + response -> { + final Histogram histo = response.getAggregations().get("histo"); + assertCorrectlySorted(histo, asc, agg); + } + ); } - } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java index 2ab107c2580c7..d832d7060ffda 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java @@ -10,7 +10,6 @@ import org.apache.lucene.search.join.ScoreMode; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.index.query.InnerHitBuilder; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; @@ -47,7 +46,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -177,176 +176,186 @@ public void setupSuiteScopeCluster() throws Exception { } public void testSimple() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - nested("nested", "nested").subAggregation(stats("nested_value_stats").field("nested.value")) - ).get(); - - assertNoFailures(response); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + nested("nested", "nested").subAggregation(stats("nested_value_stats").field("nested.value")) + ), + response -> { + double min = Double.POSITIVE_INFINITY; + double max = Double.NEGATIVE_INFINITY; + long sum = 0; + long count = 0; + for (int i = 0; i < numParents; ++i) { + for (int j = 0; j < numChildren[i]; ++j) { + final long value = i + 1 + j; + min = Math.min(min, value); + max = Math.max(max, value); + sum += value; + ++count; + } + } - double min = Double.POSITIVE_INFINITY; - double max = Double.NEGATIVE_INFINITY; - long sum = 0; - long 
count = 0; - for (int i = 0; i < numParents; ++i) { - for (int j = 0; j < numChildren[i]; ++j) { - final long value = i + 1 + j; - min = Math.min(min, value); - max = Math.max(max, value); - sum += value; - ++count; + Nested nested = response.getAggregations().get("nested"); + assertThat(nested, notNullValue()); + assertThat(nested.getName(), equalTo("nested")); + assertThat(nested.getDocCount(), equalTo(count)); + assertThat(nested.getAggregations().asList().isEmpty(), is(false)); + + Stats stats = nested.getAggregations().get("nested_value_stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getMin(), equalTo(min)); + assertThat(stats.getMax(), equalTo(max)); + assertThat(stats.getCount(), equalTo(count)); + assertThat(stats.getSum(), equalTo((double) sum)); + assertThat(stats.getAvg(), equalTo((double) sum / count)); } - } - - Nested nested = response.getAggregations().get("nested"); - assertThat(nested, notNullValue()); - assertThat(nested.getName(), equalTo("nested")); - assertThat(nested.getDocCount(), equalTo(count)); - assertThat(nested.getAggregations().asList().isEmpty(), is(false)); - - Stats stats = nested.getAggregations().get("nested_value_stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getMin(), equalTo(min)); - assertThat(stats.getMax(), equalTo(max)); - assertThat(stats.getCount(), equalTo(count)); - assertThat(stats.getSum(), equalTo((double) sum)); - assertThat(stats.getAvg(), equalTo((double) sum / count)); + ); } public void testNonExistingNestedField() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").addAggregation( - nested("nested", "value").subAggregation(stats("nested_value_stats").field("nested.value")) - ).get(); - - Nested nested = searchResponse.getAggregations().get("nested"); - assertThat(nested, Matchers.notNullValue()); - assertThat(nested.getName(), equalTo("nested")); - assertThat(nested.getDocCount(), is(0L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + nested("nested", "value").subAggregation(stats("nested_value_stats").field("nested.value")) + ), + response -> { + Nested nested = response.getAggregations().get("nested"); + assertThat(nested, Matchers.notNullValue()); + assertThat(nested.getName(), equalTo("nested")); + assertThat(nested.getDocCount(), is(0L)); + } + ); } public void testNestedWithSubTermsAgg() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - nested("nested", "nested").subAggregation(terms("values").field("nested.value").size(100).collectMode(aggCollectionMode)) - ).get(); - - assertNoFailures(response); - - long docCount = 0; - long[] counts = new long[numParents + 6]; - for (int i = 0; i < numParents; ++i) { - for (int j = 0; j < numChildren[i]; ++j) { - final int value = i + 1 + j; - ++counts[value]; - ++docCount; - } - } - int uniqueValues = 0; - for (long count : counts) { - if (count > 0) { - ++uniqueValues; - } - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + nested("nested", "nested").subAggregation(terms("values").field("nested.value").size(100).collectMode(aggCollectionMode)) + ), + response -> { + long docCount = 0; + long[] counts = new long[numParents + 6]; + for (int i = 0; i < numParents; ++i) { + for (int j = 0; j < numChildren[i]; ++j) { + final int value = i + 1 + j; + ++counts[value]; + ++docCount; + } + } + int uniqueValues = 0; + for (long count : counts) { + if (count > 0) { + ++uniqueValues; + } + } - Nested nested = response.getAggregations().get("nested"); - 
assertThat(nested, notNullValue()); - assertThat(nested.getName(), equalTo("nested")); - assertThat(nested.getDocCount(), equalTo(docCount)); - assertThat(((InternalAggregation) nested).getProperty("_count"), equalTo(docCount)); - assertThat(nested.getAggregations().asList().isEmpty(), is(false)); - - LongTerms values = nested.getAggregations().get("values"); - assertThat(values, notNullValue()); - assertThat(values.getName(), equalTo("values")); - assertThat(values.getBuckets(), notNullValue()); - assertThat(values.getBuckets().size(), equalTo(uniqueValues)); - for (int i = 0; i < counts.length; ++i) { - final String key = Long.toString(i); - if (counts[i] == 0) { - assertNull(values.getBucketByKey(key)); - } else { - Bucket bucket = values.getBucketByKey(key); - assertNotNull(bucket); - assertEquals(counts[i], bucket.getDocCount()); + Nested nested = response.getAggregations().get("nested"); + assertThat(nested, notNullValue()); + assertThat(nested.getName(), equalTo("nested")); + assertThat(nested.getDocCount(), equalTo(docCount)); + assertThat(((InternalAggregation) nested).getProperty("_count"), equalTo(docCount)); + assertThat(nested.getAggregations().asList().isEmpty(), is(false)); + + LongTerms values = nested.getAggregations().get("values"); + assertThat(values, notNullValue()); + assertThat(values.getName(), equalTo("values")); + assertThat(values.getBuckets(), notNullValue()); + assertThat(values.getBuckets().size(), equalTo(uniqueValues)); + for (int i = 0; i < counts.length; ++i) { + final String key = Long.toString(i); + if (counts[i] == 0) { + assertNull(values.getBucketByKey(key)); + } else { + Bucket bucket = values.getBucketByKey(key); + assertNotNull(bucket); + assertEquals(counts[i], bucket.getDocCount()); + } + } + assertThat(((InternalAggregation) nested).getProperty("values"), sameInstance(values)); } - } - assertThat(((InternalAggregation) nested).getProperty("values"), sameInstance(values)); + ); } public void testNestedAsSubAggregation() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - terms("top_values").field("value") - .size(100) - .collectMode(aggCollectionMode) - .subAggregation(nested("nested", "nested").subAggregation(max("max_value").field("nested.value"))) - ).get(); - - assertNoFailures(response); - - LongTerms values = response.getAggregations().get("top_values"); - assertThat(values, notNullValue()); - assertThat(values.getName(), equalTo("top_values")); - assertThat(values.getBuckets(), notNullValue()); - assertThat(values.getBuckets().size(), equalTo(numParents)); - - for (int i = 0; i < numParents; i++) { - String topValue = "" + (i + 1); - assertThat(values.getBucketByKey(topValue), notNullValue()); - Nested nested = values.getBucketByKey(topValue).getAggregations().get("nested"); - assertThat(nested, notNullValue()); - Max max = nested.getAggregations().get("max_value"); - assertThat(max, notNullValue()); - assertThat(max.value(), equalTo(numChildren[i] == 0 ? 
Double.NEGATIVE_INFINITY : (double) i + numChildren[i])); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("top_values").field("value") + .size(100) + .collectMode(aggCollectionMode) + .subAggregation(nested("nested", "nested").subAggregation(max("max_value").field("nested.value"))) + ), + response -> { + LongTerms values = response.getAggregations().get("top_values"); + assertThat(values, notNullValue()); + assertThat(values.getName(), equalTo("top_values")); + assertThat(values.getBuckets(), notNullValue()); + assertThat(values.getBuckets().size(), equalTo(numParents)); + + for (int i = 0; i < numParents; i++) { + String topValue = "" + (i + 1); + assertThat(values.getBucketByKey(topValue), notNullValue()); + Nested nested = values.getBucketByKey(topValue).getAggregations().get("nested"); + assertThat(nested, notNullValue()); + Max max = nested.getAggregations().get("max_value"); + assertThat(max, notNullValue()); + assertThat(max.value(), equalTo(numChildren[i] == 0 ? Double.NEGATIVE_INFINITY : (double) i + numChildren[i])); + } + } + ); } public void testNestNestedAggs() throws Exception { - SearchResponse response = prepareSearch("idx_nested_nested_aggs").addAggregation( - nested("level1", "nested1").subAggregation( - terms("a").field("nested1.a.keyword") - .collectMode(aggCollectionMode) - .subAggregation(nested("level2", "nested1.nested2").subAggregation(sum("sum").field("nested1.nested2.b"))) - ) - ).get(); - assertNoFailures(response); - - Nested level1 = response.getAggregations().get("level1"); - assertThat(level1, notNullValue()); - assertThat(level1.getName(), equalTo("level1")); - assertThat(level1.getDocCount(), equalTo(2L)); - - StringTerms a = level1.getAggregations().get("a"); - Terms.Bucket bBucket = a.getBucketByKey("a"); - assertThat(bBucket.getDocCount(), equalTo(1L)); - - Nested level2 = bBucket.getAggregations().get("level2"); - assertThat(level2.getDocCount(), equalTo(1L)); - Sum sum = level2.getAggregations().get("sum"); - assertThat(sum.value(), equalTo(2d)); - - a = level1.getAggregations().get("a"); - bBucket = a.getBucketByKey("b"); - assertThat(bBucket.getDocCount(), equalTo(1L)); - - level2 = bBucket.getAggregations().get("level2"); - assertThat(level2.getDocCount(), equalTo(1L)); - sum = level2.getAggregations().get("sum"); - assertThat(sum.value(), equalTo(2d)); + assertNoFailuresAndResponse( + prepareSearch("idx_nested_nested_aggs").addAggregation( + nested("level1", "nested1").subAggregation( + terms("a").field("nested1.a.keyword") + .collectMode(aggCollectionMode) + .subAggregation(nested("level2", "nested1.nested2").subAggregation(sum("sum").field("nested1.nested2.b"))) + ) + ), + response -> { + Nested level1 = response.getAggregations().get("level1"); + assertThat(level1, notNullValue()); + assertThat(level1.getName(), equalTo("level1")); + assertThat(level1.getDocCount(), equalTo(2L)); + + StringTerms a = level1.getAggregations().get("a"); + Terms.Bucket bBucket = a.getBucketByKey("a"); + assertThat(bBucket.getDocCount(), equalTo(1L)); + + Nested level2 = bBucket.getAggregations().get("level2"); + assertThat(level2.getDocCount(), equalTo(1L)); + Sum sum = level2.getAggregations().get("sum"); + assertThat(sum.value(), equalTo(2d)); + + a = level1.getAggregations().get("a"); + bBucket = a.getBucketByKey("b"); + assertThat(bBucket.getDocCount(), equalTo(1L)); + + level2 = bBucket.getAggregations().get("level2"); + assertThat(level2.getDocCount(), equalTo(1L)); + sum = level2.getAggregations().get("sum"); + 
assertThat(sum.value(), equalTo(2d)); + } + ); } public void testEmptyAggregation() throws Exception { - SearchResponse searchResponse = prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) - .addAggregation(histogram("histo").field("value").interval(1L).minDocCount(0).subAggregation(nested("nested", "nested"))) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - Histogram histo = searchResponse.getAggregations().get("histo"); - assertThat(histo, Matchers.notNullValue()); - Histogram.Bucket bucket = histo.getBuckets().get(1); - assertThat(bucket, Matchers.notNullValue()); - - Nested nested = bucket.getAggregations().get("nested"); - assertThat(nested, Matchers.notNullValue()); - assertThat(nested.getName(), equalTo("nested")); - assertThat(nested.getDocCount(), is(0L)); + assertNoFailuresAndResponse( + prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) + .addAggregation(histogram("histo").field("value").interval(1L).minDocCount(0).subAggregation(nested("nested", "nested"))), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, Matchers.notNullValue()); + Histogram.Bucket bucket = histo.getBuckets().get(1); + assertThat(bucket, Matchers.notNullValue()); + + Nested nested = bucket.getAggregations().get("nested"); + assertThat(nested, Matchers.notNullValue()); + assertThat(nested.getName(), equalTo("nested")); + assertThat(nested.getDocCount(), is(0L)); + } + ); } // TODO previously we would detect if you tried to do a nested agg on a non-nested object field, @@ -468,60 +477,65 @@ public void testParentFilterResolvedCorrectly() throws Exception { }""", XContentType.JSON)); indexRandom(true, indexRequests); - SearchResponse response = prepareSearch("idx2").addAggregation( - terms("startDate").field("dates.month.start") - .subAggregation( - terms("endDate").field("dates.month.end") - .subAggregation( - terms("period").field("dates.month.label") - .subAggregation( - nested("ctxt_idfier_nested", "comments").subAggregation( - filter("comment_filter", termQuery("comments.identifier", "29111")).subAggregation( - nested("nested_tags", "comments.tags").subAggregation(terms("tag").field("comments.tags.name")) + assertNoFailuresAndResponse( + prepareSearch("idx2").addAggregation( + terms("startDate").field("dates.month.start") + .subAggregation( + terms("endDate").field("dates.month.end") + .subAggregation( + terms("period").field("dates.month.label") + .subAggregation( + nested("ctxt_idfier_nested", "comments").subAggregation( + filter("comment_filter", termQuery("comments.identifier", "29111")).subAggregation( + nested("nested_tags", "comments.tags").subAggregation( + terms("tag").field("comments.tags.name") + ) + ) ) ) - ) - ) - ) - ).get(); - assertNoFailures(response); - assertHitCount(response, 2); - - Terms startDate = response.getAggregations().get("startDate"); - assertThat(startDate.getBuckets().size(), equalTo(2)); - Terms.Bucket bucket = startDate.getBucketByKey("2014-11-01T00:00:00.000Z"); - assertThat(bucket.getDocCount(), equalTo(1L)); - Terms endDate = bucket.getAggregations().get("endDate"); - bucket = endDate.getBucketByKey("2014-11-30T00:00:00.000Z"); - assertThat(bucket.getDocCount(), equalTo(1L)); - Terms period = bucket.getAggregations().get("period"); - bucket = period.getBucketByKey("2014-11"); - assertThat(bucket.getDocCount(), equalTo(1L)); - Nested comments = bucket.getAggregations().get("ctxt_idfier_nested"); - 
assertThat(comments.getDocCount(), equalTo(2L)); - Filter filter = comments.getAggregations().get("comment_filter"); - assertThat(filter.getDocCount(), equalTo(1L)); - Nested nestedTags = filter.getAggregations().get("nested_tags"); - assertThat(nestedTags.getDocCount(), equalTo(0L)); // This must be 0 - Terms tags = nestedTags.getAggregations().get("tag"); - assertThat(tags.getBuckets().size(), equalTo(0)); // and this must be empty - - bucket = startDate.getBucketByKey("2014-12-01T00:00:00.000Z"); - assertThat(bucket.getDocCount(), equalTo(1L)); - endDate = bucket.getAggregations().get("endDate"); - bucket = endDate.getBucketByKey("2014-12-31T00:00:00.000Z"); - assertThat(bucket.getDocCount(), equalTo(1L)); - period = bucket.getAggregations().get("period"); - bucket = period.getBucketByKey("2014-12"); - assertThat(bucket.getDocCount(), equalTo(1L)); - comments = bucket.getAggregations().get("ctxt_idfier_nested"); - assertThat(comments.getDocCount(), equalTo(2L)); - filter = comments.getAggregations().get("comment_filter"); - assertThat(filter.getDocCount(), equalTo(1L)); - nestedTags = filter.getAggregations().get("nested_tags"); - assertThat(nestedTags.getDocCount(), equalTo(0L)); // This must be 0 - tags = nestedTags.getAggregations().get("tag"); - assertThat(tags.getBuckets().size(), equalTo(0)); // and this must be empty + ) + ) + ), + response -> { + assertHitCount(response, 2); + + Terms startDate = response.getAggregations().get("startDate"); + assertThat(startDate.getBuckets().size(), equalTo(2)); + Terms.Bucket bucket = startDate.getBucketByKey("2014-11-01T00:00:00.000Z"); + assertThat(bucket.getDocCount(), equalTo(1L)); + Terms endDate = bucket.getAggregations().get("endDate"); + bucket = endDate.getBucketByKey("2014-11-30T00:00:00.000Z"); + assertThat(bucket.getDocCount(), equalTo(1L)); + Terms period = bucket.getAggregations().get("period"); + bucket = period.getBucketByKey("2014-11"); + assertThat(bucket.getDocCount(), equalTo(1L)); + Nested comments = bucket.getAggregations().get("ctxt_idfier_nested"); + assertThat(comments.getDocCount(), equalTo(2L)); + Filter filter = comments.getAggregations().get("comment_filter"); + assertThat(filter.getDocCount(), equalTo(1L)); + Nested nestedTags = filter.getAggregations().get("nested_tags"); + assertThat(nestedTags.getDocCount(), equalTo(0L)); // This must be 0 + Terms tags = nestedTags.getAggregations().get("tag"); + assertThat(tags.getBuckets().size(), equalTo(0)); // and this must be empty + + bucket = startDate.getBucketByKey("2014-12-01T00:00:00.000Z"); + assertThat(bucket.getDocCount(), equalTo(1L)); + endDate = bucket.getAggregations().get("endDate"); + bucket = endDate.getBucketByKey("2014-12-31T00:00:00.000Z"); + assertThat(bucket.getDocCount(), equalTo(1L)); + period = bucket.getAggregations().get("period"); + bucket = period.getBucketByKey("2014-12"); + assertThat(bucket.getDocCount(), equalTo(1L)); + comments = bucket.getAggregations().get("ctxt_idfier_nested"); + assertThat(comments.getDocCount(), equalTo(2L)); + filter = comments.getAggregations().get("comment_filter"); + assertThat(filter.getDocCount(), equalTo(1L)); + nestedTags = filter.getAggregations().get("nested_tags"); + assertThat(nestedTags.getDocCount(), equalTo(0L)); // This must be 0 + tags = nestedTags.getAggregations().get("tag"); + assertThat(tags.getBuckets().size(), equalTo(0)); // and this must be empty + } + ); } public void testNestedSameDocIdProcessedMultipleTime() throws Exception { @@ -573,59 +587,62 @@ public void 
testNestedSameDocIdProcessedMultipleTime() throws Exception { .get(); refresh(); - SearchResponse response = prepareSearch("idx4").addAggregation( - terms("category").field("categories") - .subAggregation(nested("property", "property").subAggregation(terms("property_id").field("property.id"))) - ).get(); - assertNoFailures(response); - assertHitCount(response, 2); - - Terms category = response.getAggregations().get("category"); - assertThat(category.getBuckets().size(), equalTo(4)); - - Terms.Bucket bucket = category.getBucketByKey("1"); - assertThat(bucket.getDocCount(), equalTo(2L)); - Nested property = bucket.getAggregations().get("property"); - assertThat(property.getDocCount(), equalTo(6L)); - Terms propertyId = property.getAggregations().get("property_id"); - assertThat(propertyId.getBuckets().size(), equalTo(5)); - assertThat(propertyId.getBucketByKey("1").getDocCount(), equalTo(2L)); - assertThat(propertyId.getBucketByKey("2").getDocCount(), equalTo(1L)); - assertThat(propertyId.getBucketByKey("3").getDocCount(), equalTo(1L)); - assertThat(propertyId.getBucketByKey("4").getDocCount(), equalTo(1L)); - assertThat(propertyId.getBucketByKey("5").getDocCount(), equalTo(1L)); - - bucket = category.getBucketByKey("2"); - assertThat(bucket.getDocCount(), equalTo(2L)); - property = bucket.getAggregations().get("property"); - assertThat(property.getDocCount(), equalTo(6L)); - propertyId = property.getAggregations().get("property_id"); - assertThat(propertyId.getBuckets().size(), equalTo(5)); - assertThat(propertyId.getBucketByKey("1").getDocCount(), equalTo(2L)); - assertThat(propertyId.getBucketByKey("2").getDocCount(), equalTo(1L)); - assertThat(propertyId.getBucketByKey("3").getDocCount(), equalTo(1L)); - assertThat(propertyId.getBucketByKey("4").getDocCount(), equalTo(1L)); - assertThat(propertyId.getBucketByKey("5").getDocCount(), equalTo(1L)); - - bucket = category.getBucketByKey("3"); - assertThat(bucket.getDocCount(), equalTo(1L)); - property = bucket.getAggregations().get("property"); - assertThat(property.getDocCount(), equalTo(3L)); - propertyId = property.getAggregations().get("property_id"); - assertThat(propertyId.getBuckets().size(), equalTo(3)); - assertThat(propertyId.getBucketByKey("1").getDocCount(), equalTo(1L)); - assertThat(propertyId.getBucketByKey("2").getDocCount(), equalTo(1L)); - assertThat(propertyId.getBucketByKey("3").getDocCount(), equalTo(1L)); - - bucket = category.getBucketByKey("4"); - assertThat(bucket.getDocCount(), equalTo(1L)); - property = bucket.getAggregations().get("property"); - assertThat(property.getDocCount(), equalTo(3L)); - propertyId = property.getAggregations().get("property_id"); - assertThat(propertyId.getBuckets().size(), equalTo(3)); - assertThat(propertyId.getBucketByKey("1").getDocCount(), equalTo(1L)); - assertThat(propertyId.getBucketByKey("2").getDocCount(), equalTo(1L)); - assertThat(propertyId.getBucketByKey("3").getDocCount(), equalTo(1L)); + assertNoFailuresAndResponse( + prepareSearch("idx4").addAggregation( + terms("category").field("categories") + .subAggregation(nested("property", "property").subAggregation(terms("property_id").field("property.id"))) + ), + response -> { + assertHitCount(response, 2); + + Terms category = response.getAggregations().get("category"); + assertThat(category.getBuckets().size(), equalTo(4)); + + Terms.Bucket bucket = category.getBucketByKey("1"); + assertThat(bucket.getDocCount(), equalTo(2L)); + Nested property = bucket.getAggregations().get("property"); + assertThat(property.getDocCount(), 
equalTo(6L)); + Terms propertyId = property.getAggregations().get("property_id"); + assertThat(propertyId.getBuckets().size(), equalTo(5)); + assertThat(propertyId.getBucketByKey("1").getDocCount(), equalTo(2L)); + assertThat(propertyId.getBucketByKey("2").getDocCount(), equalTo(1L)); + assertThat(propertyId.getBucketByKey("3").getDocCount(), equalTo(1L)); + assertThat(propertyId.getBucketByKey("4").getDocCount(), equalTo(1L)); + assertThat(propertyId.getBucketByKey("5").getDocCount(), equalTo(1L)); + + bucket = category.getBucketByKey("2"); + assertThat(bucket.getDocCount(), equalTo(2L)); + property = bucket.getAggregations().get("property"); + assertThat(property.getDocCount(), equalTo(6L)); + propertyId = property.getAggregations().get("property_id"); + assertThat(propertyId.getBuckets().size(), equalTo(5)); + assertThat(propertyId.getBucketByKey("1").getDocCount(), equalTo(2L)); + assertThat(propertyId.getBucketByKey("2").getDocCount(), equalTo(1L)); + assertThat(propertyId.getBucketByKey("3").getDocCount(), equalTo(1L)); + assertThat(propertyId.getBucketByKey("4").getDocCount(), equalTo(1L)); + assertThat(propertyId.getBucketByKey("5").getDocCount(), equalTo(1L)); + + bucket = category.getBucketByKey("3"); + assertThat(bucket.getDocCount(), equalTo(1L)); + property = bucket.getAggregations().get("property"); + assertThat(property.getDocCount(), equalTo(3L)); + propertyId = property.getAggregations().get("property_id"); + assertThat(propertyId.getBuckets().size(), equalTo(3)); + assertThat(propertyId.getBucketByKey("1").getDocCount(), equalTo(1L)); + assertThat(propertyId.getBucketByKey("2").getDocCount(), equalTo(1L)); + assertThat(propertyId.getBucketByKey("3").getDocCount(), equalTo(1L)); + + bucket = category.getBucketByKey("4"); + assertThat(bucket.getDocCount(), equalTo(1L)); + property = bucket.getAggregations().get("property"); + assertThat(property.getDocCount(), equalTo(3L)); + propertyId = property.getAggregations().get("property_id"); + assertThat(propertyId.getBuckets().size(), equalTo(3)); + assertThat(propertyId.getBucketByKey("1").getDocCount(), equalTo(1L)); + assertThat(propertyId.getBucketByKey("2").getDocCount(), equalTo(1L)); + assertThat(propertyId.getBucketByKey("3").getDocCount(), equalTo(1L)); + } + ); } public void testFilterAggInsideNestedAgg() throws Exception { @@ -747,45 +764,52 @@ public void testFilterAggInsideNestedAgg() throws Exception { .get(); refresh(); - SearchResponse response = prepareSearch("classes").addAggregation( - nested("to_method", "methods").subAggregation( - filter( - "num_string_params", - nestedQuery("methods.parameters", termQuery("methods.parameters.type", "String"), ScoreMode.None) + assertNoFailuresAndResponse( + prepareSearch("classes").addAggregation( + nested("to_method", "methods").subAggregation( + filter( + "num_string_params", + nestedQuery("methods.parameters", termQuery("methods.parameters.type", "String"), ScoreMode.None) + ) ) - ) - ).get(); - Nested toMethods = response.getAggregations().get("to_method"); - Filter numStringParams = toMethods.getAggregations().get("num_string_params"); - assertThat(numStringParams.getDocCount(), equalTo(3L)); - - response = prepareSearch("classes").addAggregation( - nested("to_method", "methods").subAggregation( - terms("return_type").field("methods.return_type") - .subAggregation( - filter( - "num_string_params", - nestedQuery("methods.parameters", termQuery("methods.parameters.type", "String"), ScoreMode.None) + ), + response -> { + Nested toMethods = 
response.getAggregations().get("to_method"); + Filter numStringParams = toMethods.getAggregations().get("num_string_params"); + assertThat(numStringParams.getDocCount(), equalTo(3L)); + } + ); + assertNoFailuresAndResponse( + prepareSearch("classes").addAggregation( + nested("to_method", "methods").subAggregation( + terms("return_type").field("methods.return_type") + .subAggregation( + filter( + "num_string_params", + nestedQuery("methods.parameters", termQuery("methods.parameters.type", "String"), ScoreMode.None) + ) ) - ) - ) - ).get(); - toMethods = response.getAggregations().get("to_method"); - Terms terms = toMethods.getAggregations().get("return_type"); - Bucket bucket = terms.getBucketByKey("void"); - assertThat(bucket.getDocCount(), equalTo(3L)); - numStringParams = bucket.getAggregations().get("num_string_params"); - assertThat(numStringParams.getDocCount(), equalTo(2L)); - - bucket = terms.getBucketByKey("QueryBuilder"); - assertThat(bucket.getDocCount(), equalTo(2L)); - numStringParams = bucket.getAggregations().get("num_string_params"); - assertThat(numStringParams.getDocCount(), equalTo(1L)); - - bucket = terms.getBucketByKey("Query"); - assertThat(bucket.getDocCount(), equalTo(1L)); - numStringParams = bucket.getAggregations().get("num_string_params"); - assertThat(numStringParams.getDocCount(), equalTo(0L)); + ) + ), + response -> { + Nested toMethods = response.getAggregations().get("to_method"); + Terms terms = toMethods.getAggregations().get("return_type"); + Bucket bucket = terms.getBucketByKey("void"); + assertThat(bucket.getDocCount(), equalTo(3L)); + Filter numStringParams = bucket.getAggregations().get("num_string_params"); + assertThat(numStringParams.getDocCount(), equalTo(2L)); + + bucket = terms.getBucketByKey("QueryBuilder"); + assertThat(bucket.getDocCount(), equalTo(2L)); + numStringParams = bucket.getAggregations().get("num_string_params"); + assertThat(numStringParams.getDocCount(), equalTo(1L)); + + bucket = terms.getBucketByKey("Query"); + assertThat(bucket.getDocCount(), equalTo(1L)); + numStringParams = bucket.getAggregations().get("num_string_params"); + assertThat(numStringParams.getDocCount(), equalTo(0L)); + } + ); } public void testExtractInnerHitBuildersWithDuplicateHitName() throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RandomSamplerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RandomSamplerIT.java index 66978eba00e26..32496434d32d3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RandomSamplerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RandomSamplerIT.java @@ -9,8 +9,6 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.sampler.random.InternalRandomSampler; import org.elasticsearch.search.aggregations.bucket.sampler.random.RandomSamplerAggregationBuilder; @@ -24,6 +22,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.avg; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static 
org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.lessThan; @@ -87,40 +86,47 @@ public void setupSuiteScopeCluster() throws Exception { } public void testRandomSampler() { - double sampleMonotonicValue = 0.0; - double sampleNumericValue = 0.0; - double sampledDocCount = 0.0; + double[] sampleMonotonicValue = new double[1]; + double[] sampleNumericValue = new double[1]; + double[] sampledDocCount = new double[1]; for (int i = 0; i < NUM_SAMPLE_RUNS; i++) { - SearchRequest sampledRequest = prepareSearch("idx").addAggregation( - new RandomSamplerAggregationBuilder("sampler").setProbability(PROBABILITY) - .subAggregation(avg("mean_monotonic").field(MONOTONIC_VALUE)) - .subAggregation(avg("mean_numeric").field(NUMERIC_VALUE)) - ).request(); - InternalRandomSampler sampler = client().search(sampledRequest).actionGet().getAggregations().get("sampler"); - sampleMonotonicValue += ((Avg) sampler.getAggregations().get("mean_monotonic")).getValue(); - sampleNumericValue += ((Avg) sampler.getAggregations().get("mean_numeric")).getValue(); - sampledDocCount += sampler.getDocCount(); + assertResponse( + prepareSearch("idx").addAggregation( + new RandomSamplerAggregationBuilder("sampler").setProbability(PROBABILITY) + .subAggregation(avg("mean_monotonic").field(MONOTONIC_VALUE)) + .subAggregation(avg("mean_numeric").field(NUMERIC_VALUE)) + ), + response -> { + InternalRandomSampler sampler = response.getAggregations().get("sampler"); + sampleMonotonicValue[0] += ((Avg) sampler.getAggregations().get("mean_monotonic")).getValue(); + sampleNumericValue[0] += ((Avg) sampler.getAggregations().get("mean_numeric")).getValue(); + sampledDocCount[0] += sampler.getDocCount(); + } + ); } - sampledDocCount /= NUM_SAMPLE_RUNS; - sampleMonotonicValue /= NUM_SAMPLE_RUNS; - sampleNumericValue /= NUM_SAMPLE_RUNS; + sampledDocCount[0] /= NUM_SAMPLE_RUNS; + sampleMonotonicValue[0] /= NUM_SAMPLE_RUNS; + sampleNumericValue[0] /= NUM_SAMPLE_RUNS; double expectedDocCount = PROBABILITY * numDocs; // We're taking the mean of NUM_SAMPLE_RUNS for which each run has standard deviation // sqrt(PROBABILITY * numDocs) so the 6 sigma error, for which we expect 1 failure in // 500M runs, is 6 * sqrt(PROBABILITY * numDocs / NUM_SAMPLE_RUNS). 
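// (For the record, the derivation behind that bound: with sampling probability
// p = PROBABILITY, each run's sampled doc count is approximately
// Binomial(numDocs, p), whose standard deviation sqrt(numDocs * p * (1 - p))
// is close to sqrt(PROBABILITY * numDocs) when p is small. Averaging
// NUM_SAMPLE_RUNS independent runs divides the variance by NUM_SAMPLE_RUNS,
// and a two-sided 6-sigma normal tail is ~2e-9, i.e. about one failure in 500M.)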
         double maxCountError = 6.0 * Math.sqrt(PROBABILITY * numDocs / NUM_SAMPLE_RUNS);
-        assertThat(Math.abs(sampledDocCount - expectedDocCount), lessThan(maxCountError));
-
-        SearchResponse trueValueResponse = prepareSearch("idx").addAggregation(avg("mean_monotonic").field(MONOTONIC_VALUE))
-            .addAggregation(avg("mean_numeric").field(NUMERIC_VALUE))
-            .get();
-        double trueMonotonic = ((Avg) trueValueResponse.getAggregations().get("mean_monotonic")).getValue();
-        double trueNumeric = ((Avg) trueValueResponse.getAggregations().get("mean_numeric")).getValue();
-        double maxMonotonicError = 6.0 * Math.sqrt(varMonotonic / (numDocs * PROBABILITY * NUM_SAMPLE_RUNS));
-        double maxNumericError = 6.0 * Math.sqrt(varNumeric / (numDocs * PROBABILITY * NUM_SAMPLE_RUNS));
-        assertThat(Math.abs(sampleMonotonicValue - trueMonotonic), lessThan(maxMonotonicError));
-        assertThat(Math.abs(sampleNumericValue - trueNumeric), lessThan(maxNumericError));
+        assertThat(Math.abs(sampledDocCount[0] - expectedDocCount), lessThan(maxCountError));
+
+        assertResponse(
+            prepareSearch("idx").addAggregation(avg("mean_monotonic").field(MONOTONIC_VALUE))
+                .addAggregation(avg("mean_numeric").field(NUMERIC_VALUE)),
+            response -> {
+                double trueMonotonic = ((Avg) response.getAggregations().get("mean_monotonic")).getValue();
+                double trueNumeric = ((Avg) response.getAggregations().get("mean_numeric")).getValue();
+                double maxMonotonicError = 6.0 * Math.sqrt(varMonotonic / (numDocs * PROBABILITY * NUM_SAMPLE_RUNS));
+                double maxNumericError = 6.0 * Math.sqrt(varNumeric / (numDocs * PROBABILITY * NUM_SAMPLE_RUNS));
+                assertThat(Math.abs(sampleMonotonicValue[0] - trueMonotonic), lessThan(maxMonotonicError));
+                assertThat(Math.abs(sampleNumericValue[0] - trueNumeric), lessThan(maxNumericError));
+            }
+        );
     }
 
     public void testRandomSamplerHistogram() {
@@ -129,28 +135,32 @@ public void testRandomSamplerHistogram() {
         Map<String, Double> sampledDocCount = new HashMap<>();
         for (int i = 0; i < NUM_SAMPLE_RUNS; i++) {
-            SearchRequest sampledRequest = prepareSearch("idx").addAggregation(
-                new RandomSamplerAggregationBuilder("sampler").setProbability(PROBABILITY)
-                    .subAggregation(
-                        histogram("histo").field(NUMERIC_VALUE)
-                            .interval(5.0)
-                            .subAggregation(avg("mean_monotonic").field(MONOTONIC_VALUE))
-                            .subAggregation(avg("mean_numeric").field(NUMERIC_VALUE))
-                    )
-            ).request();
-            InternalRandomSampler sampler = client().search(sampledRequest).actionGet().getAggregations().get("sampler");
-            Histogram histo = sampler.getAggregations().get("histo");
-            for (Histogram.Bucket bucket : histo.getBuckets()) {
-                sampleMonotonicValue.compute(
-                    bucket.getKeyAsString(),
-                    (k, v) -> ((Avg) bucket.getAggregations().get("mean_monotonic")).getValue() + (v == null ? 0 : v)
-                );
-                sampleNumericValue.compute(
-                    bucket.getKeyAsString(),
-                    (k, v) -> ((Avg) bucket.getAggregations().get("mean_numeric")).getValue() + (v == null ? 0 : v)
-                );
-                sampledDocCount.compute(bucket.getKeyAsString(), (k, v) -> bucket.getDocCount() + (v == null ?
0 : v)); - } + assertResponse( + prepareSearch("idx").addAggregation( + new RandomSamplerAggregationBuilder("sampler").setProbability(PROBABILITY) + .subAggregation( + histogram("histo").field(NUMERIC_VALUE) + .interval(5.0) + .subAggregation(avg("mean_monotonic").field(MONOTONIC_VALUE)) + .subAggregation(avg("mean_numeric").field(NUMERIC_VALUE)) + ) + ), + response -> { + InternalRandomSampler sampler = response.getAggregations().get("sampler"); + Histogram histo = sampler.getAggregations().get("histo"); + for (Histogram.Bucket bucket : histo.getBuckets()) { + sampleMonotonicValue.compute( + bucket.getKeyAsString(), + (k, v) -> ((Avg) bucket.getAggregations().get("mean_monotonic")).getValue() + (v == null ? 0 : v) + ); + sampleNumericValue.compute( + bucket.getKeyAsString(), + (k, v) -> ((Avg) bucket.getAggregations().get("mean_numeric")).getValue() + (v == null ? 0 : v) + ); + sampledDocCount.compute(bucket.getKeyAsString(), (k, v) -> bucket.getDocCount() + (v == null ? 0 : v)); + } + } + ); } for (String key : sampledDocCount.keySet()) { sampledDocCount.put(key, sampledDocCount.get(key) / NUM_SAMPLE_RUNS); @@ -158,25 +168,29 @@ public void testRandomSamplerHistogram() { sampleMonotonicValue.put(key, sampleMonotonicValue.get(key) / NUM_SAMPLE_RUNS); } - SearchResponse trueValueResponse = prepareSearch("idx").addAggregation( - histogram("histo").field(NUMERIC_VALUE) - .interval(5.0) - .subAggregation(avg("mean_monotonic").field(MONOTONIC_VALUE)) - .subAggregation(avg("mean_numeric").field(NUMERIC_VALUE)) - ).get(); - Histogram histogram = trueValueResponse.getAggregations().get("histo"); - for (Histogram.Bucket bucket : histogram.getBuckets()) { - long numDocs = bucket.getDocCount(); - // Note the true count is estimated by dividing the bucket sample doc count by PROBABILITY. - double maxCountError = 6.0 * Math.sqrt(numDocs / NUM_SAMPLE_RUNS / (0.5 * PROBABILITY)); - assertThat(Math.abs(sampledDocCount.get(bucket.getKeyAsString()) - numDocs), lessThan(maxCountError)); - double trueMonotonic = ((Avg) bucket.getAggregations().get("mean_monotonic")).getValue(); - double trueNumeric = ((Avg) bucket.getAggregations().get("mean_numeric")).getValue(); - double maxMonotonicError = 6.0 * Math.sqrt(varMonotonic / (numDocs * 0.5 * PROBABILITY * NUM_SAMPLE_RUNS)); - double maxNumericError = 6.0 * Math.sqrt(varNumeric / (numDocs * 0.5 * PROBABILITY * NUM_SAMPLE_RUNS)); - assertThat(Math.abs(sampleMonotonicValue.get(bucket.getKeyAsString()) - trueMonotonic), lessThan(maxMonotonicError)); - assertThat(Math.abs(sampleNumericValue.get(bucket.getKeyAsString()) - trueNumeric), lessThan(maxNumericError)); - } + assertResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field(NUMERIC_VALUE) + .interval(5.0) + .subAggregation(avg("mean_monotonic").field(MONOTONIC_VALUE)) + .subAggregation(avg("mean_numeric").field(NUMERIC_VALUE)) + ), + response -> { + Histogram histogram = response.getAggregations().get("histo"); + for (Histogram.Bucket bucket : histogram.getBuckets()) { + long numDocs = bucket.getDocCount(); + // Note the true count is estimated by dividing the bucket sample doc count by PROBABILITY. 
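Expanding the comment just above: a sampled bucket holds approximately (true bucket count) * PROBABILITY documents, so dividing the sampled count by PROBABILITY recovers an estimate of the true count, and averaging NUM_SAMPLE_RUNS runs shrinks its variance exactly as in testRandomSampler. The extra 0.5 in the denominator on the next line appears to act as a safety factor, since it doubles the variance allowance. A short standalone sketch of the estimate and its widened bound; the constants are illustrative assumptions, not the test's values:

    import static java.lang.Math.sqrt;

    class BucketEstimateSketch {
        public static void main(String[] args) {
            double probability = 0.25;   // sampling probability p (assumed)
            int sampleRuns = 25;         // independent runs averaged (assumed)
            long trueBucketDocs = 8_000; // true docs in one histogram bucket (assumed)

            // A sampled bucket holds ~ p * trueBucketDocs docs, so dividing the
            // sampled count by p recovers an estimate of the true count.
            double sampledCount = probability * trueBucketDocs;
            double estimate = sampledCount / probability;

            // One estimate has variance ~ trueBucketDocs / p; averaging divides
            // by sampleRuns, and the 0.5 factor doubles the allowance before
            // taking six sigma, mirroring the bound checked below.
            double maxCountError = 6.0 * sqrt(trueBucketDocs / (double) sampleRuns / (0.5 * probability));
            System.out.printf("estimate %.0f, allowed error %.1f%n", estimate, maxCountError);
        }
    }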
+ double maxCountError = 6.0 * Math.sqrt(numDocs / NUM_SAMPLE_RUNS / (0.5 * PROBABILITY)); + assertThat(Math.abs(sampledDocCount.get(bucket.getKeyAsString()) - numDocs), lessThan(maxCountError)); + double trueMonotonic = ((Avg) bucket.getAggregations().get("mean_monotonic")).getValue(); + double trueNumeric = ((Avg) bucket.getAggregations().get("mean_numeric")).getValue(); + double maxMonotonicError = 6.0 * Math.sqrt(varMonotonic / (numDocs * 0.5 * PROBABILITY * NUM_SAMPLE_RUNS)); + double maxNumericError = 6.0 * Math.sqrt(varNumeric / (numDocs * 0.5 * PROBABILITY * NUM_SAMPLE_RUNS)); + assertThat(Math.abs(sampleMonotonicValue.get(bucket.getKeyAsString()) - trueMonotonic), lessThan(maxMonotonicError)); + assertThat(Math.abs(sampleNumericValue.get(bucket.getKeyAsString()) - trueNumeric), lessThan(maxNumericError)); + } + } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java index 742d403ba42b0..441187916cb7f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java @@ -9,7 +9,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.plugins.Plugin; @@ -41,6 +40,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -133,302 +133,309 @@ public void setupSuiteScopeCluster() throws Exception { } public void testRangeAsSubAggregation() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - terms("terms").field(MULTI_VALUED_FIELD_NAME) - .size(100) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(range("range").field(SINGLE_VALUED_FIELD_NAME).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6)) - ).get(); - - assertNoFailures(response); - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getBuckets().size(), equalTo(numDocs + 1)); - for (int i = 1; i < numDocs + 2; ++i) { - Terms.Bucket bucket = terms.getBucketByKey("" + i); - assertThat(bucket, notNullValue()); - final long docCount = i == 1 || i == numDocs + 1 ? 
1 : 2;
-            assertThat(bucket.getDocCount(), equalTo(docCount));
-            Range range = bucket.getAggregations().get("range");
-            List<? extends Range.Bucket> buckets = range.getBuckets();
-            Range.Bucket rangeBucket = buckets.get(0);
-            assertThat(rangeBucket.getKey(), equalTo("*-3.0"));
-            assertThat(rangeBucket.getKeyAsString(), equalTo("*-3.0"));
-            assertThat(rangeBucket, notNullValue());
-            assertThat(rangeBucket.getFromAsString(), nullValue());
-            assertThat(rangeBucket.getToAsString(), equalTo("3.0"));
-            if (i == 1 || i == 3) {
-                assertThat(rangeBucket.getDocCount(), equalTo(1L));
-            } else if (i == 2) {
-                assertThat(rangeBucket.getDocCount(), equalTo(2L));
-            } else {
-                assertThat(rangeBucket.getDocCount(), equalTo(0L));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                terms("terms").field(MULTI_VALUED_FIELD_NAME)
+                    .size(100)
+                    .collectMode(randomFrom(SubAggCollectionMode.values()))
+                    .subAggregation(range("range").field(SINGLE_VALUED_FIELD_NAME).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6))
+            ),
+            response -> {
+                Terms terms = response.getAggregations().get("terms");
+                assertThat(terms, notNullValue());
+                assertThat(terms.getBuckets().size(), equalTo(numDocs + 1));
+                for (int i = 1; i < numDocs + 2; ++i) {
+                    Terms.Bucket bucket = terms.getBucketByKey("" + i);
+                    assertThat(bucket, notNullValue());
+                    final long docCount = i == 1 || i == numDocs + 1 ? 1 : 2;
+                    assertThat(bucket.getDocCount(), equalTo(docCount));
+                    Range range = bucket.getAggregations().get("range");
+                    List<? extends Range.Bucket> buckets = range.getBuckets();
+                    Range.Bucket rangeBucket = buckets.get(0);
+                    assertThat(rangeBucket.getKey(), equalTo("*-3.0"));
+                    assertThat(rangeBucket.getKeyAsString(), equalTo("*-3.0"));
+                    assertThat(rangeBucket, notNullValue());
+                    assertThat(rangeBucket.getFromAsString(), nullValue());
+                    assertThat(rangeBucket.getToAsString(), equalTo("3.0"));
+                    if (i == 1 || i == 3) {
+                        assertThat(rangeBucket.getDocCount(), equalTo(1L));
+                    } else if (i == 2) {
+                        assertThat(rangeBucket.getDocCount(), equalTo(2L));
+                    } else {
+                        assertThat(rangeBucket.getDocCount(), equalTo(0L));
+                    }
+                    rangeBucket = buckets.get(1);
+                    assertThat(rangeBucket.getKey(), equalTo("3.0-6.0"));
+                    assertThat(rangeBucket.getKeyAsString(), equalTo("3.0-6.0"));
+                    assertThat(rangeBucket, notNullValue());
+                    assertThat(rangeBucket.getFromAsString(), equalTo("3.0"));
+                    assertThat(rangeBucket.getToAsString(), equalTo("6.0"));
+                    if (i == 3 || i == 6) {
+                        assertThat(rangeBucket.getDocCount(), equalTo(1L));
+                    } else if (i == 4 || i == 5) {
+                        assertThat(rangeBucket.getDocCount(), equalTo(2L));
+                    } else {
+                        assertThat(rangeBucket.getDocCount(), equalTo(0L));
+                    }
+                    rangeBucket = buckets.get(2);
+                    assertThat(rangeBucket.getKey(), equalTo("6.0-*"));
+                    assertThat(rangeBucket.getKeyAsString(), equalTo("6.0-*"));
+                    assertThat(rangeBucket, notNullValue());
+                    assertThat(rangeBucket.getFromAsString(), equalTo("6.0"));
+                    assertThat(rangeBucket.getToAsString(), nullValue());
+                    if (i == 6 || i == numDocs + 1) {
+                        assertThat(rangeBucket.getDocCount(), equalTo(1L));
+                    } else if (i < 6) {
+                        assertThat(rangeBucket.getDocCount(), equalTo(0L));
+                    } else {
+                        assertThat(rangeBucket.getDocCount(), equalTo(2L));
+                    }
+                }
             }
-            rangeBucket = buckets.get(1);
-            assertThat(rangeBucket.getKey(), equalTo("3.0-6.0"));
-            assertThat(rangeBucket.getKeyAsString(), equalTo("3.0-6.0"));
-            assertThat(rangeBucket, notNullValue());
-            assertThat(rangeBucket.getFromAsString(), equalTo("3.0"));
-            assertThat(rangeBucket.getToAsString(), equalTo("6.0"));
-            if (i == 3 || i == 6) {
-                assertThat(rangeBucket.getDocCount(),
equalTo(1L)); - } else if (i == 4 || i == 5) { - assertThat(rangeBucket.getDocCount(), equalTo(2L)); - } else { - assertThat(rangeBucket.getDocCount(), equalTo(0L)); - } - rangeBucket = buckets.get(2); - assertThat(rangeBucket.getKey(), equalTo("6.0-*")); - assertThat(rangeBucket.getKeyAsString(), equalTo("6.0-*")); - assertThat(rangeBucket, notNullValue()); - assertThat(rangeBucket.getFromAsString(), equalTo("6.0")); - assertThat(rangeBucket.getToAsString(), nullValue()); - if (i == 6 || i == numDocs + 1) { - assertThat(rangeBucket.getDocCount(), equalTo(1L)); - } else if (i < 6) { - assertThat(rangeBucket.getDocCount(), equalTo(0L)); - } else { - assertThat(rangeBucket.getDocCount(), equalTo(2L)); - } - } + ); } public void testSingleValueField() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - range("range").field(SINGLE_VALUED_FIELD_NAME).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6) - ).get(); - - assertNoFailures(response); - - Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - List buckets = range.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("*-3.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0)); - assertThat(bucket.getFromAsString(), nullValue()); - assertThat(bucket.getToAsString(), equalTo("3.0")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("3.0-6.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0)); - assertThat(bucket.getFromAsString(), equalTo("3.0")); - assertThat(bucket.getToAsString(), equalTo("6.0")); - assertThat(bucket.getDocCount(), equalTo(3L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("6.0-*")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(bucket.getFromAsString(), equalTo("6.0")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(numDocs - 5L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + range("range").field(SINGLE_VALUED_FIELD_NAME).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6) + ), + response -> { + Range range = response.getAggregations().get("range"); + assertThat(range, notNullValue()); + assertThat(range.getName(), equalTo("range")); + List buckets = range.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("*-3.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0)); + assertThat(bucket.getFromAsString(), nullValue()); + assertThat(bucket.getToAsString(), equalTo("3.0")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("3.0-6.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), 
equalTo(3.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0)); + assertThat(bucket.getFromAsString(), equalTo("3.0")); + assertThat(bucket.getToAsString(), equalTo("6.0")); + assertThat(bucket.getDocCount(), equalTo(3L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("6.0-*")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(bucket.getFromAsString(), equalTo("6.0")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(numDocs - 5L)); + } + ); } public void testSingleValueFieldWithFormat() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - range("range").field(SINGLE_VALUED_FIELD_NAME).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6).format("#") - ).get(); - - assertNoFailures(response); - - Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - List buckets = range.getBuckets(); - assertThat(range.getBuckets().size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("*-3")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0)); - assertThat(bucket.getFromAsString(), nullValue()); - assertThat(bucket.getToAsString(), equalTo("3")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("3-6")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0)); - assertThat(bucket.getFromAsString(), equalTo("3")); - assertThat(bucket.getToAsString(), equalTo("6")); - assertThat(bucket.getDocCount(), equalTo(3L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("6-*")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(bucket.getFromAsString(), equalTo("6")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(numDocs - 5L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + range("range").field(SINGLE_VALUED_FIELD_NAME).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6).format("#") + ), + response -> { + Range range = response.getAggregations().get("range"); + assertThat(range, notNullValue()); + assertThat(range.getName(), equalTo("range")); + List buckets = range.getBuckets(); + assertThat(range.getBuckets().size(), equalTo(3)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("*-3")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0)); + assertThat(bucket.getFromAsString(), nullValue()); + assertThat(bucket.getToAsString(), equalTo("3")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("3-6")); + assertThat(((Number) 
bucket.getFrom()).doubleValue(), equalTo(3.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0)); + assertThat(bucket.getFromAsString(), equalTo("3")); + assertThat(bucket.getToAsString(), equalTo("6")); + assertThat(bucket.getDocCount(), equalTo(3L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("6-*")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(bucket.getFromAsString(), equalTo("6")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(numDocs - 5L)); + } + ); } public void testSingleValueFieldWithCustomKey() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - range("range").field(SINGLE_VALUED_FIELD_NAME).addUnboundedTo("r1", 3).addRange("r2", 3, 6).addUnboundedFrom("r3", 6) - ).get(); - - assertNoFailures(response); - - Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - List buckets = range.getBuckets(); - assertThat(range.getBuckets().size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("r1")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0)); - assertThat(bucket.getFromAsString(), nullValue()); - assertThat(bucket.getToAsString(), equalTo("3.0")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("r2")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0)); - assertThat(bucket.getFromAsString(), equalTo("3.0")); - assertThat(bucket.getToAsString(), equalTo("6.0")); - assertThat(bucket.getDocCount(), equalTo(3L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("r3")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(bucket.getFromAsString(), equalTo("6.0")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(numDocs - 5L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + range("range").field(SINGLE_VALUED_FIELD_NAME).addUnboundedTo("r1", 3).addRange("r2", 3, 6).addUnboundedFrom("r3", 6) + ), + response -> { + Range range = response.getAggregations().get("range"); + assertThat(range, notNullValue()); + assertThat(range.getName(), equalTo("range")); + List buckets = range.getBuckets(); + assertThat(range.getBuckets().size(), equalTo(3)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("r1")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0)); + assertThat(bucket.getFromAsString(), nullValue()); + assertThat(bucket.getToAsString(), equalTo("3.0")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("r2")); + 
assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0)); + assertThat(bucket.getFromAsString(), equalTo("3.0")); + assertThat(bucket.getToAsString(), equalTo("6.0")); + assertThat(bucket.getDocCount(), equalTo(3L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("r3")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(bucket.getFromAsString(), equalTo("6.0")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(numDocs - 5L)); + } + ); } public void testSingleValuedFieldWithSubAggregation() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - range("range").field(SINGLE_VALUED_FIELD_NAME) - .addUnboundedTo(3) - .addRange(3, 6) - .addUnboundedFrom(6) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - ).get(); - - assertNoFailures(response); - - Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - List buckets = range.getBuckets(); - assertThat(range.getBuckets().size(), equalTo(3)); - Object[] propertiesKeys = (Object[]) ((InternalAggregation) range).getProperty("_key"); - Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) range).getProperty("_count"); - Object[] propertiesCounts = (Object[]) ((InternalAggregation) range).getProperty("sum.value"); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("*-3.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0)); - assertThat(bucket.getFromAsString(), nullValue()); - assertThat(bucket.getToAsString(), equalTo("3.0")); - assertThat(bucket.getDocCount(), equalTo(2L)); - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - assertThat(sum.value(), equalTo(3.0)); // 1 + 2 - assertThat(propertiesKeys[0], equalTo("*-3.0")); - assertThat(propertiesDocCounts[0], equalTo(2L)); - assertThat(propertiesCounts[0], equalTo(3.0)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("3.0-6.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0)); - assertThat(bucket.getFromAsString(), equalTo("3.0")); - assertThat(bucket.getToAsString(), equalTo("6.0")); - assertThat(bucket.getDocCount(), equalTo(3L)); - sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - assertThat(sum.value(), equalTo(12.0)); // 3 + 4 + 5 - assertThat(propertiesKeys[1], equalTo("3.0-6.0")); - assertThat(propertiesDocCounts[1], equalTo(3L)); - assertThat(propertiesCounts[1], equalTo(12.0)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("6.0-*")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(bucket.getFromAsString(), equalTo("6.0")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(numDocs - 5L)); - sum = bucket.getAggregations().get("sum"); - 
assertThat(sum, notNullValue()); - long total = 0; - for (int i = 5; i < numDocs; ++i) { - total += i + 1; - } - assertThat(sum.value(), equalTo((double) total)); - assertThat(propertiesKeys[2], equalTo("6.0-*")); - assertThat(propertiesDocCounts[2], equalTo(numDocs - 5L)); - assertThat(propertiesCounts[2], equalTo((double) total)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + range("range").field(SINGLE_VALUED_FIELD_NAME) + .addUnboundedTo(3) + .addRange(3, 6) + .addUnboundedFrom(6) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ), + response -> { + Range range = response.getAggregations().get("range"); + assertThat(range, notNullValue()); + assertThat(range.getName(), equalTo("range")); + List buckets = range.getBuckets(); + assertThat(range.getBuckets().size(), equalTo(3)); + Object[] propertiesKeys = (Object[]) ((InternalAggregation) range).getProperty("_key"); + Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) range).getProperty("_count"); + Object[] propertiesCounts = (Object[]) ((InternalAggregation) range).getProperty("sum.value"); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("*-3.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0)); + assertThat(bucket.getFromAsString(), nullValue()); + assertThat(bucket.getToAsString(), equalTo("3.0")); + assertThat(bucket.getDocCount(), equalTo(2L)); + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + assertThat(sum.value(), equalTo(3.0)); // 1 + 2 + assertThat(propertiesKeys[0], equalTo("*-3.0")); + assertThat(propertiesDocCounts[0], equalTo(2L)); + assertThat(propertiesCounts[0], equalTo(3.0)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("3.0-6.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0)); + assertThat(bucket.getFromAsString(), equalTo("3.0")); + assertThat(bucket.getToAsString(), equalTo("6.0")); + assertThat(bucket.getDocCount(), equalTo(3L)); + sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + assertThat(sum.value(), equalTo(12.0)); // 3 + 4 + 5 + assertThat(propertiesKeys[1], equalTo("3.0-6.0")); + assertThat(propertiesDocCounts[1], equalTo(3L)); + assertThat(propertiesCounts[1], equalTo(12.0)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("6.0-*")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(bucket.getFromAsString(), equalTo("6.0")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(numDocs - 5L)); + sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + long total = 0; + for (int i = 5; i < numDocs; ++i) { + total += i + 1; + } + assertThat(sum.value(), equalTo((double) total)); + assertThat(propertiesKeys[2], equalTo("6.0-*")); + assertThat(propertiesDocCounts[2], equalTo(numDocs - 5L)); + assertThat(propertiesCounts[2], equalTo((double) total)); + } + ); } public void testSingleValuedFieldWithValueScript() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - 
range("range").field(SINGLE_VALUED_FIELD_NAME) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap())) - .addUnboundedTo(3) - .addRange(3, 6) - .addUnboundedFrom(6) - ).get(); - - assertNoFailures(response); - - Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - List buckets = range.getBuckets(); - assertThat(range.getBuckets().size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("*-3.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0)); - assertThat(bucket.getFromAsString(), nullValue()); - assertThat(bucket.getToAsString(), equalTo("3.0")); - assertThat(bucket.getDocCount(), equalTo(1L)); // 2 - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("3.0-6.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0)); - assertThat(bucket.getFromAsString(), equalTo("3.0")); - assertThat(bucket.getToAsString(), equalTo("6.0")); - assertThat(bucket.getDocCount(), equalTo(3L)); // 3, 4, 5 - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("6.0-*")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(bucket.getFromAsString(), equalTo("6.0")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + range("range").field(SINGLE_VALUED_FIELD_NAME) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap())) + .addUnboundedTo(3) + .addRange(3, 6) + .addUnboundedFrom(6) + ), + response -> { + Range range = response.getAggregations().get("range"); + assertThat(range, notNullValue()); + assertThat(range.getName(), equalTo("range")); + List buckets = range.getBuckets(); + assertThat(range.getBuckets().size(), equalTo(3)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("*-3.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0)); + assertThat(bucket.getFromAsString(), nullValue()); + assertThat(bucket.getToAsString(), equalTo("3.0")); + assertThat(bucket.getDocCount(), equalTo(1L)); // 2 + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("3.0-6.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0)); + assertThat(bucket.getFromAsString(), equalTo("3.0")); + assertThat(bucket.getToAsString(), equalTo("6.0")); + assertThat(bucket.getDocCount(), equalTo(3L)); // 3, 4, 5 + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("6.0-*")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(bucket.getFromAsString(), 
equalTo("6.0")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); + } + ); } /* @@ -445,44 +452,45 @@ public void testSingleValuedFieldWithValueScript() throws Exception { */ public void testMultiValuedField() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - range("range").field(MULTI_VALUED_FIELD_NAME).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6) - ).get(); - - assertNoFailures(response); - - Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - List buckets = range.getBuckets(); - assertThat(range.getBuckets().size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("*-3.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0)); - assertThat(bucket.getFromAsString(), nullValue()); - assertThat(bucket.getToAsString(), equalTo("3.0")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("3.0-6.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0)); - assertThat(bucket.getFromAsString(), equalTo("3.0")); - assertThat(bucket.getToAsString(), equalTo("6.0")); - assertThat(bucket.getDocCount(), equalTo(4L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("6.0-*")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(bucket.getFromAsString(), equalTo("6.0")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + range("range").field(MULTI_VALUED_FIELD_NAME).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6) + ), + response -> { + Range range = response.getAggregations().get("range"); + assertThat(range, notNullValue()); + assertThat(range.getName(), equalTo("range")); + List buckets = range.getBuckets(); + assertThat(range.getBuckets().size(), equalTo(3)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("*-3.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0)); + assertThat(bucket.getFromAsString(), nullValue()); + assertThat(bucket.getToAsString(), equalTo("3.0")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("3.0-6.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0)); + assertThat(bucket.getFromAsString(), equalTo("3.0")); + assertThat(bucket.getToAsString(), equalTo("6.0")); + assertThat(bucket.getDocCount(), equalTo(4L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("6.0-*")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0)); + assertThat(((Number) 
bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(bucket.getFromAsString(), equalTo("6.0")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); + } + ); } /* @@ -499,48 +507,49 @@ public void testMultiValuedField() throws Exception { */ public void testMultiValuedFieldWithValueScript() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - range("range").field(MULTI_VALUED_FIELD_NAME) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap())) - .addUnboundedTo(3) - .addRange(3, 6) - .addUnboundedFrom(6) - ).get(); - - assertNoFailures(response); - - Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - List buckets = range.getBuckets(); - assertThat(range.getBuckets().size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("*-3.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0)); - assertThat(bucket.getFromAsString(), nullValue()); - assertThat(bucket.getToAsString(), equalTo("3.0")); - assertThat(bucket.getDocCount(), equalTo(1L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("3.0-6.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0)); - assertThat(bucket.getFromAsString(), equalTo("3.0")); - assertThat(bucket.getToAsString(), equalTo("6.0")); - assertThat(bucket.getDocCount(), equalTo(4L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("6.0-*")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(bucket.getFromAsString(), equalTo("6.0")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(numDocs - 3L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + range("range").field(MULTI_VALUED_FIELD_NAME) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap())) + .addUnboundedTo(3) + .addRange(3, 6) + .addUnboundedFrom(6) + ), + response -> { + Range range = response.getAggregations().get("range"); + assertThat(range, notNullValue()); + assertThat(range.getName(), equalTo("range")); + List buckets = range.getBuckets(); + assertThat(range.getBuckets().size(), equalTo(3)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("*-3.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0)); + assertThat(bucket.getFromAsString(), nullValue()); + assertThat(bucket.getToAsString(), equalTo("3.0")); + assertThat(bucket.getDocCount(), equalTo(1L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("3.0-6.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0)); + assertThat(bucket.getFromAsString(), 
equalTo("3.0")); + assertThat(bucket.getToAsString(), equalTo("6.0")); + assertThat(bucket.getDocCount(), equalTo(4L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("6.0-*")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(bucket.getFromAsString(), equalTo("6.0")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(numDocs - 3L)); + } + ); } /* @@ -567,76 +576,74 @@ public void testScriptSingleValue() throws Exception { "doc['" + SINGLE_VALUED_FIELD_NAME + "'].value", Collections.emptyMap() ); - SearchResponse response = prepareSearch("idx").addAggregation( - range("range").script(script).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6) - ).get(); - - assertNoFailures(response); - - Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - List buckets = range.getBuckets(); - assertThat(range.getBuckets().size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("*-3.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0)); - assertThat(bucket.getFromAsString(), nullValue()); - assertThat(bucket.getToAsString(), equalTo("3.0")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("3.0-6.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0)); - assertThat(bucket.getFromAsString(), equalTo("3.0")); - assertThat(bucket.getToAsString(), equalTo("6.0")); - assertThat(bucket.getDocCount(), equalTo(3L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("6.0-*")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(bucket.getFromAsString(), equalTo("6.0")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(numDocs - 5L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation(range("range").script(script).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6)), + response -> { + Range range = response.getAggregations().get("range"); + assertThat(range, notNullValue()); + assertThat(range.getName(), equalTo("range")); + List buckets = range.getBuckets(); + assertThat(range.getBuckets().size(), equalTo(3)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("*-3.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0)); + assertThat(bucket.getFromAsString(), nullValue()); + assertThat(bucket.getToAsString(), equalTo("3.0")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("3.0-6.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), 
equalTo(6.0)); + assertThat(bucket.getFromAsString(), equalTo("3.0")); + assertThat(bucket.getToAsString(), equalTo("6.0")); + assertThat(bucket.getDocCount(), equalTo(3L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("6.0-*")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(bucket.getFromAsString(), equalTo("6.0")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(numDocs - 5L)); + } + ); } public void testEmptyRange() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - range("range").field(MULTI_VALUED_FIELD_NAME).addUnboundedTo(-1).addUnboundedFrom(1000) - ).get(); - - assertNoFailures(response); - - Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - List buckets = range.getBuckets(); - assertThat(range.getBuckets().size(), equalTo(2)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("*--1.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(-1.0)); - assertThat(bucket.getFromAsString(), nullValue()); - assertThat(bucket.getToAsString(), equalTo("-1.0")); - assertThat(bucket.getDocCount(), equalTo(0L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("1000.0-*")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000d)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(bucket.getFromAsString(), equalTo("1000.0")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(0L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation(range("range").field(MULTI_VALUED_FIELD_NAME).addUnboundedTo(-1).addUnboundedFrom(1000)), + response -> { + Range range = response.getAggregations().get("range"); + assertThat(range, notNullValue()); + assertThat(range.getName(), equalTo("range")); + List buckets = range.getBuckets(); + assertThat(range.getBuckets().size(), equalTo(2)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("*--1.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(-1.0)); + assertThat(bucket.getFromAsString(), nullValue()); + assertThat(bucket.getToAsString(), equalTo("-1.0")); + assertThat(bucket.getDocCount(), equalTo(0L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("1000.0-*")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000d)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(bucket.getFromAsString(), equalTo("1000.0")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(0L)); + } + ); } public void testNoRangesInQuery() { @@ -658,44 +665,43 @@ public void testScriptMultiValued() throws Exception { Collections.emptyMap() ); - SearchResponse response = prepareSearch("idx").addAggregation( - 
range("range").script(script).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6) - ).get(); - - assertNoFailures(response); - - Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - List buckets = range.getBuckets(); - assertThat(range.getBuckets().size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("*-3.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0)); - assertThat(bucket.getFromAsString(), nullValue()); - assertThat(bucket.getToAsString(), equalTo("3.0")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("3.0-6.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0)); - assertThat(bucket.getFromAsString(), equalTo("3.0")); - assertThat(bucket.getToAsString(), equalTo("6.0")); - assertThat(bucket.getDocCount(), equalTo(4L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("6.0-*")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(bucket.getFromAsString(), equalTo("6.0")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation(range("range").script(script).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6)), + response -> { + Range range = response.getAggregations().get("range"); + assertThat(range, notNullValue()); + assertThat(range.getName(), equalTo("range")); + List buckets = range.getBuckets(); + assertThat(range.getBuckets().size(), equalTo(3)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("*-3.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0)); + assertThat(bucket.getFromAsString(), nullValue()); + assertThat(bucket.getToAsString(), equalTo("3.0")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("3.0-6.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0)); + assertThat(bucket.getFromAsString(), equalTo("3.0")); + assertThat(bucket.getToAsString(), equalTo("6.0")); + assertThat(bucket.getDocCount(), equalTo(4L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("6.0-*")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(bucket.getFromAsString(), equalTo("6.0")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); + } + ); } /* @@ -716,167 +722,172 @@ public void testScriptMultiValued() throws Exception { */ public void testUnmapped() throws Exception { - SearchResponse response = 
prepareSearch("idx_unmapped").addAggregation( - range("range").field(SINGLE_VALUED_FIELD_NAME).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6) - ).get(); - - assertNoFailures(response); - - Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - List buckets = range.getBuckets(); - assertThat(range.getBuckets().size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("*-3.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0)); - assertThat(bucket.getFromAsString(), nullValue()); - assertThat(bucket.getToAsString(), equalTo("3.0")); - assertThat(bucket.getDocCount(), equalTo(0L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("3.0-6.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0)); - assertThat(bucket.getFromAsString(), equalTo("3.0")); - assertThat(bucket.getToAsString(), equalTo("6.0")); - assertThat(bucket.getDocCount(), equalTo(0L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("6.0-*")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(bucket.getFromAsString(), equalTo("6.0")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(0L)); + assertNoFailuresAndResponse( + prepareSearch("idx_unmapped").addAggregation( + range("range").field(SINGLE_VALUED_FIELD_NAME).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6) + ), + response -> { + Range range = response.getAggregations().get("range"); + assertThat(range, notNullValue()); + assertThat(range.getName(), equalTo("range")); + List buckets = range.getBuckets(); + assertThat(range.getBuckets().size(), equalTo(3)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("*-3.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0)); + assertThat(bucket.getFromAsString(), nullValue()); + assertThat(bucket.getToAsString(), equalTo("3.0")); + assertThat(bucket.getDocCount(), equalTo(0L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("3.0-6.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0)); + assertThat(bucket.getFromAsString(), equalTo("3.0")); + assertThat(bucket.getToAsString(), equalTo("6.0")); + assertThat(bucket.getDocCount(), equalTo(0L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("6.0-*")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(bucket.getFromAsString(), equalTo("6.0")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(0L)); + } + ); } public void testPartiallyUnmapped() throws Exception { 
clusterAdmin().prepareHealth("idx_unmapped").setWaitForYellowStatus().get(); - SearchResponse response = prepareSearch("idx", "idx_unmapped").addAggregation( - range("range").field(SINGLE_VALUED_FIELD_NAME).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6) - ).get(); - - assertNoFailures(response); - - Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - List buckets = range.getBuckets(); - assertThat(range.getBuckets().size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("*-3.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0)); - assertThat(bucket.getFromAsString(), nullValue()); - assertThat(bucket.getToAsString(), equalTo("3.0")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("3.0-6.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0)); - assertThat(bucket.getFromAsString(), equalTo("3.0")); - assertThat(bucket.getToAsString(), equalTo("6.0")); - assertThat(bucket.getDocCount(), equalTo(3L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("6.0-*")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(bucket.getFromAsString(), equalTo("6.0")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(numDocs - 5L)); + assertNoFailuresAndResponse( + prepareSearch("idx", "idx_unmapped").addAggregation( + range("range").field(SINGLE_VALUED_FIELD_NAME).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6) + ), + response -> { + Range range = response.getAggregations().get("range"); + assertThat(range, notNullValue()); + assertThat(range.getName(), equalTo("range")); + List buckets = range.getBuckets(); + assertThat(range.getBuckets().size(), equalTo(3)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("*-3.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0)); + assertThat(bucket.getFromAsString(), nullValue()); + assertThat(bucket.getToAsString(), equalTo("3.0")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("3.0-6.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0)); + assertThat(bucket.getFromAsString(), equalTo("3.0")); + assertThat(bucket.getToAsString(), equalTo("6.0")); + assertThat(bucket.getDocCount(), equalTo(3L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("6.0-*")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(bucket.getFromAsString(), equalTo("6.0")); + assertThat(bucket.getToAsString(), nullValue()); + 
assertThat(bucket.getDocCount(), equalTo(numDocs - 5L)); + } + ); } public void testOverlappingRanges() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - range("range").field(MULTI_VALUED_FIELD_NAME).addUnboundedTo(5).addRange(3, 6).addRange(4, 5).addUnboundedFrom(4) - ).get(); - - assertNoFailures(response); - - Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - List buckets = range.getBuckets(); - assertThat(range.getBuckets().size(), equalTo(4)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("*-5.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(5.0)); - assertThat(bucket.getFromAsString(), nullValue()); - assertThat(bucket.getToAsString(), equalTo("5.0")); - assertThat(bucket.getDocCount(), equalTo(4L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("3.0-6.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0)); - assertThat(bucket.getFromAsString(), equalTo("3.0")); - assertThat(bucket.getToAsString(), equalTo("6.0")); - assertThat(bucket.getDocCount(), equalTo(4L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("4.0-5.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(4.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(5.0)); - assertThat(bucket.getFromAsString(), equalTo("4.0")); - assertThat(bucket.getToAsString(), equalTo("5.0")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(3); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("4.0-*")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(4.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(bucket.getFromAsString(), equalTo("4.0")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(numDocs - 2L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + range("range").field(MULTI_VALUED_FIELD_NAME).addUnboundedTo(5).addRange(3, 6).addRange(4, 5).addUnboundedFrom(4) + ), + response -> { + Range range = response.getAggregations().get("range"); + assertThat(range, notNullValue()); + assertThat(range.getName(), equalTo("range")); + List buckets = range.getBuckets(); + assertThat(range.getBuckets().size(), equalTo(4)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("*-5.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(5.0)); + assertThat(bucket.getFromAsString(), nullValue()); + assertThat(bucket.getToAsString(), equalTo("5.0")); + assertThat(bucket.getDocCount(), equalTo(4L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("3.0-6.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0)); + assertThat(bucket.getFromAsString(), equalTo("3.0")); + assertThat(bucket.getToAsString(), 
equalTo("6.0")); + assertThat(bucket.getDocCount(), equalTo(4L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("4.0-5.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(4.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(5.0)); + assertThat(bucket.getFromAsString(), equalTo("4.0")); + assertThat(bucket.getToAsString(), equalTo("5.0")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(3); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("4.0-*")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(4.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(bucket.getFromAsString(), equalTo("4.0")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(numDocs - 2L)); + } + ); } public void testEmptyAggregation() throws Exception { - SearchResponse searchResponse = prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .interval(1L) - .minDocCount(0) - .subAggregation(range("range").field(SINGLE_VALUED_FIELD_NAME).addRange("0-2", 0.0, 2.0)) - ) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - Histogram histo = searchResponse.getAggregations().get("histo"); - assertThat(histo, Matchers.notNullValue()); - Histogram.Bucket bucket = histo.getBuckets().get(1); - assertThat(bucket, Matchers.notNullValue()); - - Range range = bucket.getAggregations().get("range"); - // TODO: use diamond once JI-9019884 is fixed - List buckets = new ArrayList<>(range.getBuckets()); - assertThat(range, Matchers.notNullValue()); - assertThat(range.getName(), equalTo("range")); - assertThat(buckets.size(), is(1)); - assertThat(buckets.get(0).getKey(), equalTo("0-2")); - assertThat(((Number) buckets.get(0).getFrom()).doubleValue(), equalTo(0.0)); - assertThat(((Number) buckets.get(0).getTo()).doubleValue(), equalTo(2.0)); - assertThat(buckets.get(0).getFromAsString(), equalTo("0.0")); - assertThat(buckets.get(0).getToAsString(), equalTo("2.0")); - assertThat(buckets.get(0).getDocCount(), equalTo(0L)); + assertNoFailuresAndResponse( + prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(1L) + .minDocCount(0) + .subAggregation(range("range").field(SINGLE_VALUED_FIELD_NAME).addRange("0-2", 0.0, 2.0)) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, Matchers.notNullValue()); + Histogram.Bucket bucket = histo.getBuckets().get(1); + assertThat(bucket, Matchers.notNullValue()); + + Range range = bucket.getAggregations().get("range"); + // TODO: use diamond once JI-9019884 is fixed + List buckets = new ArrayList<>(range.getBuckets()); + assertThat(range, Matchers.notNullValue()); + assertThat(range.getName(), equalTo("range")); + assertThat(buckets.size(), is(1)); + assertThat(buckets.get(0).getKey(), equalTo("0-2")); + assertThat(((Number) buckets.get(0).getFrom()).doubleValue(), equalTo(0.0)); + assertThat(((Number) buckets.get(0).getTo()).doubleValue(), equalTo(2.0)); + assertThat(buckets.get(0).getFromAsString(), equalTo("0.0")); + assertThat(buckets.get(0).getToAsString(), equalTo("2.0")); + assertThat(buckets.get(0).getDocCount(), equalTo(0L)); + } + 
); } @@ -908,14 +919,14 @@ public void testScriptCaching() throws Exception { // Test that a request using a nondeterministic script does not get cached Map params = new HashMap<>(); params.put("fieldname", "date"); - SearchResponse r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - range("foo").field("i") - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap())) - .addRange(0, 10) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + range("foo").field("i") + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap())) + .addRange(0, 10) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -927,14 +938,14 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a deterministic script gets cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - range("foo").field("i") - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap())) - .addRange(0, 10) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + range("foo").field("i") + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap())) + .addRange(0, 10) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -946,8 +957,7 @@ public void testScriptCaching() throws Exception { ); // Ensure that non-scripted requests are cached as normal - r = prepareSearch("cache_test_idx").setSize(0).addAggregation(range("foo").field("i").addRange(0, 10)).get(); - assertNoFailures(r); + assertNoFailures(prepareSearch("cache_test_idx").setSize(0).addAggregation(range("foo").field("i").addRange(0, 10))); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -960,60 +970,62 @@ public void testScriptCaching() throws Exception { } public void testFieldAlias() { - SearchResponse response = prepareSearch("old_index", "new_index").addAggregation( - range("range").field("route_length_miles").addUnboundedTo(50.0).addRange(50.0, 150.0).addUnboundedFrom(150.0) - ).get(); - - assertNoFailures(response); - - Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - List buckets = range.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("*-50.0")); - assertThat(bucket.getDocCount(), equalTo(1L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("50.0-150.0")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("150.0-*")); - assertThat(bucket.getDocCount(), equalTo(0L)); + assertNoFailuresAndResponse( + prepareSearch("old_index", "new_index").addAggregation( + range("range").field("route_length_miles").addUnboundedTo(50.0).addRange(50.0, 150.0).addUnboundedFrom(150.0) + ), + response -> { + Range range = response.getAggregations().get("range"); + assertThat(range, notNullValue()); + 
assertThat(range.getName(), equalTo("range")); + List buckets = range.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("*-50.0")); + assertThat(bucket.getDocCount(), equalTo(1L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("50.0-150.0")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("150.0-*")); + assertThat(bucket.getDocCount(), equalTo(0L)); + } + ); } public void testFieldAliasWithMissingValue() { - SearchResponse response = prepareSearch("old_index", "new_index").addAggregation( - range("range").field("route_length_miles").missing(0.0).addUnboundedTo(50.0).addRange(50.0, 150.0).addUnboundedFrom(150.0) - ).get(); - - assertNoFailures(response); - - Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - List buckets = range.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("*-50.0")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("50.0-150.0")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("150.0-*")); - assertThat(bucket.getDocCount(), equalTo(0L)); + assertNoFailuresAndResponse( + prepareSearch("old_index", "new_index").addAggregation( + range("range").field("route_length_miles").missing(0.0).addUnboundedTo(50.0).addRange(50.0, 150.0).addUnboundedFrom(150.0) + ), + response -> { + Range range = response.getAggregations().get("range"); + assertThat(range, notNullValue()); + assertThat(range.getName(), equalTo("range")); + List buckets = range.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("*-50.0")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("50.0-150.0")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("150.0-*")); + assertThat(bucket.getDocCount(), equalTo(0L)); + } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ReverseNestedIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ReverseNestedIT.java index e90e73eec5bb3..5f42eb3b2ab19 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ReverseNestedIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ReverseNestedIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.InternalAggregation; 
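Every file touched below follows the same mechanical rewrite, so it is worth spelling out once. A test that previously materialized a SearchResponse with .get() and asserted on it afterwards now hands its assertions to ElasticsearchAssertions.assertNoFailuresAndResponse, which executes the request, verifies there were no shard failures, and invokes the assertions through a callback, presumably so the framework can release the response once the callback returns; requests whose body is never inspected switch to the assertNoFailures(SearchRequestBuilder) overload instead. A minimal sketch of the before/after shape, with illustrative index, field, and aggregation names that are not taken from this diff:

    // Before: the test owns the SearchResponse and asserts on it afterwards.
    SearchResponse response = prepareSearch("idx").addAggregation(range("r").field("f").addRange(0, 10)).get();
    assertNoFailures(response);
    Range r = response.getAggregations().get("r");
    assertThat(r, notNullValue());

    // After: the helper runs the request, checks for shard failures, and
    // passes the response to a consumer holding the same assertions.
    assertNoFailuresAndResponse(
        prepareSearch("idx").addAggregation(range("r").field("f").addRange(0, 10)),
        response -> assertThat(response.getAggregations().get("r"), notNullValue())
    );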
@@ -34,7 +33,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -143,310 +142,314 @@ private void insertIdx2(String[][] values) throws Exception { } public void testSimpleReverseNestedToRoot() throws Exception { - SearchResponse response = prepareSearch("idx1").addAggregation( - nested("nested1", "nested1").subAggregation( - terms("field2").field("nested1.field2") - .subAggregation( - reverseNested("nested1_to_field1").subAggregation( - terms("field1").field("field1").collectMode(randomFrom(SubAggCollectionMode.values())) + assertNoFailuresAndResponse( + prepareSearch("idx1").addAggregation( + nested("nested1", "nested1").subAggregation( + terms("field2").field("nested1.field2") + .subAggregation( + reverseNested("nested1_to_field1").subAggregation( + terms("field1").field("field1").collectMode(randomFrom(SubAggCollectionMode.values())) + ) ) - ) - ) - ).get(); - - assertNoFailures(response); - - Nested nested = response.getAggregations().get("nested1"); - assertThat(nested, notNullValue()); - assertThat(nested.getName(), equalTo("nested1")); - assertThat(nested.getDocCount(), equalTo(25L)); - assertThat(nested.getAggregations().asList().isEmpty(), is(false)); - - Terms usernames = nested.getAggregations().get("field2"); - assertThat(usernames, notNullValue()); - assertThat(usernames.getBuckets().size(), equalTo(9)); - List usernameBuckets = new ArrayList<>(usernames.getBuckets()); - - // nested.field2: 1 - Terms.Bucket bucket = usernameBuckets.get(0); - assertThat(bucket.getKeyAsString(), equalTo("1")); - assertThat(bucket.getDocCount(), equalTo(6L)); - ReverseNested reverseNested = bucket.getAggregations().get("nested1_to_field1"); - assertThat(((InternalAggregation) reverseNested).getProperty("_count"), equalTo(5L)); - Terms tags = reverseNested.getAggregations().get("field1"); - assertThat(((InternalAggregation) reverseNested).getProperty("field1"), sameInstance(tags)); - List tagsBuckets = new ArrayList<>(tags.getBuckets()); - assertThat(tagsBuckets.size(), equalTo(6)); - assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("c")); - assertThat(tagsBuckets.get(0).getDocCount(), equalTo(4L)); - assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("a")); - assertThat(tagsBuckets.get(1).getDocCount(), equalTo(3L)); - assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("e")); - assertThat(tagsBuckets.get(2).getDocCount(), equalTo(2L)); - assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("b")); - assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(4).getKeyAsString(), equalTo("d")); - assertThat(tagsBuckets.get(4).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(5).getKeyAsString(), equalTo("x")); - assertThat(tagsBuckets.get(5).getDocCount(), equalTo(1L)); - - // nested.field2: 4 - bucket = usernameBuckets.get(1); - assertThat(bucket.getKeyAsString(), equalTo("4")); - assertThat(bucket.getDocCount(), equalTo(4L)); - reverseNested = bucket.getAggregations().get("nested1_to_field1"); - tags = 
reverseNested.getAggregations().get("field1"); - tagsBuckets = new ArrayList<>(tags.getBuckets()); - assertThat(tagsBuckets.size(), equalTo(5)); - assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a")); - assertThat(tagsBuckets.get(0).getDocCount(), equalTo(3L)); - assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b")); - assertThat(tagsBuckets.get(1).getDocCount(), equalTo(2L)); - assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("c")); - assertThat(tagsBuckets.get(2).getDocCount(), equalTo(2L)); - assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("d")); - assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(4).getKeyAsString(), equalTo("e")); - assertThat(tagsBuckets.get(4).getDocCount(), equalTo(1L)); - - // nested.field2: 7 - bucket = usernameBuckets.get(2); - assertThat(bucket.getKeyAsString(), equalTo("7")); - assertThat(bucket.getDocCount(), equalTo(3L)); - reverseNested = bucket.getAggregations().get("nested1_to_field1"); - tags = reverseNested.getAggregations().get("field1"); - tagsBuckets = new ArrayList<>(tags.getBuckets()); - assertThat(tagsBuckets.size(), equalTo(5)); - assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("c")); - assertThat(tagsBuckets.get(0).getDocCount(), equalTo(2L)); - assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("d")); - assertThat(tagsBuckets.get(1).getDocCount(), equalTo(2L)); - assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("e")); - assertThat(tagsBuckets.get(2).getDocCount(), equalTo(2L)); - assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("a")); - assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(4).getKeyAsString(), equalTo("b")); - assertThat(tagsBuckets.get(4).getDocCount(), equalTo(1L)); - - // nested.field2: 2 - bucket = usernameBuckets.get(3); - assertThat(bucket.getKeyAsString(), equalTo("2")); - assertThat(bucket.getDocCount(), equalTo(2L)); - reverseNested = bucket.getAggregations().get("nested1_to_field1"); - tags = reverseNested.getAggregations().get("field1"); - tagsBuckets = new ArrayList<>(tags.getBuckets()); - assertThat(tagsBuckets.size(), equalTo(3)); - assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a")); - assertThat(tagsBuckets.get(0).getDocCount(), equalTo(2L)); - assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("c")); - assertThat(tagsBuckets.get(1).getDocCount(), equalTo(2L)); - assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("b")); - assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); - - // nested.field2: 3 - bucket = usernameBuckets.get(4); - assertThat(bucket.getKeyAsString(), equalTo("3")); - assertThat(bucket.getDocCount(), equalTo(2L)); - reverseNested = bucket.getAggregations().get("nested1_to_field1"); - tags = reverseNested.getAggregations().get("field1"); - tagsBuckets = new ArrayList<>(tags.getBuckets()); - assertThat(tagsBuckets.size(), equalTo(3)); - assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a")); - assertThat(tagsBuckets.get(0).getDocCount(), equalTo(2L)); - assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b")); - assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("c")); - assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); - - // nested.field2: 5 - bucket = usernameBuckets.get(5); - assertThat(bucket.getKeyAsString(), equalTo("5")); - assertThat(bucket.getDocCount(), equalTo(2L)); - reverseNested = bucket.getAggregations().get("nested1_to_field1"); - tags = 
reverseNested.getAggregations().get("field1"); - tagsBuckets = new ArrayList<>(tags.getBuckets()); - assertThat(tagsBuckets.size(), equalTo(4)); - assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("b")); - assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("c")); - assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("d")); - assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("z")); - assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); - - // nested.field2: 6 - bucket = usernameBuckets.get(6); - assertThat(bucket.getKeyAsString(), equalTo("6")); - assertThat(bucket.getDocCount(), equalTo(2L)); - reverseNested = bucket.getAggregations().get("nested1_to_field1"); - tags = reverseNested.getAggregations().get("field1"); - tagsBuckets = new ArrayList<>(tags.getBuckets()); - assertThat(tagsBuckets.size(), equalTo(4)); - assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("c")); - assertThat(tagsBuckets.get(0).getDocCount(), equalTo(2L)); - assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b")); - assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("d")); - assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("y")); - assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); - - // nested.field2: 8 - bucket = usernameBuckets.get(7); - assertThat(bucket.getKeyAsString(), equalTo("8")); - assertThat(bucket.getDocCount(), equalTo(2L)); - reverseNested = bucket.getAggregations().get("nested1_to_field1"); - tags = reverseNested.getAggregations().get("field1"); - tagsBuckets = new ArrayList<>(tags.getBuckets()); - assertThat(tagsBuckets.size(), equalTo(4)); - assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("c")); - assertThat(tagsBuckets.get(0).getDocCount(), equalTo(2L)); - assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("d")); - assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("e")); - assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("x")); - assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); - - // nested.field2: 9 - bucket = usernameBuckets.get(8); - assertThat(bucket.getKeyAsString(), equalTo("9")); - assertThat(bucket.getDocCount(), equalTo(2L)); - reverseNested = bucket.getAggregations().get("nested1_to_field1"); - tags = reverseNested.getAggregations().get("field1"); - tagsBuckets = new ArrayList<>(tags.getBuckets()); - assertThat(tagsBuckets.size(), equalTo(4)); - assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("c")); - assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("d")); - assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("e")); - assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("z")); - assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); + ) + ), + response -> { + Nested nested = response.getAggregations().get("nested1"); + assertThat(nested, notNullValue()); + assertThat(nested.getName(), equalTo("nested1")); + assertThat(nested.getDocCount(), equalTo(25L)); + 
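+                // 25 here counts matching nested documents, not root documents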
assertThat(nested.getAggregations().asList().isEmpty(), is(false)); + + Terms usernames = nested.getAggregations().get("field2"); + assertThat(usernames, notNullValue()); + assertThat(usernames.getBuckets().size(), equalTo(9)); + List usernameBuckets = new ArrayList<>(usernames.getBuckets()); + + // nested.field2: 1 + Terms.Bucket bucket = usernameBuckets.get(0); + assertThat(bucket.getKeyAsString(), equalTo("1")); + assertThat(bucket.getDocCount(), equalTo(6L)); + ReverseNested reverseNested = bucket.getAggregations().get("nested1_to_field1"); + assertThat(((InternalAggregation) reverseNested).getProperty("_count"), equalTo(5L)); + Terms tags = reverseNested.getAggregations().get("field1"); + assertThat(((InternalAggregation) reverseNested).getProperty("field1"), sameInstance(tags)); + List tagsBuckets = new ArrayList<>(tags.getBuckets()); + assertThat(tagsBuckets.size(), equalTo(6)); + assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("c")); + assertThat(tagsBuckets.get(0).getDocCount(), equalTo(4L)); + assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("a")); + assertThat(tagsBuckets.get(1).getDocCount(), equalTo(3L)); + assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("e")); + assertThat(tagsBuckets.get(2).getDocCount(), equalTo(2L)); + assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("b")); + assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(4).getKeyAsString(), equalTo("d")); + assertThat(tagsBuckets.get(4).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(5).getKeyAsString(), equalTo("x")); + assertThat(tagsBuckets.get(5).getDocCount(), equalTo(1L)); + + // nested.field2: 4 + bucket = usernameBuckets.get(1); + assertThat(bucket.getKeyAsString(), equalTo("4")); + assertThat(bucket.getDocCount(), equalTo(4L)); + reverseNested = bucket.getAggregations().get("nested1_to_field1"); + tags = reverseNested.getAggregations().get("field1"); + tagsBuckets = new ArrayList<>(tags.getBuckets()); + assertThat(tagsBuckets.size(), equalTo(5)); + assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a")); + assertThat(tagsBuckets.get(0).getDocCount(), equalTo(3L)); + assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b")); + assertThat(tagsBuckets.get(1).getDocCount(), equalTo(2L)); + assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("c")); + assertThat(tagsBuckets.get(2).getDocCount(), equalTo(2L)); + assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("d")); + assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(4).getKeyAsString(), equalTo("e")); + assertThat(tagsBuckets.get(4).getDocCount(), equalTo(1L)); + + // nested.field2: 7 + bucket = usernameBuckets.get(2); + assertThat(bucket.getKeyAsString(), equalTo("7")); + assertThat(bucket.getDocCount(), equalTo(3L)); + reverseNested = bucket.getAggregations().get("nested1_to_field1"); + tags = reverseNested.getAggregations().get("field1"); + tagsBuckets = new ArrayList<>(tags.getBuckets()); + assertThat(tagsBuckets.size(), equalTo(5)); + assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("c")); + assertThat(tagsBuckets.get(0).getDocCount(), equalTo(2L)); + assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("d")); + assertThat(tagsBuckets.get(1).getDocCount(), equalTo(2L)); + assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("e")); + assertThat(tagsBuckets.get(2).getDocCount(), equalTo(2L)); + assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("a")); + assertThat(tagsBuckets.get(3).getDocCount(), 
equalTo(1L)); + assertThat(tagsBuckets.get(4).getKeyAsString(), equalTo("b")); + assertThat(tagsBuckets.get(4).getDocCount(), equalTo(1L)); + + // nested.field2: 2 + bucket = usernameBuckets.get(3); + assertThat(bucket.getKeyAsString(), equalTo("2")); + assertThat(bucket.getDocCount(), equalTo(2L)); + reverseNested = bucket.getAggregations().get("nested1_to_field1"); + tags = reverseNested.getAggregations().get("field1"); + tagsBuckets = new ArrayList<>(tags.getBuckets()); + assertThat(tagsBuckets.size(), equalTo(3)); + assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a")); + assertThat(tagsBuckets.get(0).getDocCount(), equalTo(2L)); + assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("c")); + assertThat(tagsBuckets.get(1).getDocCount(), equalTo(2L)); + assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("b")); + assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); + + // nested.field2: 3 + bucket = usernameBuckets.get(4); + assertThat(bucket.getKeyAsString(), equalTo("3")); + assertThat(bucket.getDocCount(), equalTo(2L)); + reverseNested = bucket.getAggregations().get("nested1_to_field1"); + tags = reverseNested.getAggregations().get("field1"); + tagsBuckets = new ArrayList<>(tags.getBuckets()); + assertThat(tagsBuckets.size(), equalTo(3)); + assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a")); + assertThat(tagsBuckets.get(0).getDocCount(), equalTo(2L)); + assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b")); + assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("c")); + assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); + + // nested.field2: 5 + bucket = usernameBuckets.get(5); + assertThat(bucket.getKeyAsString(), equalTo("5")); + assertThat(bucket.getDocCount(), equalTo(2L)); + reverseNested = bucket.getAggregations().get("nested1_to_field1"); + tags = reverseNested.getAggregations().get("field1"); + tagsBuckets = new ArrayList<>(tags.getBuckets()); + assertThat(tagsBuckets.size(), equalTo(4)); + assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("b")); + assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("c")); + assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("d")); + assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("z")); + assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); + + // nested.field2: 6 + bucket = usernameBuckets.get(6); + assertThat(bucket.getKeyAsString(), equalTo("6")); + assertThat(bucket.getDocCount(), equalTo(2L)); + reverseNested = bucket.getAggregations().get("nested1_to_field1"); + tags = reverseNested.getAggregations().get("field1"); + tagsBuckets = new ArrayList<>(tags.getBuckets()); + assertThat(tagsBuckets.size(), equalTo(4)); + assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("c")); + assertThat(tagsBuckets.get(0).getDocCount(), equalTo(2L)); + assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b")); + assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("d")); + assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("y")); + assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); + + // nested.field2: 8 + bucket = usernameBuckets.get(7); + assertThat(bucket.getKeyAsString(), equalTo("8")); + 
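+                // reverse_nested joins back to the parents, so these bucket counts are root-document counts rather than per nested value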
assertThat(bucket.getDocCount(), equalTo(2L)); + reverseNested = bucket.getAggregations().get("nested1_to_field1"); + tags = reverseNested.getAggregations().get("field1"); + tagsBuckets = new ArrayList<>(tags.getBuckets()); + assertThat(tagsBuckets.size(), equalTo(4)); + assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("c")); + assertThat(tagsBuckets.get(0).getDocCount(), equalTo(2L)); + assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("d")); + assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("e")); + assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("x")); + assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); + + // nested.field2: 9 + bucket = usernameBuckets.get(8); + assertThat(bucket.getKeyAsString(), equalTo("9")); + assertThat(bucket.getDocCount(), equalTo(2L)); + reverseNested = bucket.getAggregations().get("nested1_to_field1"); + tags = reverseNested.getAggregations().get("field1"); + tagsBuckets = new ArrayList<>(tags.getBuckets()); + assertThat(tagsBuckets.size(), equalTo(4)); + assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("c")); + assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("d")); + assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("e")); + assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("z")); + assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); + } + ); } public void testSimpleNested1ToRootToNested2() throws Exception { - SearchResponse response = prepareSearch("idx2").addAggregation( - nested("nested1", "nested1").subAggregation( - reverseNested("nested1_to_root").subAggregation(nested("root_to_nested2", "nested1.nested2")) - ) - ).get(); - - assertNoFailures(response); - Nested nested = response.getAggregations().get("nested1"); - assertThat(nested.getName(), equalTo("nested1")); - assertThat(nested.getDocCount(), equalTo(9L)); - ReverseNested reverseNested = nested.getAggregations().get("nested1_to_root"); - assertThat(reverseNested.getName(), equalTo("nested1_to_root")); - assertThat(reverseNested.getDocCount(), equalTo(4L)); - nested = reverseNested.getAggregations().get("root_to_nested2"); - assertThat(nested.getName(), equalTo("root_to_nested2")); - assertThat(nested.getDocCount(), equalTo(27L)); + assertNoFailuresAndResponse( + prepareSearch("idx2").addAggregation( + nested("nested1", "nested1").subAggregation( + reverseNested("nested1_to_root").subAggregation(nested("root_to_nested2", "nested1.nested2")) + ) + ), + response -> { + Nested nested = response.getAggregations().get("nested1"); + assertThat(nested.getName(), equalTo("nested1")); + assertThat(nested.getDocCount(), equalTo(9L)); + ReverseNested reverseNested = nested.getAggregations().get("nested1_to_root"); + assertThat(reverseNested.getName(), equalTo("nested1_to_root")); + assertThat(reverseNested.getDocCount(), equalTo(4L)); + nested = reverseNested.getAggregations().get("root_to_nested2"); + assertThat(nested.getName(), equalTo("root_to_nested2")); + assertThat(nested.getDocCount(), equalTo(27L)); + } + ); } public void testSimpleReverseNestedToNested1() throws Exception { - SearchResponse response = prepareSearch("idx2").addAggregation( - nested("nested1", "nested1.nested2").subAggregation( - 
terms("field2").field("nested1.nested2.field2") - .order(BucketOrder.key(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .size(10000) - .subAggregation( - reverseNested("nested1_to_field1").path("nested1") - .subAggregation( - terms("field1").field("nested1.field1") - .order(BucketOrder.key(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ) - ) - ) - ).get(); - - assertNoFailures(response); - - Nested nested = response.getAggregations().get("nested1"); - assertThat(nested, notNullValue()); - assertThat(nested.getName(), equalTo("nested1")); - assertThat(nested.getDocCount(), equalTo(27L)); - assertThat(nested.getAggregations().asList().isEmpty(), is(false)); - - Terms usernames = nested.getAggregations().get("field2"); - assertThat(usernames, notNullValue()); - assertThat(usernames.getBuckets().size(), equalTo(5)); - List usernameBuckets = new ArrayList<>(usernames.getBuckets()); - - Terms.Bucket bucket = usernameBuckets.get(0); - assertThat(bucket.getKeyAsString(), equalTo("0")); - assertThat(bucket.getDocCount(), equalTo(12L)); - ReverseNested reverseNested = bucket.getAggregations().get("nested1_to_field1"); - assertThat(reverseNested.getDocCount(), equalTo(5L)); - Terms tags = reverseNested.getAggregations().get("field1"); - List tagsBuckets = new ArrayList<>(tags.getBuckets()); - assertThat(tagsBuckets.size(), equalTo(2)); - assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a")); - assertThat(tagsBuckets.get(0).getDocCount(), equalTo(3L)); - assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b")); - assertThat(tagsBuckets.get(1).getDocCount(), equalTo(2L)); - - bucket = usernameBuckets.get(1); - assertThat(bucket.getKeyAsString(), equalTo("1")); - assertThat(bucket.getDocCount(), equalTo(6L)); - reverseNested = bucket.getAggregations().get("nested1_to_field1"); - assertThat(reverseNested.getDocCount(), equalTo(4L)); - tags = reverseNested.getAggregations().get("field1"); - tagsBuckets = new ArrayList<>(tags.getBuckets()); - assertThat(tagsBuckets.size(), equalTo(4)); - assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a")); - assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b")); - assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("c")); - assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("e")); - assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); - - bucket = usernameBuckets.get(2); - assertThat(bucket.getKeyAsString(), equalTo("2")); - assertThat(bucket.getDocCount(), equalTo(5L)); - reverseNested = bucket.getAggregations().get("nested1_to_field1"); - assertThat(reverseNested.getDocCount(), equalTo(4L)); - tags = reverseNested.getAggregations().get("field1"); - tagsBuckets = new ArrayList<>(tags.getBuckets()); - assertThat(tagsBuckets.size(), equalTo(4)); - assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a")); - assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b")); - assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("c")); - assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("e")); - assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); - - bucket = usernameBuckets.get(3); - 
assertThat(bucket.getKeyAsString(), equalTo("3")); - assertThat(bucket.getDocCount(), equalTo(2L)); - reverseNested = bucket.getAggregations().get("nested1_to_field1"); - assertThat(reverseNested.getDocCount(), equalTo(2L)); - tags = reverseNested.getAggregations().get("field1"); - tagsBuckets = new ArrayList<>(tags.getBuckets()); - assertThat(tagsBuckets.size(), equalTo(2)); - assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("d")); - assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("f")); - - bucket = usernameBuckets.get(4); - assertThat(bucket.getKeyAsString(), equalTo("4")); - assertThat(bucket.getDocCount(), equalTo(2L)); - reverseNested = bucket.getAggregations().get("nested1_to_field1"); - assertThat(reverseNested.getDocCount(), equalTo(2L)); - tags = reverseNested.getAggregations().get("field1"); - tagsBuckets = new ArrayList<>(tags.getBuckets()); - assertThat(tagsBuckets.size(), equalTo(2)); - assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("d")); - assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("f")); + assertNoFailuresAndResponse( + prepareSearch("idx2").addAggregation( + nested("nested1", "nested1.nested2").subAggregation( + terms("field2").field("nested1.nested2.field2") + .order(BucketOrder.key(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .size(10000) + .subAggregation( + reverseNested("nested1_to_field1").path("nested1") + .subAggregation( + terms("field1").field("nested1.field1") + .order(BucketOrder.key(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + ) + ) + ), + response -> { + Nested nested = response.getAggregations().get("nested1"); + assertThat(nested, notNullValue()); + assertThat(nested.getName(), equalTo("nested1")); + assertThat(nested.getDocCount(), equalTo(27L)); + assertThat(nested.getAggregations().asList().isEmpty(), is(false)); + + Terms usernames = nested.getAggregations().get("field2"); + assertThat(usernames, notNullValue()); + assertThat(usernames.getBuckets().size(), equalTo(5)); + List usernameBuckets = new ArrayList<>(usernames.getBuckets()); + + Terms.Bucket bucket = usernameBuckets.get(0); + assertThat(bucket.getKeyAsString(), equalTo("0")); + assertThat(bucket.getDocCount(), equalTo(12L)); + ReverseNested reverseNested = bucket.getAggregations().get("nested1_to_field1"); + assertThat(reverseNested.getDocCount(), equalTo(5L)); + Terms tags = reverseNested.getAggregations().get("field1"); + List tagsBuckets = new ArrayList<>(tags.getBuckets()); + assertThat(tagsBuckets.size(), equalTo(2)); + assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a")); + assertThat(tagsBuckets.get(0).getDocCount(), equalTo(3L)); + assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b")); + assertThat(tagsBuckets.get(1).getDocCount(), equalTo(2L)); + + bucket = usernameBuckets.get(1); + assertThat(bucket.getKeyAsString(), equalTo("1")); + assertThat(bucket.getDocCount(), equalTo(6L)); + reverseNested = bucket.getAggregations().get("nested1_to_field1"); + assertThat(reverseNested.getDocCount(), equalTo(4L)); + tags = reverseNested.getAggregations().get("field1"); + tagsBuckets = new ArrayList<>(tags.getBuckets()); + assertThat(tagsBuckets.size(), equalTo(4)); + assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a")); + assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b")); + 
assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("c")); + assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("e")); + assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); + + bucket = usernameBuckets.get(2); + assertThat(bucket.getKeyAsString(), equalTo("2")); + assertThat(bucket.getDocCount(), equalTo(5L)); + reverseNested = bucket.getAggregations().get("nested1_to_field1"); + assertThat(reverseNested.getDocCount(), equalTo(4L)); + tags = reverseNested.getAggregations().get("field1"); + tagsBuckets = new ArrayList<>(tags.getBuckets()); + assertThat(tagsBuckets.size(), equalTo(4)); + assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a")); + assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b")); + assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("c")); + assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("e")); + assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); + + bucket = usernameBuckets.get(3); + assertThat(bucket.getKeyAsString(), equalTo("3")); + assertThat(bucket.getDocCount(), equalTo(2L)); + reverseNested = bucket.getAggregations().get("nested1_to_field1"); + assertThat(reverseNested.getDocCount(), equalTo(2L)); + tags = reverseNested.getAggregations().get("field1"); + tagsBuckets = new ArrayList<>(tags.getBuckets()); + assertThat(tagsBuckets.size(), equalTo(2)); + assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("d")); + assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("f")); + + bucket = usernameBuckets.get(4); + assertThat(bucket.getKeyAsString(), equalTo("4")); + assertThat(bucket.getDocCount(), equalTo(2L)); + reverseNested = bucket.getAggregations().get("nested1_to_field1"); + assertThat(reverseNested.getDocCount(), equalTo(2L)); + tags = reverseNested.getAggregations().get("field1"); + tagsBuckets = new ArrayList<>(tags.getBuckets()); + assertThat(tagsBuckets.size(), equalTo(2)); + assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("d")); + assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("f")); + } + ); } public void testReverseNestedAggWithoutNestedAgg() { @@ -467,26 +470,32 @@ public void testReverseNestedAggWithoutNestedAgg() { } public void testNonExistingNestedField() throws Exception { - SearchResponse searchResponse = prepareSearch("idx2").setQuery(matchAllQuery()) - .addAggregation(nested("nested2", "nested1.nested2").subAggregation(reverseNested("incorrect").path("nested3"))) - .get(); + assertNoFailuresAndResponse( + prepareSearch("idx2").setQuery(matchAllQuery()) + .addAggregation(nested("nested2", "nested1.nested2").subAggregation(reverseNested("incorrect").path("nested3"))), + response -> { - Nested nested = searchResponse.getAggregations().get("nested2"); - assertThat(nested, notNullValue()); - assertThat(nested.getName(), equalTo("nested2")); + Nested nested = response.getAggregations().get("nested2"); + assertThat(nested, notNullValue()); + assertThat(nested.getName(), equalTo("nested2")); - ReverseNested reverseNested = nested.getAggregations().get("incorrect"); - assertThat(reverseNested.getDocCount(), is(0L)); + ReverseNested reverseNested = 
nested.getAggregations().get("incorrect"); + assertThat(reverseNested.getDocCount(), is(0L)); + } + ); // Test that parsing the reverse_nested agg doesn't fail, because the parent nested agg is unmapped: - searchResponse = prepareSearch("idx1").setQuery(matchAllQuery()) - .addAggregation(nested("incorrect1", "incorrect1").subAggregation(reverseNested("incorrect2").path("incorrect2"))) - .get(); - - nested = searchResponse.getAggregations().get("incorrect1"); - assertThat(nested, notNullValue()); - assertThat(nested.getName(), equalTo("incorrect1")); - assertThat(nested.getDocCount(), is(0L)); + assertNoFailuresAndResponse( + prepareSearch("idx1").setQuery(matchAllQuery()) + .addAggregation(nested("incorrect1", "incorrect1").subAggregation(reverseNested("incorrect2").path("incorrect2"))), + response -> { + + Nested nested = response.getAggregations().get("incorrect1"); + assertThat(nested, notNullValue()); + assertThat(nested.getName(), equalTo("incorrect1")); + assertThat(nested.getDocCount(), is(0L)); + } + ); } public void testSameParentDocHavingMultipleBuckets() throws Exception { @@ -603,110 +612,117 @@ public void testSameParentDocHavingMultipleBuckets() throws Exception { ) .get(); - SearchResponse response = prepareSearch("idx3").addAggregation( - nested("nested_0", "category").subAggregation( - terms("group_by_category").field("category.name") - .subAggregation( - reverseNested("to_root").subAggregation( - nested("nested_1", "sku").subAggregation( - filter("filter_by_sku", termQuery("sku.sku_type", "bar1")).subAggregation( - count("sku_count").field("sku.sku_type") + assertNoFailuresAndResponse( + prepareSearch("idx3").addAggregation( + nested("nested_0", "category").subAggregation( + terms("group_by_category").field("category.name") + .subAggregation( + reverseNested("to_root").subAggregation( + nested("nested_1", "sku").subAggregation( + filter("filter_by_sku", termQuery("sku.sku_type", "bar1")).subAggregation( + count("sku_count").field("sku.sku_type") + ) ) ) ) - ) - ) - ).get(); - assertNoFailures(response); - assertHitCount(response, 1); - - Nested nested0 = response.getAggregations().get("nested_0"); - assertThat(nested0.getDocCount(), equalTo(3L)); - Terms terms = nested0.getAggregations().get("group_by_category"); - assertThat(terms.getBuckets().size(), equalTo(3)); - for (String bucketName : new String[] { "abc", "klm", "xyz" }) { - logger.info("Checking results for bucket {}", bucketName); - Terms.Bucket bucket = terms.getBucketByKey(bucketName); - assertThat(bucket.getDocCount(), equalTo(1L)); - ReverseNested toRoot = bucket.getAggregations().get("to_root"); - assertThat(toRoot.getDocCount(), equalTo(1L)); - Nested nested1 = toRoot.getAggregations().get("nested_1"); - assertThat(nested1.getDocCount(), equalTo(5L)); - Filter filterByBar = nested1.getAggregations().get("filter_by_sku"); - assertThat(filterByBar.getDocCount(), equalTo(3L)); - ValueCount barCount = filterByBar.getAggregations().get("sku_count"); - assertThat(barCount.getValue(), equalTo(3L)); - } + ) + ), + response -> { + assertHitCount(response, 1); + + Nested nested0 = response.getAggregations().get("nested_0"); + assertThat(nested0.getDocCount(), equalTo(3L)); + Terms terms = nested0.getAggregations().get("group_by_category"); + assertThat(terms.getBuckets().size(), equalTo(3)); + for (String bucketName : new String[] { "abc", "klm", "xyz" }) { + logger.info("Checking results for bucket {}", bucketName); + Terms.Bucket bucket = terms.getBucketByKey(bucketName); + assertThat(bucket.getDocCount(), 
equalTo(1L)); + ReverseNested toRoot = bucket.getAggregations().get("to_root"); + assertThat(toRoot.getDocCount(), equalTo(1L)); + Nested nested1 = toRoot.getAggregations().get("nested_1"); + assertThat(nested1.getDocCount(), equalTo(5L)); + Filter filterByBar = nested1.getAggregations().get("filter_by_sku"); + assertThat(filterByBar.getDocCount(), equalTo(3L)); + ValueCount barCount = filterByBar.getAggregations().get("sku_count"); + assertThat(barCount.getValue(), equalTo(3L)); + } + } + ); - response = prepareSearch("idx3").addAggregation( - nested("nested_0", "category").subAggregation( - terms("group_by_category").field("category.name") - .subAggregation( - reverseNested("to_root").subAggregation( - nested("nested_1", "sku").subAggregation( - filter("filter_by_sku", termQuery("sku.sku_type", "bar1")).subAggregation( - nested("nested_2", "sku.colors").subAggregation( - filter("filter_sku_color", termQuery("sku.colors.name", "red")).subAggregation( - reverseNested("reverse_to_sku").path("sku") - .subAggregation(count("sku_count").field("sku.sku_type")) + assertNoFailuresAndResponse( + prepareSearch("idx3").addAggregation( + nested("nested_0", "category").subAggregation( + terms("group_by_category").field("category.name") + .subAggregation( + reverseNested("to_root").subAggregation( + nested("nested_1", "sku").subAggregation( + filter("filter_by_sku", termQuery("sku.sku_type", "bar1")).subAggregation( + nested("nested_2", "sku.colors").subAggregation( + filter("filter_sku_color", termQuery("sku.colors.name", "red")).subAggregation( + reverseNested("reverse_to_sku").path("sku") + .subAggregation(count("sku_count").field("sku.sku_type")) + ) ) ) ) ) ) - ) - ) - ).get(); - assertNoFailures(response); - assertHitCount(response, 1); - - nested0 = response.getAggregations().get("nested_0"); - assertThat(nested0.getDocCount(), equalTo(3L)); - terms = nested0.getAggregations().get("group_by_category"); - assertThat(terms.getBuckets().size(), equalTo(3)); - for (String bucketName : new String[] { "abc", "klm", "xyz" }) { - logger.info("Checking results for bucket {}", bucketName); - Terms.Bucket bucket = terms.getBucketByKey(bucketName); - assertThat(bucket.getDocCount(), equalTo(1L)); - ReverseNested toRoot = bucket.getAggregations().get("to_root"); - assertThat(toRoot.getDocCount(), equalTo(1L)); - Nested nested1 = toRoot.getAggregations().get("nested_1"); - assertThat(nested1.getDocCount(), equalTo(5L)); - Filter filterByBar = nested1.getAggregations().get("filter_by_sku"); - assertThat(filterByBar.getDocCount(), equalTo(3L)); - Nested nested2 = filterByBar.getAggregations().get("nested_2"); - assertThat(nested2.getDocCount(), equalTo(8L)); - Filter filterBarColor = nested2.getAggregations().get("filter_sku_color"); - assertThat(filterBarColor.getDocCount(), equalTo(2L)); - ReverseNested reverseToBar = filterBarColor.getAggregations().get("reverse_to_sku"); - assertThat(reverseToBar.getDocCount(), equalTo(2L)); - ValueCount barCount = reverseToBar.getAggregations().get("sku_count"); - assertThat(barCount.getValue(), equalTo(2L)); - } + ) + ), + response -> { + assertHitCount(response, 1); + + Nested nested0 = response.getAggregations().get("nested_0"); + assertThat(nested0.getDocCount(), equalTo(3L)); + Terms terms = nested0.getAggregations().get("group_by_category"); + assertThat(terms.getBuckets().size(), equalTo(3)); + for (String bucketName : new String[] { "abc", "klm", "xyz" }) { + logger.info("Checking results for bucket {}", bucketName); + Terms.Bucket bucket = 
terms.getBucketByKey(bucketName); + assertThat(bucket.getDocCount(), equalTo(1L)); + ReverseNested toRoot = bucket.getAggregations().get("to_root"); + assertThat(toRoot.getDocCount(), equalTo(1L)); + Nested nested1 = toRoot.getAggregations().get("nested_1"); + assertThat(nested1.getDocCount(), equalTo(5L)); + Filter filterByBar = nested1.getAggregations().get("filter_by_sku"); + assertThat(filterByBar.getDocCount(), equalTo(3L)); + Nested nested2 = filterByBar.getAggregations().get("nested_2"); + assertThat(nested2.getDocCount(), equalTo(8L)); + Filter filterBarColor = nested2.getAggregations().get("filter_sku_color"); + assertThat(filterBarColor.getDocCount(), equalTo(2L)); + ReverseNested reverseToBar = filterBarColor.getAggregations().get("reverse_to_sku"); + assertThat(reverseToBar.getDocCount(), equalTo(2L)); + ValueCount barCount = reverseToBar.getAggregations().get("sku_count"); + assertThat(barCount.getValue(), equalTo(2L)); + } + } + ); } public void testFieldAlias() { - SearchResponse response = prepareSearch("idx1").addAggregation( - nested("nested1", "nested1").subAggregation( - terms("field2").field("nested1.field2") - .subAggregation( - reverseNested("nested1_to_field1").subAggregation( - terms("field1").field("alias").collectMode(randomFrom(SubAggCollectionMode.values())) + assertNoFailuresAndResponse( + prepareSearch("idx1").addAggregation( + nested("nested1", "nested1").subAggregation( + terms("field2").field("nested1.field2") + .subAggregation( + reverseNested("nested1_to_field1").subAggregation( + terms("field1").field("alias").collectMode(randomFrom(SubAggCollectionMode.values())) + ) ) - ) - ) - ).get(); - - assertNoFailures(response); - - Nested nested = response.getAggregations().get("nested1"); - Terms nestedTerms = nested.getAggregations().get("field2"); - Terms.Bucket bucket = nestedTerms.getBuckets().iterator().next(); - - ReverseNested reverseNested = bucket.getAggregations().get("nested1_to_field1"); - Terms reverseNestedTerms = reverseNested.getAggregations().get("field1"); - - assertThat(((InternalAggregation) reverseNested).getProperty("field1"), sameInstance(reverseNestedTerms)); - assertThat(reverseNestedTerms.getBuckets().size(), equalTo(6)); + ) + ), + response -> { + Nested nested = response.getAggregations().get("nested1"); + Terms nestedTerms = nested.getAggregations().get("field2"); + Terms.Bucket bucket = nestedTerms.getBuckets().iterator().next(); + + ReverseNested reverseNested = bucket.getAggregations().get("nested1_to_field1"); + Terms reverseNestedTerms = reverseNested.getAggregations().get("field1"); + + assertThat(((InternalAggregation) reverseNested).getProperty("field1"), sameInstance(reverseNestedTerms)); + assertThat(reverseNestedTerms.getBuckets().size(), equalTo(6)); + } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java index f6d7d37a29136..c367752cc0460 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.index.query.TermQueryBuilder; import 
org.elasticsearch.search.aggregations.BucketOrder; @@ -27,6 +26,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -91,96 +91,103 @@ public void testIssue10719() throws Exception { // Tests that we can refer to nested elements under a sample in a path // statement boolean asc = randomBoolean(); - SearchResponse response = prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH) - .addAggregation( - terms("genres").field("genre") - .order(BucketOrder.aggregation("sample>max_price.value", asc)) - .subAggregation(sampler("sample").shardSize(100).subAggregation(max("max_price").field("price"))) - ) - .get(); - assertNoFailures(response); - Terms genres = response.getAggregations().get("genres"); - List genreBuckets = genres.getBuckets(); - // For this test to be useful we need >1 genre bucket to compare - assertThat(genreBuckets.size(), greaterThan(1)); - double lastMaxPrice = asc ? Double.MIN_VALUE : Double.MAX_VALUE; - for (Terms.Bucket genreBucket : genres.getBuckets()) { - Sampler sample = genreBucket.getAggregations().get("sample"); - Max maxPriceInGenre = sample.getAggregations().get("max_price"); - double price = maxPriceInGenre.value(); - if (asc) { - assertThat(price, greaterThanOrEqualTo(lastMaxPrice)); - } else { - assertThat(price, lessThanOrEqualTo(lastMaxPrice)); + assertNoFailuresAndResponse( + prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH) + .addAggregation( + terms("genres").field("genre") + .order(BucketOrder.aggregation("sample>max_price.value", asc)) + .subAggregation(sampler("sample").shardSize(100).subAggregation(max("max_price").field("price"))) + ), + response -> { + Terms genres = response.getAggregations().get("genres"); + List genreBuckets = genres.getBuckets(); + // For this test to be useful we need >1 genre bucket to compare + assertThat(genreBuckets.size(), greaterThan(1)); + double lastMaxPrice = asc ? 
Double.MIN_VALUE : Double.MAX_VALUE; + for (Terms.Bucket genreBucket : genres.getBuckets()) { + Sampler sample = genreBucket.getAggregations().get("sample"); + Max maxPriceInGenre = sample.getAggregations().get("max_price"); + double price = maxPriceInGenre.value(); + if (asc) { + assertThat(price, greaterThanOrEqualTo(lastMaxPrice)); + } else { + assertThat(price, lessThanOrEqualTo(lastMaxPrice)); + } + lastMaxPrice = price; + } } - lastMaxPrice = price; - } - + ); } public void testSimpleSampler() throws Exception { SamplerAggregationBuilder sampleAgg = sampler("sample").shardSize(100); sampleAgg.subAggregation(terms("authors").field("author")); - SearchResponse response = prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH) - .setQuery(new TermQueryBuilder("genre", "fantasy")) - .setFrom(0) - .setSize(60) - .addAggregation(sampleAgg) - .get(); - assertNoFailures(response); - Sampler sample = response.getAggregations().get("sample"); - Terms authors = sample.getAggregations().get("authors"); - List testBuckets = authors.getBuckets(); - - long maxBooksPerAuthor = 0; - for (Terms.Bucket testBucket : testBuckets) { - maxBooksPerAuthor = Math.max(testBucket.getDocCount(), maxBooksPerAuthor); - } - assertThat(maxBooksPerAuthor, equalTo(3L)); + assertNoFailuresAndResponse( + prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH) + .setQuery(new TermQueryBuilder("genre", "fantasy")) + .setFrom(0) + .setSize(60) + .addAggregation(sampleAgg), + response -> { + Sampler sample = response.getAggregations().get("sample"); + Terms authors = sample.getAggregations().get("authors"); + List testBuckets = authors.getBuckets(); + + long maxBooksPerAuthor = 0; + for (Terms.Bucket testBucket : testBuckets) { + maxBooksPerAuthor = Math.max(testBucket.getDocCount(), maxBooksPerAuthor); + } + assertThat(maxBooksPerAuthor, equalTo(3L)); + } + ); } public void testUnmappedChildAggNoDiversity() throws Exception { SamplerAggregationBuilder sampleAgg = sampler("sample").shardSize(100); sampleAgg.subAggregation(terms("authors").field("author")); - SearchResponse response = prepareSearch("idx_unmapped").setSearchType(SearchType.QUERY_THEN_FETCH) - .setQuery(new TermQueryBuilder("genre", "fantasy")) - .setFrom(0) - .setSize(60) - .addAggregation(sampleAgg) - .get(); - assertNoFailures(response); - Sampler sample = response.getAggregations().get("sample"); - assertThat(sample.getDocCount(), equalTo(0L)); - Terms authors = sample.getAggregations().get("authors"); - assertThat(authors.getBuckets().size(), equalTo(0)); + assertNoFailuresAndResponse( + prepareSearch("idx_unmapped").setSearchType(SearchType.QUERY_THEN_FETCH) + .setQuery(new TermQueryBuilder("genre", "fantasy")) + .setFrom(0) + .setSize(60) + .addAggregation(sampleAgg), + response -> { + Sampler sample = response.getAggregations().get("sample"); + assertThat(sample.getDocCount(), equalTo(0L)); + Terms authors = sample.getAggregations().get("authors"); + assertThat(authors.getBuckets().size(), equalTo(0)); + } + ); } public void testPartiallyUnmappedChildAggNoDiversity() throws Exception { SamplerAggregationBuilder sampleAgg = sampler("sample").shardSize(100); sampleAgg.subAggregation(terms("authors").field("author")); - SearchResponse response = prepareSearch("idx_unmapped", "test").setSearchType(SearchType.QUERY_THEN_FETCH) - .setQuery(new TermQueryBuilder("genre", "fantasy")) - .setFrom(0) - .setSize(60) - .setExplain(true) - .addAggregation(sampleAgg) - .get(); - assertNoFailures(response); - Sampler sample = 
response.getAggregations().get("sample");
-        assertThat(sample.getDocCount(), greaterThan(0L));
-        Terms authors = sample.getAggregations().get("authors");
-        assertThat(authors.getBuckets().size(), greaterThan(0));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx_unmapped", "test").setSearchType(SearchType.QUERY_THEN_FETCH)
+                .setQuery(new TermQueryBuilder("genre", "fantasy"))
+                .setFrom(0)
+                .setSize(60)
+                .setExplain(true)
+                .addAggregation(sampleAgg),
+            response -> {
+                Sampler sample = response.getAggregations().get("sample");
+                assertThat(sample.getDocCount(), greaterThan(0L));
+                Terms authors = sample.getAggregations().get("authors");
+                assertThat(authors.getBuckets().size(), greaterThan(0));
+            }
+        );
     }
 
     public void testRidiculousShardSizeSampler() throws Exception {
         SamplerAggregationBuilder sampleAgg = sampler("sample").shardSize(Integer.MAX_VALUE);
         sampleAgg.subAggregation(terms("authors").field("author"));
-        SearchResponse response = prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH)
-            .setQuery(new TermQueryBuilder("genre", "fantasy"))
-            .setFrom(0)
-            .setSize(60)
-            .addAggregation(sampleAgg)
-            .get();
-        assertNoFailures(response);
+        assertNoFailures(
+            prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH)
+                .setQuery(new TermQueryBuilder("genre", "fantasy"))
+                .setFrom(0)
+                .setSize(60)
+                .addAggregation(sampleAgg)
+        );
     }
 }
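Every conversion in this patch funnels through the same ElasticsearchAssertions helper: instead of materializing a SearchResponse local and remembering to both check and release it, the test hands its assertions to assertNoFailuresAndResponse as a consumer. A minimal sketch of the shape of such a helper, for orientation only and not the actual ElasticsearchAssertions implementation, looks like this:

    // Sketch only: execute the search, fail on shard failures, run the caller's
    // assertions, and always release the response afterwards.
    import java.util.function.Consumer;

    import org.elasticsearch.action.search.SearchRequestBuilder;
    import org.elasticsearch.action.search.SearchResponse;

    import static org.hamcrest.MatcherAssert.assertThat;
    import static org.hamcrest.Matchers.equalTo;

    public final class ResponseAssertionSketch {
        public static void assertNoFailuresAndResponse(SearchRequestBuilder request, Consumer<SearchResponse> consumer) {
            SearchResponse response = request.get();   // runs the search synchronously
            try {
                assertThat("unexpected shard failures", response.getFailedShards(), equalTo(0));
                consumer.accept(response);             // caller's assertions run while the response is still live
            } finally {
                response.decRef();                     // release exactly once, even if an assertion throws
            }
        }
    }

The design point is the finally block: with SearchResponse being ref-counted, assertions run inside the consumer so the response can never escape the helper unreleased, which is why the tests below no longer hold a response variable at all.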
dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0) + ) + ), + response -> { + Global global = response.getAggregations().get("global"); + Histogram histo = global.getAggregations().get("histo"); + assertThat(histo.getBuckets().size(), equalTo(4)); + } + ); } public void testFilter() throws Exception { - SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery()) - .addAggregation( - filter("filter", QueryBuilders.matchAllQuery()).subAggregation( - dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0) - ) - ) - .get(); - - assertNoFailures(response); - - Filter filter = response.getAggregations().get("filter"); - Histogram histo = filter.getAggregations().get("histo"); - assertThat(histo.getBuckets().size(), equalTo(4)); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery()) + .addAggregation( + filter("filter", QueryBuilders.matchAllQuery()).subAggregation( + dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0) + ) + ), + response -> { + Filter filter = response.getAggregations().get("filter"); + Histogram histo = filter.getAggregations().get("histo"); + assertThat(histo.getBuckets().size(), equalTo(4)); + } + ); } public void testMissing() throws Exception { - SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery()) - .addAggregation( - missing("missing").field("foobar") - .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) - ) - .get(); - - assertNoFailures(response); - - Missing missing = response.getAggregations().get("missing"); - Histogram histo = missing.getAggregations().get("histo"); - assertThat(histo.getBuckets().size(), equalTo(4)); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery()) + .addAggregation( + missing("missing").field("foobar") + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) + ), + response -> { + Missing missing = response.getAggregations().get("missing"); + Histogram histo = missing.getAggregations().get("histo"); + assertThat(histo.getBuckets().size(), equalTo(4)); + } + ); } public void testGlobalWithFilterWithMissing() throws Exception { - SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery()) - .addAggregation( - global("global").subAggregation( - filter("filter", QueryBuilders.matchAllQuery()).subAggregation( - missing("missing").field("foobar") - .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery()) + .addAggregation( + global("global").subAggregation( + filter("filter", QueryBuilders.matchAllQuery()).subAggregation( + missing("missing").field("foobar") + .subAggregation( + dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0) + ) + ) ) - ) - ) - .get(); - - assertNoFailures(response); - - Global global = response.getAggregations().get("global"); - Filter filter = global.getAggregations().get("filter"); - Missing missing = filter.getAggregations().get("missing"); - Histogram histo = missing.getAggregations().get("histo"); - assertThat(histo.getBuckets().size(), equalTo(4)); + ), + response -> { + Global global = response.getAggregations().get("global"); + Filter filter 
= global.getAggregations().get("filter"); + Missing missing = filter.getAggregations().get("missing"); + Histogram histo = missing.getAggregations().get("histo"); + assertThat(histo.getBuckets().size(), equalTo(4)); + } + ); } public void testNested() throws Exception { - SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery()) - .addAggregation( - nested("nested", "nested").subAggregation( - dateHistogram("histo").field("nested.date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0) - ) - ) - .get(); - - assertNoFailures(response); - - Nested nested = response.getAggregations().get("nested"); - Histogram histo = nested.getAggregations().get("histo"); - assertThat(histo.getBuckets().size(), equalTo(4)); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery()) + .addAggregation( + nested("nested", "nested").subAggregation( + dateHistogram("histo").field("nested.date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0) + ) + ), + response -> { + Nested nested = response.getAggregations().get("nested"); + Histogram histo = nested.getAggregations().get("histo"); + assertThat(histo.getBuckets().size(), equalTo(4)); + } + ); } public void testStringTerms() throws Exception { - SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery()) - .addAggregation( - terms("terms").field("term-s") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) - ) - .get(); - - assertNoFailures(response); - - Terms terms = response.getAggregations().get("terms"); - Histogram histo = terms.getBucketByKey("term").getAggregations().get("histo"); - assertThat(histo.getBuckets().size(), equalTo(4)); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery()) + .addAggregation( + terms("terms").field("term-s") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) + ), + response -> { + Terms terms = response.getAggregations().get("terms"); + Histogram histo = terms.getBucketByKey("term").getAggregations().get("histo"); + assertThat(histo.getBuckets().size(), equalTo(4)); + } + ); } public void testLongTerms() throws Exception { - SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery()) - .addAggregation( - terms("terms").field("term-l") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) - ) - .get(); - - assertNoFailures(response); - - Terms terms = response.getAggregations().get("terms"); - Histogram histo = terms.getBucketByKey("1").getAggregations().get("histo"); - assertThat(histo.getBuckets().size(), equalTo(4)); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery()) + .addAggregation( + terms("terms").field("term-l") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) + ), + response -> { + Terms terms = response.getAggregations().get("terms"); + Histogram histo = terms.getBucketByKey("1").getAggregations().get("histo"); + assertThat(histo.getBuckets().size(), equalTo(4)); + } + ); } public void testDoubleTerms() throws Exception { - SearchResponse 
response = prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery()) - .addAggregation( - terms("terms").field("term-d") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) - ) - .get(); - - assertNoFailures(response); - - Terms terms = response.getAggregations().get("terms"); - Histogram histo = terms.getBucketByKey("1.5").getAggregations().get("histo"); - assertThat(histo.getBuckets().size(), equalTo(4)); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery()) + .addAggregation( + terms("terms").field("term-d") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) + ), + response -> { + Terms terms = response.getAggregations().get("terms"); + Histogram histo = terms.getBucketByKey("1.5").getAggregations().get("histo"); + assertThat(histo.getBuckets().size(), equalTo(4)); + } + ); } public void testRange() throws Exception { - SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery()) - .addAggregation( - range("range").field("value") - .addRange("r1", 0, 10) - .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) - ) - .get(); - - assertNoFailures(response); - - Range range = response.getAggregations().get("range"); - Histogram histo = range.getBuckets().get(0).getAggregations().get("histo"); - assertThat(histo.getBuckets().size(), equalTo(4)); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery()) + .addAggregation( + range("range").field("value") + .addRange("r1", 0, 10) + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) + ), + response -> { + Range range = response.getAggregations().get("range"); + Histogram histo = range.getBuckets().get(0).getAggregations().get("histo"); + assertThat(histo.getBuckets().size(), equalTo(4)); + } + ); } public void testDateRange() throws Exception { - SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery()) - .addAggregation( - dateRange("range").field("date") - .addRange("r1", "2014-01-01", "2014-01-10") - .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) - ) - .get(); - - assertNoFailures(response); - - Range range = response.getAggregations().get("range"); - Histogram histo = range.getBuckets().get(0).getAggregations().get("histo"); - assertThat(histo.getBuckets().size(), equalTo(4)); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery()) + .addAggregation( + dateRange("range").field("date") + .addRange("r1", "2014-01-01", "2014-01-10") + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) + ), + response -> { + Range range = response.getAggregations().get("range"); + Histogram histo = range.getBuckets().get(0).getAggregations().get("histo"); + assertThat(histo.getBuckets().size(), equalTo(4)); + } + ); } public void testIpRange() throws Exception { - SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery()) - .addAggregation( - ipRange("range").field("ip") - .addRange("r1", "10.0.0.1", "10.0.0.10") - 
.subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) - ) - .get(); - - assertNoFailures(response); - - Range range = response.getAggregations().get("range"); - Histogram histo = range.getBuckets().get(0).getAggregations().get("histo"); - assertThat(histo.getBuckets().size(), equalTo(4)); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery()) + .addAggregation( + ipRange("range").field("ip") + .addRange("r1", "10.0.0.1", "10.0.0.10") + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) + ), + response -> { + Range range = response.getAggregations().get("range"); + Histogram histo = range.getBuckets().get(0).getAggregations().get("histo"); + assertThat(histo.getBuckets().size(), equalTo(4)); + } + ); } public void testHistogram() throws Exception { - SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery()) - .addAggregation( - histogram("topHisto").field("value") - .interval(5) - .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) - ) - .get(); - - assertNoFailures(response); - - Histogram topHisto = response.getAggregations().get("topHisto"); - Histogram histo = topHisto.getBuckets().get(0).getAggregations().get("histo"); - assertThat(histo.getBuckets().size(), equalTo(4)); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery()) + .addAggregation( + histogram("topHisto").field("value") + .interval(5) + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) + ), + response -> { + Histogram topHisto = response.getAggregations().get("topHisto"); + Histogram histo = topHisto.getBuckets().get(0).getAggregations().get("histo"); + assertThat(histo.getBuckets().size(), equalTo(4)); + } + ); } public void testDateHistogram() throws Exception { - SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery()) - .addAggregation( - dateHistogram("topHisto").field("date") - .calendarInterval(DateHistogramInterval.MONTH) - .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) - ) - .get(); - - assertNoFailures(response); - - Histogram topHisto = response.getAggregations().get("topHisto"); - Histogram histo = topHisto.getBuckets().iterator().next().getAggregations().get("histo"); - assertThat(histo.getBuckets().size(), equalTo(4)); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery()) + .addAggregation( + dateHistogram("topHisto").field("date") + .calendarInterval(DateHistogramInterval.MONTH) + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) + ), + response -> { + Histogram topHisto = response.getAggregations().get("topHisto"); + Histogram histo = topHisto.getBuckets().iterator().next().getAggregations().get("histo"); + assertThat(histo.getBuckets().size(), equalTo(4)); + } + ); } public void testGeoHashGrid() throws Exception { - SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery()) - .addAggregation( - geohashGrid("grid").field("location") - .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) - ) - .get(); - - assertNoFailures(response); - - GeoGrid grid = response.getAggregations().get("grid"); - Histogram 
histo = grid.getBuckets().iterator().next().getAggregations().get("histo");
-        assertThat(histo.getBuckets().size(), equalTo(4));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery())
+                .addAggregation(
+                    geohashGrid("grid").field("location")
+                        .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0))
+                ),
+            response -> {
+                GeoGrid grid = response.getAggregations().get("grid");
+                Histogram histo = grid.getBuckets().iterator().next().getAggregations().get("histo");
+                assertThat(histo.getBuckets().size(), equalTo(4));
+            }
+        );
     }
 
     public void testGeoTileGrid() throws Exception {
-        SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery())
-            .addAggregation(
-                geotileGrid("grid").field("location")
-                    .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0))
-            )
-            .get();
-
-        assertNoFailures(response);
-
-        GeoGrid grid = response.getAggregations().get("grid");
-        Histogram histo = grid.getBuckets().iterator().next().getAggregations().get("histo");
-        assertThat(histo.getBuckets().size(), equalTo(4));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery())
+                .addAggregation(
+                    geotileGrid("grid").field("location")
+                        .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0))
+                ),
+            response -> {
+                GeoGrid grid = response.getAggregations().get("grid");
+                Histogram histo = grid.getBuckets().iterator().next().getAggregations().get("histo");
+                assertThat(histo.getBuckets().size(), equalTo(4));
+            }
+        );
     }
 }
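Every ShardSizeTermsIT conversion below follows one template: run the terms aggregation, then compare per-key doc counts against a small expected map inside the response consumer. A condensed, hypothetical rendering of that template follows; it assumes the test class's usual ESIntegTestCase context and static imports (prepareSearch, matchAllQuery, terms, assertThat, equalTo), and uses Map.of instead of the patch's HashMap puts purely for brevity:

    // Hypothetical condensed form of the repeated ShardSizeTermsIT assertion shape.
    assertNoFailuresAndResponse(
        prepareSearch("idx").setQuery(matchAllQuery())
            .addAggregation(terms("keys").field("key").size(3).order(BucketOrder.count(false))),
        response -> {
            Terms keys = response.getAggregations().get("keys");
            Map<String, Long> expected = Map.of("1", 8L, "3", 8L, "2", 5L); // immutable, Java 9+
            assertThat(keys.getBuckets().size(), equalTo(expected.size()));
            for (Terms.Bucket bucket : keys.getBuckets()) {
                assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsString())));
            }
        }
    );

One wrinkle of the lambda style is that anything the consumer reads from the enclosing scope must be effectively final; mutable locals such as the running lastMaxPrice in SamplerIT.testIssue10719 above therefore move inside the lambda body rather than being captured and reassigned from outside.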
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsIT.java
index 2c0c7766b646c..b8a1b3df8cf60 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsIT.java
@@ -7,7 +7,6 @@
  */
 package org.elasticsearch.search.aggregations.bucket;
 
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
 import org.elasticsearch.search.aggregations.BucketOrder;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms;
@@ -18,6 +17,7 @@
 import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
 import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
 import static org.hamcrest.Matchers.equalTo;
 
 public class ShardSizeTermsIT extends ShardSizeTestCase {
@@ -26,22 +26,27 @@ public void testNoShardSizeString() throws Exception {
         indexData();
 
-        SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery())
-            .addAggregation(
-                terms("keys").field("key").size(3).collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false))
-            )
-            .get();
-
-        Terms terms = response.getAggregations().get("keys");
-        List<? extends Terms.Bucket> buckets = terms.getBuckets();
-        assertThat(buckets.size(), equalTo(3));
-        Map<String, Long> expected = new HashMap<>();
-        expected.put("1", 8L);
-        expected.put("3", 8L);
-        expected.put("2", 5L);
-        for (Terms.Bucket bucket : buckets) {
-            assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsString())));
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setQuery(matchAllQuery())
+                .addAggregation(
+                    terms("keys").field("key")
+                        .size(3)
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .order(BucketOrder.count(false))
+                ),
+            response -> {
+                Terms terms = response.getAggregations().get("keys");
+                List<? extends Terms.Bucket> buckets = terms.getBuckets();
+                assertThat(buckets.size(), equalTo(3));
+                Map<String, Long> expected = new HashMap<>();
+                expected.put("1", 8L);
+                expected.put("3", 8L);
+                expected.put("2", 5L);
+                for (Terms.Bucket bucket : buckets) {
+                    assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsString())));
+                }
+            }
+        );
     }
 
     public void testShardSizeEqualsSizeString() throws Exception {
@@ -49,26 +54,28 @@ public void testShardSizeEqualsSizeString() throws Exception {
         indexData();
 
-        SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery())
-            .addAggregation(
-                terms("keys").field("key")
-                    .size(3)
-                    .shardSize(3)
-                    .collectMode(randomFrom(SubAggCollectionMode.values()))
-                    .order(BucketOrder.count(false))
-            )
-            .get();
-
-        Terms terms = response.getAggregations().get("keys");
-        List<? extends Terms.Bucket> buckets = terms.getBuckets();
-        assertThat(buckets.size(), equalTo(3));
-        Map<String, Long> expected = new HashMap<>();
-        expected.put("1", 8L);
-        expected.put("3", 8L);
-        expected.put("2", 4L);
-        for (Terms.Bucket bucket : buckets) {
-            assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsString())));
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setQuery(matchAllQuery())
+                .addAggregation(
+                    terms("keys").field("key")
+                        .size(3)
+                        .shardSize(3)
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .order(BucketOrder.count(false))
+                ),
+            response -> {
+                Terms terms = response.getAggregations().get("keys");
+                List<? extends Terms.Bucket> buckets = terms.getBuckets();
+                assertThat(buckets.size(), equalTo(3));
+                Map<String, Long> expected = new HashMap<>();
+                expected.put("1", 8L);
+                expected.put("3", 8L);
+                expected.put("2", 4L);
+                for (Terms.Bucket bucket : buckets) {
+                    assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsString())));
+                }
+            }
+        );
     }
 
     public void testWithShardSizeString() throws Exception {
@@ -77,26 +84,28 @@ public void testWithShardSizeString() throws Exception {
         indexData();
 
-        SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery())
-            .addAggregation(
-                terms("keys").field("key")
-                    .size(3)
-                    .collectMode(randomFrom(SubAggCollectionMode.values()))
-                    .shardSize(5)
-                    .order(BucketOrder.count(false))
-            )
-            .get();
-
-        Terms terms = response.getAggregations().get("keys");
-        List<? extends Terms.Bucket> buckets = terms.getBuckets();
-        assertThat(buckets.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
-        Map<String, Long> expected = new HashMap<>();
-        expected.put("1", 8L);
-        expected.put("3", 8L);
-        expected.put("2", 5L); // <-- count is now fixed
-        for (Terms.Bucket bucket : buckets) {
-            assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsString())));
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setQuery(matchAllQuery())
+                .addAggregation(
+                    terms("keys").field("key")
+                        .size(3)
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .shardSize(5)
+                        .order(BucketOrder.count(false))
+                ),
+            response -> {
+                Terms terms = response.getAggregations().get("keys");
+                List<? extends Terms.Bucket> buckets = terms.getBuckets();
+                assertThat(buckets.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
+                Map<String, Long> expected = new HashMap<>();
+                expected.put("1", 8L);
+                expected.put("3", 8L);
+                expected.put("2", 5L); // <-- count is now fixed
+                for
(Terms.Bucket bucket : buckets) { + assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsString()))); + } + } + ); } public void testWithShardSizeStringSingleShard() throws Exception { @@ -105,27 +114,29 @@ public void testWithShardSizeStringSingleShard() throws Exception { indexData(); - SearchResponse response = prepareSearch("idx").setRouting(routing1) - .setQuery(matchAllQuery()) - .addAggregation( - terms("keys").field("key") - .size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .shardSize(5) - .order(BucketOrder.count(false)) - ) - .get(); - - Terms terms = response.getAggregations().get("keys"); - List buckets = terms.getBuckets(); - assertThat(buckets.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param) - Map expected = new HashMap<>(); - expected.put("1", 5L); - expected.put("2", 4L); - expected.put("3", 3L); // <-- count is now fixed - for (Terms.Bucket bucket : buckets) { - assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKey()))); - } + assertNoFailuresAndResponse( + prepareSearch("idx").setRouting(routing1) + .setQuery(matchAllQuery()) + .addAggregation( + terms("keys").field("key") + .size(3) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .shardSize(5) + .order(BucketOrder.count(false)) + ), + response -> { + Terms terms = response.getAggregations().get("keys"); + List buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param) + Map expected = new HashMap<>(); + expected.put("1", 5L); + expected.put("2", 4L); + expected.put("3", 3L); // <-- count is now fixed + for (Terms.Bucket bucket : buckets) { + assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKey()))); + } + } + ); } public void testNoShardSizeTermOrderString() throws Exception { @@ -133,22 +144,24 @@ public void testNoShardSizeTermOrderString() throws Exception { indexData(); - SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - terms("keys").field("key").size(3).collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.key(true)) - ) - .get(); - - Terms terms = response.getAggregations().get("keys"); - List buckets = terms.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - Map expected = new HashMap<>(); - expected.put("1", 8L); - expected.put("2", 5L); - expected.put("3", 8L); - for (Terms.Bucket bucket : buckets) { - assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsString()))); - } + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + terms("keys").field("key").size(3).collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.key(true)) + ), + response -> { + Terms terms = response.getAggregations().get("keys"); + List buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + Map expected = new HashMap<>(); + expected.put("1", 8L); + expected.put("2", 5L); + expected.put("3", 8L); + for (Terms.Bucket bucket : buckets) { + assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsString()))); + } + } + ); } public void testNoShardSizeLong() throws Exception { @@ -156,22 +169,27 @@ public void testNoShardSizeLong() throws Exception { indexData(); - SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - terms("keys").field("key").size(3).collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false)) - ) - 
.get(); - - Terms terms = response.getAggregations().get("keys"); - List buckets = terms.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - Map expected = new HashMap<>(); - expected.put(1, 8L); - expected.put(3, 8L); - expected.put(2, 5L); - for (Terms.Bucket bucket : buckets) { - assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue()))); - } + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + terms("keys").field("key") + .size(3) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.count(false)) + ), + response -> { + Terms terms = response.getAggregations().get("keys"); + List buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + Map expected = new HashMap<>(); + expected.put(1, 8L); + expected.put(3, 8L); + expected.put(2, 5L); + for (Terms.Bucket bucket : buckets) { + assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue()))); + } + } + ); } public void testShardSizeEqualsSizeLong() throws Exception { @@ -179,26 +197,28 @@ public void testShardSizeEqualsSizeLong() throws Exception { indexData(); - SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - terms("keys").field("key") - .size(3) - .shardSize(3) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.count(false)) - ) - .get(); - - Terms terms = response.getAggregations().get("keys"); - List buckets = terms.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - Map expected = new HashMap<>(); - expected.put(1, 8L); - expected.put(3, 8L); - expected.put(2, 4L); - for (Terms.Bucket bucket : buckets) { - assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue()))); - } + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + terms("keys").field("key") + .size(3) + .shardSize(3) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.count(false)) + ), + response -> { + Terms terms = response.getAggregations().get("keys"); + List buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + Map expected = new HashMap<>(); + expected.put(1, 8L); + expected.put(3, 8L); + expected.put(2, 4L); + for (Terms.Bucket bucket : buckets) { + assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue()))); + } + } + ); } public void testWithShardSizeLong() throws Exception { @@ -206,26 +226,28 @@ public void testWithShardSizeLong() throws Exception { indexData(); - SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - terms("keys").field("key") - .size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .shardSize(5) - .order(BucketOrder.count(false)) - ) - .get(); - - Terms terms = response.getAggregations().get("keys"); - List buckets = terms.getBuckets(); - assertThat(buckets.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param) - Map expected = new HashMap<>(); - expected.put(1, 8L); - expected.put(3, 8L); - expected.put(2, 5L); // <-- count is now fixed - for (Terms.Bucket bucket : buckets) { - assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue()))); - } + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + terms("keys").field("key") + .size(3) + .collectMode(randomFrom(SubAggCollectionMode.values())) + 
.shardSize(5) + .order(BucketOrder.count(false)) + ), + response -> { + Terms terms = response.getAggregations().get("keys"); + List buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param) + Map expected = new HashMap<>(); + expected.put(1, 8L); + expected.put(3, 8L); + expected.put(2, 5L); // <-- count is now fixed + for (Terms.Bucket bucket : buckets) { + assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue()))); + } + } + ); } public void testWithShardSizeLongSingleShard() throws Exception { @@ -234,27 +256,29 @@ public void testWithShardSizeLongSingleShard() throws Exception { indexData(); - SearchResponse response = prepareSearch("idx").setRouting(routing1) - .setQuery(matchAllQuery()) - .addAggregation( - terms("keys").field("key") - .size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .shardSize(5) - .order(BucketOrder.count(false)) - ) - .get(); - - Terms terms = response.getAggregations().get("keys"); - List buckets = terms.getBuckets(); - assertThat(buckets.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param) - Map expected = new HashMap<>(); - expected.put(1, 5L); - expected.put(2, 4L); - expected.put(3, 3L); - for (Terms.Bucket bucket : buckets) { - assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue()))); - } + assertNoFailuresAndResponse( + prepareSearch("idx").setRouting(routing1) + .setQuery(matchAllQuery()) + .addAggregation( + terms("keys").field("key") + .size(3) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .shardSize(5) + .order(BucketOrder.count(false)) + ), + response -> { + Terms terms = response.getAggregations().get("keys"); + List buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param) + Map expected = new HashMap<>(); + expected.put(1, 5L); + expected.put(2, 4L); + expected.put(3, 3L); + for (Terms.Bucket bucket : buckets) { + assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue()))); + } + } + ); } public void testNoShardSizeTermOrderLong() throws Exception { @@ -262,22 +286,24 @@ public void testNoShardSizeTermOrderLong() throws Exception { indexData(); - SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - terms("keys").field("key").size(3).collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.key(true)) - ) - .get(); - - Terms terms = response.getAggregations().get("keys"); - List buckets = terms.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - Map expected = new HashMap<>(); - expected.put(1, 8L); - expected.put(2, 5L); - expected.put(3, 8L); - for (Terms.Bucket bucket : buckets) { - assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue()))); - } + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + terms("keys").field("key").size(3).collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.key(true)) + ), + response -> { + Terms terms = response.getAggregations().get("keys"); + List buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + Map expected = new HashMap<>(); + expected.put(1, 8L); + expected.put(2, 5L); + expected.put(3, 8L); + for (Terms.Bucket bucket : buckets) { + assertThat(bucket.getDocCount(), 
equalTo(expected.get(bucket.getKeyAsNumber().intValue()))); + } + } + ); } public void testNoShardSizeDouble() throws Exception { @@ -285,22 +311,27 @@ public void testNoShardSizeDouble() throws Exception { indexData(); - SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - terms("keys").field("key").size(3).collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false)) - ) - .get(); - - Terms terms = response.getAggregations().get("keys"); - List buckets = terms.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - Map expected = new HashMap<>(); - expected.put(1, 8L); - expected.put(3, 8L); - expected.put(2, 5L); - for (Terms.Bucket bucket : buckets) { - assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue()))); - } + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + terms("keys").field("key") + .size(3) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.count(false)) + ), + response -> { + Terms terms = response.getAggregations().get("keys"); + List buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + Map expected = new HashMap<>(); + expected.put(1, 8L); + expected.put(3, 8L); + expected.put(2, 5L); + for (Terms.Bucket bucket : buckets) { + assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue()))); + } + } + ); } public void testShardSizeEqualsSizeDouble() throws Exception { @@ -308,26 +339,28 @@ public void testShardSizeEqualsSizeDouble() throws Exception { indexData(); - SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - terms("keys").field("key") - .size(3) - .shardSize(3) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.count(false)) - ) - .get(); - - Terms terms = response.getAggregations().get("keys"); - List buckets = terms.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - Map expected = new HashMap<>(); - expected.put(1, 8L); - expected.put(3, 8L); - expected.put(2, 4L); - for (Terms.Bucket bucket : buckets) { - assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue()))); - } + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + terms("keys").field("key") + .size(3) + .shardSize(3) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.count(false)) + ), + response -> { + Terms terms = response.getAggregations().get("keys"); + List buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + Map expected = new HashMap<>(); + expected.put(1, 8L); + expected.put(3, 8L); + expected.put(2, 4L); + for (Terms.Bucket bucket : buckets) { + assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue()))); + } + } + ); } public void testWithShardSizeDouble() throws Exception { @@ -335,26 +368,28 @@ public void testWithShardSizeDouble() throws Exception { indexData(); - SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - terms("keys").field("key") - .size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .shardSize(5) - .order(BucketOrder.count(false)) - ) - .get(); - - Terms terms = response.getAggregations().get("keys"); - List buckets = terms.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - Map expected = new HashMap<>(); - expected.put(1, 8L); - expected.put(3, 8L); - 
expected.put(2, 5L); // <-- count is now fixed - for (Terms.Bucket bucket : buckets) { - assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue()))); - } + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + terms("keys").field("key") + .size(3) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .shardSize(5) + .order(BucketOrder.count(false)) + ), + response -> { + Terms terms = response.getAggregations().get("keys"); + List buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + Map expected = new HashMap<>(); + expected.put(1, 8L); + expected.put(3, 8L); + expected.put(2, 5L); // <-- count is now fixed + for (Terms.Bucket bucket : buckets) { + assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue()))); + } + } + ); } public void testWithShardSizeDoubleSingleShard() throws Exception { @@ -362,27 +397,29 @@ public void testWithShardSizeDoubleSingleShard() throws Exception { indexData(); - SearchResponse response = prepareSearch("idx").setRouting(routing1) - .setQuery(matchAllQuery()) - .addAggregation( - terms("keys").field("key") - .size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .shardSize(5) - .order(BucketOrder.count(false)) - ) - .get(); - - Terms terms = response.getAggregations().get("keys"); - List buckets = terms.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - Map expected = new HashMap<>(); - expected.put(1, 5L); - expected.put(2, 4L); - expected.put(3, 3L); - for (Terms.Bucket bucket : buckets) { - assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue()))); - } + assertNoFailuresAndResponse( + prepareSearch("idx").setRouting(routing1) + .setQuery(matchAllQuery()) + .addAggregation( + terms("keys").field("key") + .size(3) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .shardSize(5) + .order(BucketOrder.count(false)) + ), + response -> { + Terms terms = response.getAggregations().get("keys"); + List buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + Map expected = new HashMap<>(); + expected.put(1, 5L); + expected.put(2, 4L); + expected.put(3, 3L); + for (Terms.Bucket bucket : buckets) { + assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue()))); + } + } + ); } public void testNoShardSizeTermOrderDouble() throws Exception { @@ -390,21 +427,23 @@ public void testNoShardSizeTermOrderDouble() throws Exception { indexData(); - SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - terms("keys").field("key").size(3).collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.key(true)) - ) - .get(); - - Terms terms = response.getAggregations().get("keys"); - List buckets = terms.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - Map expected = new HashMap<>(); - expected.put(1, 8L); - expected.put(2, 5L); - expected.put(3, 8L); - for (Terms.Bucket bucket : buckets) { - assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue()))); - } + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + terms("keys").field("key").size(3).collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.key(true)) + ), + response -> { + Terms terms = response.getAggregations().get("keys"); + List buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + Map expected = new 
HashMap<>(); + expected.put(1, 8L); + expected.put(2, 5L); + expected.put(3, 8L); + for (Terms.Bucket bucket : buckets) { + assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue()))); + } + } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java index 4d94173f8d978..40336ad615376 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java @@ -9,7 +9,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; @@ -57,7 +56,9 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.significantText; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertCheckedResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -130,71 +131,70 @@ public void testXContentResponse() throws Exception { ); } - SearchResponse response = request.get(); - - assertNoFailures(response); - StringTerms classes = response.getAggregations().get("class"); - assertThat(classes.getBuckets().size(), equalTo(2)); - for (Terms.Bucket classBucket : classes.getBuckets()) { - Map aggs = classBucket.getAggregations().asMap(); - assertTrue(aggs.containsKey("sig_terms")); - SignificantTerms agg = (SignificantTerms) aggs.get("sig_terms"); - assertThat(agg.getBuckets().size(), equalTo(1)); - String term = agg.iterator().next().getKeyAsString(); - String classTerm = classBucket.getKeyAsString(); - assertTrue(term.equals(classTerm)); - } + assertCheckedResponse(request, response -> { + assertNoFailures(response); + StringTerms classes = response.getAggregations().get("class"); + assertThat(classes.getBuckets().size(), equalTo(2)); + for (Terms.Bucket classBucket : classes.getBuckets()) { + Map aggs = classBucket.getAggregations().asMap(); + assertTrue(aggs.containsKey("sig_terms")); + SignificantTerms agg = (SignificantTerms) aggs.get("sig_terms"); + assertThat(agg.getBuckets().size(), equalTo(1)); + String term = agg.iterator().next().getKeyAsString(); + String classTerm = classBucket.getKeyAsString(); + assertTrue(term.equals(classTerm)); + } - XContentBuilder responseBuilder = XContentFactory.jsonBuilder(); - responseBuilder.startObject(); - classes.toXContent(responseBuilder, ToXContent.EMPTY_PARAMS); - responseBuilder.endObject(); - - Object[] args = new Object[] { type.equals("long") ? "0" : "\"0\"", type.equals("long") ? 
"1" : "\"1\"" }; - String result = Strings.format(""" - { - "class": { - "doc_count_error_upper_bound": 0, - "sum_other_doc_count": 0, - "buckets": [ - { - "key": "0", - "doc_count": 4, - "sig_terms": { - "doc_count": 4, - "bg_count": 7, - "buckets": [ - { - "key": %s, + XContentBuilder responseBuilder = XContentFactory.jsonBuilder(); + responseBuilder.startObject(); + classes.toXContent(responseBuilder, ToXContent.EMPTY_PARAMS); + responseBuilder.endObject(); + + Object[] args = new Object[] { type.equals("long") ? "0" : "\"0\"", type.equals("long") ? "1" : "\"1\"" }; + String result = Strings.format(""" + { + "class": { + "doc_count_error_upper_bound": 0, + "sum_other_doc_count": 0, + "buckets": [ + { + "key": "0", + "doc_count": 4, + "sig_terms": { "doc_count": 4, - "score": 0.39999999999999997, - "bg_count": 5 + "bg_count": 7, + "buckets": [ + { + "key": %s, + "doc_count": 4, + "score": 0.39999999999999997, + "bg_count": 5 + } + ] } - ] - } - }, - { - "key": "1", - "doc_count": 3, - "sig_terms": { - "doc_count": 3, - "bg_count": 7, - "buckets": [ - { - "key":%s, + }, + { + "key": "1", + "doc_count": 3, + "sig_terms": { "doc_count": 3, - "score": 0.75, - "bg_count": 4 + "bg_count": 7, + "buckets": [ + { + "key":%s, + "doc_count": 3, + "score": 0.75, + "bg_count": 4 + } + ] } - ] - } + } + ] } - ] - } - } - """, args); - assertThat(Strings.toString(responseBuilder), equalTo(XContentHelper.stripWhitespace(result))); - + } + """, args); + assertThat(Strings.toString(responseBuilder), equalTo(XContentHelper.stripWhitespace(result))); + }); } public void testPopularTermManyDeletedDocs() throws Exception { @@ -286,9 +286,6 @@ public void testBackgroundVsSeparateSet( ); } - SearchResponse response1 = request1.get(); - assertNoFailures(response1); - SearchRequestBuilder request2; if (useSigText) { request2 = prepareSearch(INDEX_NAME).addAggregation( @@ -324,32 +321,32 @@ public void testBackgroundVsSeparateSet( ); } - SearchResponse response2 = request2.get(); - - StringTerms classes = response1.getAggregations().get("class"); + assertNoFailuresAndResponse(request1, response1 -> assertNoFailuresAndResponse(request2, response2 -> { + StringTerms classes = response1.getAggregations().get("class"); - SignificantTerms sigTerms0 = ((SignificantTerms) (classes.getBucketByKey("0").getAggregations().asMap().get("sig_terms"))); - assertThat(sigTerms0.getBuckets().size(), equalTo(2)); - double score00Background = sigTerms0.getBucketByKey("0").getSignificanceScore(); - double score01Background = sigTerms0.getBucketByKey("1").getSignificanceScore(); - SignificantTerms sigTerms1 = ((SignificantTerms) (classes.getBucketByKey("1").getAggregations().asMap().get("sig_terms"))); - double score10Background = sigTerms1.getBucketByKey("0").getSignificanceScore(); - double score11Background = sigTerms1.getBucketByKey("1").getSignificanceScore(); + SignificantTerms sigTerms0 = ((SignificantTerms) (classes.getBucketByKey("0").getAggregations().asMap().get("sig_terms"))); + assertThat(sigTerms0.getBuckets().size(), equalTo(2)); + double score00Background = sigTerms0.getBucketByKey("0").getSignificanceScore(); + double score01Background = sigTerms0.getBucketByKey("1").getSignificanceScore(); + SignificantTerms sigTerms1 = ((SignificantTerms) (classes.getBucketByKey("1").getAggregations().asMap().get("sig_terms"))); + double score10Background = sigTerms1.getBucketByKey("0").getSignificanceScore(); + double score11Background = sigTerms1.getBucketByKey("1").getSignificanceScore(); - Aggregations aggs = 
response2.getAggregations(); + Aggregations aggs = response2.getAggregations(); - sigTerms0 = (SignificantTerms) ((InternalFilter) aggs.get("0")).getAggregations().getAsMap().get("sig_terms"); - double score00SeparateSets = sigTerms0.getBucketByKey("0").getSignificanceScore(); - double score01SeparateSets = sigTerms0.getBucketByKey("1").getSignificanceScore(); + sigTerms0 = (SignificantTerms) ((InternalFilter) aggs.get("0")).getAggregations().getAsMap().get("sig_terms"); + double score00SeparateSets = sigTerms0.getBucketByKey("0").getSignificanceScore(); + double score01SeparateSets = sigTerms0.getBucketByKey("1").getSignificanceScore(); - sigTerms1 = (SignificantTerms) ((InternalFilter) aggs.get("1")).getAggregations().getAsMap().get("sig_terms"); - double score10SeparateSets = sigTerms1.getBucketByKey("0").getSignificanceScore(); - double score11SeparateSets = sigTerms1.getBucketByKey("1").getSignificanceScore(); + sigTerms1 = (SignificantTerms) ((InternalFilter) aggs.get("1")).getAggregations().getAsMap().get("sig_terms"); + double score10SeparateSets = sigTerms1.getBucketByKey("0").getSignificanceScore(); + double score11SeparateSets = sigTerms1.getBucketByKey("1").getSignificanceScore(); - assertThat(score00Background, equalTo(score00SeparateSets)); - assertThat(score01Background, equalTo(score01SeparateSets)); - assertThat(score10Background, equalTo(score10SeparateSets)); - assertThat(score11Background, equalTo(score11SeparateSets)); + assertThat(score00Background, equalTo(score00SeparateSets)); + assertThat(score01Background, equalTo(score01SeparateSets)); + assertThat(score10Background, equalTo(score10SeparateSets)); + assertThat(score11Background, equalTo(score11SeparateSets)); + })); } public void testScoresEqualForPositiveAndNegative() throws Exception { @@ -385,25 +382,23 @@ public void testScoresEqualForPositiveAndNegative(SignificanceHeuristic heuristi ) ); } - SearchResponse response = request.get(); - assertNoFailures(response); - - assertNoFailures(response); - StringTerms classes = response.getAggregations().get("class"); - assertThat(classes.getBuckets().size(), equalTo(2)); - Iterator classBuckets = classes.getBuckets().iterator(); - - Aggregations aggregations = classBuckets.next().getAggregations(); - SignificantTerms sigTerms = aggregations.get("mySignificantTerms"); - - List classA = sigTerms.getBuckets(); - Iterator classBBucketIterator = sigTerms.iterator(); - assertThat(classA.size(), greaterThan(0)); - for (SignificantTerms.Bucket classABucket : classA) { - SignificantTerms.Bucket classBBucket = classBBucketIterator.next(); - assertThat(classABucket.getKey(), equalTo(classBBucket.getKey())); - assertThat(classABucket.getSignificanceScore(), closeTo(classBBucket.getSignificanceScore(), 1.e-5)); - } + assertNoFailuresAndResponse(request, response -> { + StringTerms classes = response.getAggregations().get("class"); + assertThat(classes.getBuckets().size(), equalTo(2)); + Iterator classBuckets = classes.getBuckets().iterator(); + + Aggregations aggregations = classBuckets.next().getAggregations(); + SignificantTerms sigTerms = aggregations.get("mySignificantTerms"); + + List classA = sigTerms.getBuckets(); + Iterator classBBucketIterator = sigTerms.iterator(); + assertThat(classA.size(), greaterThan(0)); + for (SignificantTerms.Bucket classABucket : classA) { + SignificantTerms.Bucket classBBucket = classBBucketIterator.next(); + assertThat(classABucket.getKey(), equalTo(classBBucket.getKey())); + assertThat(classABucket.getSignificanceScore(), 
closeTo(classBBucket.getSignificanceScore(), 1.e-5)); + } + }); } /** @@ -423,16 +418,15 @@ public void testSubAggregations() throws Exception { .size(1000) .subAggregation(subAgg); - SearchResponse response = prepareSearch("test").setQuery(query).addAggregation(agg).get(); - assertNoFailures(response); - - SignificantTerms sigTerms = response.getAggregations().get("significant_terms"); - assertThat(sigTerms.getBuckets().size(), equalTo(2)); + assertNoFailuresAndResponse(prepareSearch("test").setQuery(query).addAggregation(agg), response -> { + SignificantTerms sigTerms = response.getAggregations().get("significant_terms"); + assertThat(sigTerms.getBuckets().size(), equalTo(2)); - for (SignificantTerms.Bucket bucket : sigTerms) { - StringTerms terms = bucket.getAggregations().get("class"); - assertThat(terms.getBuckets().size(), equalTo(2)); - } + for (SignificantTerms.Bucket bucket : sigTerms) { + StringTerms terms = bucket.getAggregations().get("class"); + assertThat(terms.getBuckets().size(), equalTo(2)); + } + }); } private void indexEqualTestData() throws ExecutionException, InterruptedException { @@ -497,17 +491,17 @@ public void testScriptScore() throws ExecutionException, InterruptedException, I ) ); } - SearchResponse response = request.get(); - assertNoFailures(response); - for (Terms.Bucket classBucket : ((Terms) response.getAggregations().get("class")).getBuckets()) { - SignificantTerms sigTerms = classBucket.getAggregations().get("mySignificantTerms"); - for (SignificantTerms.Bucket bucket : sigTerms.getBuckets()) { - assertThat( - bucket.getSignificanceScore(), - is((double) bucket.getSubsetDf() + bucket.getSubsetSize() + bucket.getSupersetDf() + bucket.getSupersetSize()) - ); + assertNoFailuresAndResponse(request, response -> { + for (Terms.Bucket classBucket : ((Terms) response.getAggregations().get("class")).getBuckets()) { + SignificantTerms sigTerms = classBucket.getAggregations().get("mySignificantTerms"); + for (SignificantTerms.Bucket bucket : sigTerms.getBuckets()) { + assertThat( + bucket.getSignificanceScore(), + is((double) bucket.getSubsetDf() + bucket.getSubsetSize() + bucket.getSupersetDf() + bucket.getSupersetSize()) + ); + } } - } + }); } private ScriptHeuristic getScriptSignificanceHeuristic() throws IOException { @@ -579,17 +573,15 @@ public void testScriptCaching() throws Exception { new Script(ScriptType.INLINE, "mockscript", "Math.random()", Collections.emptyMap()) ); boolean useSigText = randomBoolean(); - SearchResponse r; + SearchRequestBuilder request; if (useSigText) { - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation(significantText("foo", "t").significanceHeuristic(scriptHeuristic)) - .get(); + request = prepareSearch("cache_test_idx").setSize(0) + .addAggregation(significantText("foo", "t").significanceHeuristic(scriptHeuristic)); } else { - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation(significantTerms("foo").field("s").significanceHeuristic(scriptHeuristic)) - .get(); + request = prepareSearch("cache_test_idx").setSize(0) + .addAggregation(significantTerms("foo").field("s").significanceHeuristic(scriptHeuristic)); } - assertNoFailures(r); + assertNoFailures(request); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -604,15 +596,13 @@ public void testScriptCaching() throws Exception { scriptHeuristic = getScriptSignificanceHeuristic(); useSigText = randomBoolean(); if (useSigText) { - r = 
prepareSearch("cache_test_idx").setSize(0) - .addAggregation(significantText("foo", "t").significanceHeuristic(scriptHeuristic)) - .get(); + request = prepareSearch("cache_test_idx").setSize(0) + .addAggregation(significantText("foo", "t").significanceHeuristic(scriptHeuristic)); } else { - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation(significantTerms("foo").field("s").significanceHeuristic(scriptHeuristic)) - .get(); + request = prepareSearch("cache_test_idx").setSize(0) + .addAggregation(significantTerms("foo").field("s").significanceHeuristic(scriptHeuristic)); } - assertNoFailures(r); + assertNoFailures(request); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -625,11 +615,11 @@ public void testScriptCaching() throws Exception { // Ensure that non-scripted requests are cached as normal if (useSigText) { - r = prepareSearch("cache_test_idx").setSize(0).addAggregation(significantText("foo", "t")).get(); + request = prepareSearch("cache_test_idx").setSize(0).addAggregation(significantText("foo", "t")); } else { - r = prepareSearch("cache_test_idx").setSize(0).addAggregation(significantTerms("foo").field("s")).get(); + request = prepareSearch("cache_test_idx").setSize(0).addAggregation(significantTerms("foo").field("s")); } - assertNoFailures(r); + assertNoFailures(request); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java index 58609df7ae8fe..84740d148f101 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java @@ -28,7 +28,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; @@ -267,691 +267,643 @@ private void assertUnboundedDocCountError(int size, SearchResponse accurateRespo public void testStringValueField() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - 
assertDocCountErrorWithinBounds(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertDocCountErrorWithinBounds(size, accurateResponse, testResponse) + ) + ); } public void testStringValueFieldSingleShard() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertNoDocCountError(size, accurateResponse, testResponse); - } - - public void testStringValueFieldWithRouting() throws Exception { - int size = randomIntBetween(1, 20); - int shardSize = randomIntBetween(size, size * 2); - - SearchResponse testResponse = prepareSearch("idx_with_routing").setRouting(String.valueOf(between(1, numRoutingValues))) - .addAggregation( + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(STRING_FIELD_NAME) .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) + .size(10000) + .shardSize(10000) .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountError(size, accurateResponse, testResponse) ) - .get(); + ); + } - assertNoFailures(testResponse); + public void testStringValueFieldWithRouting() throws Exception { + int size = randomIntBetween(1, 20); + int shardSize = randomIntBetween(size, size * 2); - assertNoDocCountErrorSingleResponse(size, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_with_routing").setRouting(String.valueOf(between(1, numRoutingValues))) + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountErrorSingleResponse(size, testResponse) + ); } public void testStringValueFieldDocCountAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - 
SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.count(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.count(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertUnboundedDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.count(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.count(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertUnboundedDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testStringValueFieldTermSortAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.key(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.key(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertNoDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.key(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.key(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testStringValueFieldTermSortDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = 
prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.key(false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.key(false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertNoDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.key(false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.key(false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testStringValueFieldSubAggAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.aggregation("sortAgg", true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.aggregation("sortAgg", true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) - ).get(); - - assertNoFailures(testResponse); - - assertUnboundedDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.aggregation("sortAgg", true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.aggregation("sortAgg", true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ), + testResponse -> assertUnboundedDocCountError(size, 
accurateResponse, testResponse) + ) + ); } public void testStringValueFieldSubAggDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.aggregation("sortAgg", false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.aggregation("sortAgg", false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) - ).get(); - - assertNoFailures(testResponse); - - assertUnboundedDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.aggregation("sortAgg", false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.aggregation("sortAgg", false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ), + testResponse -> assertUnboundedDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testLongValueField() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertDocCountErrorWithinBounds(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse 
-> assertDocCountErrorWithinBounds(size, accurateResponse, testResponse) + ) + ); } public void testLongValueFieldSingleShard() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertNoDocCountError(size, accurateResponse, testResponse); - } - - public void testLongValueFieldWithRouting() throws Exception { - int size = randomIntBetween(1, 20); - int shardSize = randomIntBetween(size, size * 2); - - SearchResponse testResponse = prepareSearch("idx_with_routing").setRouting(String.valueOf(between(1, numRoutingValues))) - .addAggregation( + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(LONG_FIELD_NAME) .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) + .size(10000) + .shardSize(10000) .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountError(size, accurateResponse, testResponse) ) - .get(); + ); + } - assertNoFailures(testResponse); + public void testLongValueFieldWithRouting() throws Exception { + int size = randomIntBetween(1, 20); + int shardSize = randomIntBetween(size, size * 2); - assertNoDocCountErrorSingleResponse(size, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_with_routing").setRouting(String.valueOf(between(1, numRoutingValues))) + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountErrorSingleResponse(size, testResponse) + ); } public void testLongValueFieldDocCountAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.count(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.count(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - 
).get(); - - assertNoFailures(testResponse); - - assertUnboundedDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.count(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.count(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertUnboundedDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testLongValueFieldTermSortAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.key(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.key(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertNoDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.key(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.key(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testLongValueFieldTermSortDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.key(false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.key(false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertNoDocCountError(size, accurateResponse, 
testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.key(false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.key(false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testLongValueFieldSubAggAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.aggregation("sortAgg", true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.aggregation("sortAgg", true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) - ).get(); - - assertNoFailures(testResponse); - - assertUnboundedDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.aggregation("sortAgg", true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.aggregation("sortAgg", true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ), + testResponse -> assertUnboundedDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testLongValueFieldSubAggDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.aggregation("sortAgg", false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(DOUBLE_FIELD_NAME)) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - 
.field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.aggregation("sortAgg", false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(DOUBLE_FIELD_NAME)) - ).get(); - - assertNoFailures(testResponse); - - assertUnboundedDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.aggregation("sortAgg", false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(DOUBLE_FIELD_NAME)) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.aggregation("sortAgg", false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(DOUBLE_FIELD_NAME)) + ), + testResponse -> assertUnboundedDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testDoubleValueField() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertDocCountErrorWithinBounds(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertDocCountErrorWithinBounds(size, accurateResponse, testResponse) + ) + ); } public void testDoubleValueFieldSingleShard() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - 
.size(size) - .shardSize(shardSize) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertNoDocCountError(size, accurateResponse, testResponse); - } - - public void testDoubleValueFieldWithRouting() throws Exception { - int size = randomIntBetween(1, 20); - int shardSize = randomIntBetween(size, size * 2); - - SearchResponse testResponse = prepareSearch("idx_with_routing").setRouting(String.valueOf(between(1, numRoutingValues))) - .addAggregation( + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(DOUBLE_FIELD_NAME) .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) + .size(10000) + .shardSize(10000) .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountError(size, accurateResponse, testResponse) ) - .get(); + ); + } - assertNoFailures(testResponse); + public void testDoubleValueFieldWithRouting() throws Exception { + int size = randomIntBetween(1, 20); + int shardSize = randomIntBetween(size, size * 2); - assertNoDocCountErrorSingleResponse(size, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_with_routing").setRouting(String.valueOf(between(1, numRoutingValues))) + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountErrorSingleResponse(size, testResponse) + ); } public void testDoubleValueFieldDocCountAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.count(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.count(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertUnboundedDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.count(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.count(true)) + 
.collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertUnboundedDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testDoubleValueFieldTermSortAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.key(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.key(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertNoDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.key(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.key(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testDoubleValueFieldTermSortDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.key(false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.key(false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertNoDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.key(false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.key(false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> 
assertNoDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testDoubleValueFieldSubAggAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.aggregation("sortAgg", true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.aggregation("sortAgg", true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) - ).get(); - - assertNoFailures(testResponse); - - assertUnboundedDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.aggregation("sortAgg", true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.aggregation("sortAgg", true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ), + testResponse -> assertUnboundedDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testDoubleValueFieldSubAggDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.aggregation("sortAgg", false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.aggregation("sortAgg", false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) - ).get(); - - assertNoFailures(testResponse); - - assertUnboundedDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.aggregation("sortAgg", false)) + 
.collectMode(randomFrom(SubAggCollectionMode.values()))
+                    .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME))
+            ),
+            accurateResponse -> assertNoFailuresAndResponse(
+                prepareSearch("idx_single_shard").addAggregation(
+                    terms("terms").executionHint(randomExecutionHint())
+                        .field(DOUBLE_FIELD_NAME)
+                        .showTermDocCountError(true)
+                        .size(size)
+                        .shardSize(shardSize)
+                        .order(BucketOrder.aggregation("sortAgg", false))
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME))
+                ),
+                testResponse -> assertUnboundedDocCountError(size, accurateResponse, testResponse)
+            )
+        );
     }
 
     /**
@@ -960,52 +912,54 @@ public void testDoubleValueFieldSubAggDesc() throws Exception {
     * 3 one-shard indices.
     */
    public void testFixedDocs() throws Exception {
-        SearchResponse response = prepareSearch("idx_fixed_docs_0", "idx_fixed_docs_1", "idx_fixed_docs_2").addAggregation(
-            terms("terms").executionHint(randomExecutionHint())
-                .field(STRING_FIELD_NAME)
-                .showTermDocCountError(true)
-                .size(5)
-                .shardSize(5)
-                .collectMode(randomFrom(SubAggCollectionMode.values()))
-        ).get();
-        assertNoFailures(response);
-
-        Terms terms = response.getAggregations().get("terms");
-        assertThat(terms, notNullValue());
-        assertThat(terms.getDocCountError(), equalTo(46L));
-        List<? extends Bucket> buckets = terms.getBuckets();
-        assertThat(buckets, notNullValue());
-        assertThat(buckets.size(), equalTo(5));
-
-        Bucket bucket = buckets.get(0);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("A"));
-        assertThat(bucket.getDocCount(), equalTo(100L));
-        assertThat(bucket.getDocCountError(), equalTo(0L));
-
-        bucket = buckets.get(1);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("Z"));
-        assertThat(bucket.getDocCount(), equalTo(52L));
-        assertThat(bucket.getDocCountError(), equalTo(2L));
-
-        bucket = buckets.get(2);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("C"));
-        assertThat(bucket.getDocCount(), equalTo(50L));
-        assertThat(bucket.getDocCountError(), equalTo(15L));
-
-        bucket = buckets.get(3);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("G"));
-        assertThat(bucket.getDocCount(), equalTo(45L));
-        assertThat(bucket.getDocCountError(), equalTo(2L));
-
-        bucket = buckets.get(4);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("B"));
-        assertThat(bucket.getDocCount(), equalTo(43L));
-        assertThat(bucket.getDocCountError(), equalTo(29L));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx_fixed_docs_0", "idx_fixed_docs_1", "idx_fixed_docs_2").addAggregation(
+                terms("terms").executionHint(randomExecutionHint())
+                    .field(STRING_FIELD_NAME)
+                    .showTermDocCountError(true)
+                    .size(5)
+                    .shardSize(5)
+                    .collectMode(randomFrom(SubAggCollectionMode.values()))
+            ),
+            response -> {
+                Terms terms = response.getAggregations().get("terms");
+                assertThat(terms, notNullValue());
+                assertThat(terms.getDocCountError(), equalTo(46L));
+                List<? extends Bucket> buckets = terms.getBuckets();
+                assertThat(buckets, notNullValue());
+                assertThat(buckets.size(), equalTo(5));
+
+                Bucket bucket = buckets.get(0);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("A"));
+                assertThat(bucket.getDocCount(), equalTo(100L));
+                assertThat(bucket.getDocCountError(), equalTo(0L));
+
+                bucket = buckets.get(1);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("Z"));
+                assertThat(bucket.getDocCount(), equalTo(52L));
+                assertThat(bucket.getDocCountError(),
equalTo(2L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("C")); + assertThat(bucket.getDocCount(), equalTo(50L)); + assertThat(bucket.getDocCountError(), equalTo(15L)); + + bucket = buckets.get(3); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("G")); + assertThat(bucket.getDocCount(), equalTo(45L)); + assertThat(bucket.getDocCountError(), equalTo(2L)); + + bucket = buckets.get(4); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("B")); + assertThat(bucket.getDocCount(), equalTo(43L)); + assertThat(bucket.getDocCountError(), equalTo(29L)); + } + ); } /** @@ -1013,16 +967,19 @@ public void testFixedDocs() throws Exception { * See https://github.com/elastic/elasticsearch/issues/40005 for more details */ public void testIncrementalReduction() { - SearchResponse response = prepareSearch("idx_fixed_docs_3", "idx_fixed_docs_4", "idx_fixed_docs_5").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(5) - .shardSize(5) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - assertNoFailures(response); - Terms terms = response.getAggregations().get("terms"); - assertThat(terms.getDocCountError(), equalTo(0L)); + assertNoFailuresAndResponse( + prepareSearch("idx_fixed_docs_3", "idx_fixed_docs_4", "idx_fixed_docs_5").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(5) + .shardSize(5) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + response -> { + Terms terms = response.getAggregations().get("terms"); + assertThat(terms.getDocCountError(), equalTo(0L)); + } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountIT.java index ffb9539bee735..2a09e5f90f19c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.bucket.filter.InternalFilter; @@ -26,6 +25,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.hamcrest.Matchers.equalTo; public class TermsShardMinDocCountIT extends ESIntegTestCase { @@ -61,34 +61,41 @@ public void testShardMinDocCountSignificantTermsTest() throws Exception { indexRandom(true, false, indexBuilders); // first, check that indeed when not setting the shardMinDocCount parameter 0 terms are returned - SearchResponse response = prepareSearch(index).addAggregation( - (filter("inclass", QueryBuilders.termQuery("class", true))).subAggregation( - 
significantTerms("mySignificantTerms").field("text")
-                    .minDocCount(2)
-                    .size(2)
-                    .shardSize(2)
-                    .executionHint(randomExecutionHint())
-            )
-        ).get();
-        assertNoFailures(response);
-        InternalFilter filteredBucket = response.getAggregations().get("inclass");
-        SignificantTerms sigterms = filteredBucket.getAggregations().get("mySignificantTerms");
-        assertThat(sigterms.getBuckets().size(), equalTo(0));
-
-        response = prepareSearch(index).addAggregation(
-            (filter("inclass", QueryBuilders.termQuery("class", true))).subAggregation(
-                significantTerms("mySignificantTerms").field("text")
-                    .minDocCount(2)
-                    .shardSize(2)
-                    .shardMinDocCount(2)
-                    .size(2)
-                    .executionHint(randomExecutionHint())
-            )
-        ).get();
-        assertNoFailures(response);
-        filteredBucket = response.getAggregations().get("inclass");
-        sigterms = filteredBucket.getAggregations().get("mySignificantTerms");
-        assertThat(sigterms.getBuckets().size(), equalTo(2));
+        assertNoFailuresAndResponse(
+            prepareSearch(index).addAggregation(
+                (filter("inclass", QueryBuilders.termQuery("class", true))).subAggregation(
+                    significantTerms("mySignificantTerms").field("text")
+                        .minDocCount(2)
+                        .size(2)
+                        .shardSize(2)
+                        .executionHint(randomExecutionHint())
+                )
+            ),
+            response -> {
+                InternalFilter filteredBucket = response.getAggregations().get("inclass");
+                SignificantTerms sigterms = filteredBucket.getAggregations().get("mySignificantTerms");
+                assertThat(sigterms.getBuckets().size(), equalTo(0));
+            }
+        );
+
+        assertNoFailuresAndResponse(
+            prepareSearch(index).addAggregation(
+                (filter("inclass", QueryBuilders.termQuery("class", true))).subAggregation(
+                    significantTerms("mySignificantTerms").field("text")
+                        .minDocCount(2)
+                        .shardSize(2)
+                        .shardMinDocCount(2)
+                        .size(2)
+                        .executionHint(randomExecutionHint())
+                )
+            ),
+            response -> {
+                assertNoFailures(response);
+                InternalFilter filteredBucket = response.getAggregations().get("inclass");
+                SignificantTerms sigterms = filteredBucket.getAggregations().get("mySignificantTerms");
+                assertThat(sigterms.getBuckets().size(), equalTo(2));
+            }
+        );
    }
 
    private void addTermsDocs(String term, int numInClass, int numNotInClass, List<IndexRequestBuilder> builders) {
@@ -122,31 +129,36 @@ public void testShardMinDocCountTermsTest() throws Exception {
        indexRandom(true, false, indexBuilders);
 
        // first, check that indeed when not setting the shardMinDocCount parameter 0 terms are returned
-        SearchResponse response = prepareSearch(index).addAggregation(
-            terms("myTerms").field("text")
-                .minDocCount(2)
-                .size(2)
-                .shardSize(2)
-                .executionHint(randomExecutionHint())
-                .order(BucketOrder.key(true))
-        ).get();
-        assertNoFailures(response);
-        Terms sigterms = response.getAggregations().get("myTerms");
-        assertThat(sigterms.getBuckets().size(), equalTo(0));
-
-        response = prepareSearch(index).addAggregation(
-            terms("myTerms").field("text")
-                .minDocCount(2)
-                .shardMinDocCount(2)
-                .size(2)
-                .shardSize(2)
-                .executionHint(randomExecutionHint())
-                .order(BucketOrder.key(true))
-        ).get();
-        assertNoFailures(response);
-        sigterms = response.getAggregations().get("myTerms");
-        assertThat(sigterms.getBuckets().size(), equalTo(2));
-
+        assertNoFailuresAndResponse(
+            prepareSearch(index).addAggregation(
+                terms("myTerms").field("text")
+                    .minDocCount(2)
+                    .size(2)
+                    .shardSize(2)
+                    .executionHint(randomExecutionHint())
+                    .order(BucketOrder.key(true))
+            ),
+            response -> {
+                Terms sigterms = response.getAggregations().get("myTerms");
+                assertThat(sigterms.getBuckets().size(), equalTo(0));
+            }
+        );
+
+        assertNoFailuresAndResponse(
+            prepareSearch(index).addAggregation(
+                terms("myTerms").field("text")
+                    .minDocCount(2)
+                    .shardMinDocCount(2)
+                    .size(2)
+                    .shardSize(2)
+                    .executionHint(randomExecutionHint())
+                    .order(BucketOrder.key(true))
+            ),
+            response -> {
+                Terms sigterms = response.getAggregations().get("myTerms");
+                assertThat(sigterms.getBuckets().size(), equalTo(2));
+            }
+        );
    }
 
    private static void addTermsDocs(String term, int numDocs, List<IndexRequestBuilder> builders) {
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsIT.java
index 02d16804198dd..2dccda385bf53 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsIT.java
@@ -10,8 +10,6 @@
 import org.elasticsearch.action.bulk.BulkRequestBuilder;
 import org.elasticsearch.action.index.IndexRequest;
-import org.elasticsearch.action.search.SearchRequestBuilder;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.test.ESSingleNodeTestCase;
@@ -19,6 +17,7 @@ import org.hamcrest.Matchers;
 
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
 
 /**
  * Test that index enough data to trigger the creation of Cuckoo filters.
@@ -56,11 +55,13 @@ public void testSingleValuedString() {
    }
 
    private void assertNumRareTerms(int maxDocs, int rareTerms) {
-        final SearchRequestBuilder requestBuilder = client().prepareSearch(index);
-        requestBuilder.addAggregation(new RareTermsAggregationBuilder("rareTerms").field("str_value.keyword").maxDocCount(maxDocs));
-        final SearchResponse response = requestBuilder.get();
-        assertNoFailures(response);
-        final RareTerms terms = response.getAggregations().get("rareTerms");
-        assertThat(terms.getBuckets().size(), Matchers.equalTo(rareTerms));
+        assertNoFailuresAndResponse(
+            client().prepareSearch(index)
+                .addAggregation(new RareTermsAggregationBuilder("rareTerms").field("str_value.keyword").maxDocCount(maxDocs)),
+            response -> {
+                final RareTerms terms = response.getAggregations().get("rareTerms");
+                assertThat(terms.getBuckets().size(), Matchers.equalTo(rareTerms));
+            }
+        );
    }
 }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java
index ceafd07c67d65..527753df7fc3e 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java
@@ -10,7 +10,6 @@
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.search.SearchPhaseExecutionException;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.fielddata.ScriptDocValues;
@@ -59,6 +58,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
@@ -295,132 +295,148 @@ public void testMultiValueFieldWithPartitionedFiltering() throws Exception {
 
    private void runTestFieldWithPartitionedFiltering(String field) throws Exception {
        // Find total number of unique terms
-        SearchResponse allResponse = prepareSearch("idx").addAggregation(
-            new TermsAggregationBuilder("terms").field(field).size(10000).collectMode(randomFrom(SubAggCollectionMode.values()))
-        ).get();
-        assertNoFailures(allResponse);
-        StringTerms terms = allResponse.getAggregations().get("terms");
-        assertThat(terms, notNullValue());
-        assertThat(terms.getName(), equalTo("terms"));
-        int expectedCardinality = terms.getBuckets().size();
+        int[] expectedCardinality = new int[1];
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                new TermsAggregationBuilder("terms").field(field).size(10000).collectMode(randomFrom(SubAggCollectionMode.values()))
+            ),
+            response -> {
+                StringTerms terms = response.getAggregations().get("terms");
+                assertThat(terms, notNullValue());
+                assertThat(terms.getName(), equalTo("terms"));
+                expectedCardinality[0] = terms.getBuckets().size();
+            }
+        );
 
        // Gather terms using partitioned aggregations
        final int numPartitions = randomIntBetween(2, 4);
        Set<String> foundTerms = new HashSet<>();
        for (int partition = 0; partition < numPartitions; partition++) {
-            SearchResponse response = prepareSearch("idx").addAggregation(
-                new TermsAggregationBuilder("terms").field(field)
-                    .includeExclude(new IncludeExclude(partition, numPartitions))
-                    .collectMode(randomFrom(SubAggCollectionMode.values()))
-            ).get();
-            assertNoFailures(response);
-            terms = response.getAggregations().get("terms");
-            assertThat(terms, notNullValue());
-            assertThat(terms.getName(), equalTo("terms"));
-            for (StringTerms.Bucket bucket : terms.getBuckets()) {
-                assertTrue(foundTerms.add(bucket.getKeyAsString()));
-            }
+            assertNoFailuresAndResponse(
+                prepareSearch("idx").addAggregation(
+                    new TermsAggregationBuilder("terms").field(field)
+                        .includeExclude(new IncludeExclude(partition, numPartitions))
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                ),
+                response -> {
+                    StringTerms terms = response.getAggregations().get("terms");
+                    assertThat(terms, notNullValue());
+                    assertThat(terms.getName(), equalTo("terms"));
+                    for (StringTerms.Bucket bucket : terms.getBuckets()) {
+                        assertTrue(foundTerms.add(bucket.getKeyAsString()));
+                    }
+                }
+            );
        }
-        assertEquals(expectedCardinality, foundTerms.size());
+        assertEquals(expectedCardinality[0], foundTerms.size());
    }
 
    public void testSingleValuedFieldWithValueScript() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            new TermsAggregationBuilder("terms").executionHint(randomExecutionHint())
-                .field(SINGLE_VALUED_FIELD_NAME)
-                .collectMode(randomFrom(SubAggCollectionMode.values()))
-                .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "'foo_' + _value", Collections.emptyMap()))
-        ).get();
-
-        assertNoFailures(response);
-
-        StringTerms terms = response.getAggregations().get("terms");
-        assertThat(terms, notNullValue());
-        assertThat(terms.getName(), equalTo("terms"));
-
assertThat(terms.getBuckets().size(), equalTo(5)); - - for (int i = 0; i < 5; i++) { - StringTerms.Bucket bucket = terms.getBucketByKey("foo_val" + i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("foo_val" + i)); - assertThat(bucket.getDocCount(), equalTo(1L)); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) + .field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "'foo_' + _value", Collections.emptyMap())) + ), + response -> { + StringTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + for (int i = 0; i < 5; i++) { + StringTerms.Bucket bucket = terms.getBucketByKey("foo_val" + i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("foo_val" + i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + } + } + ); } public void testMultiValuedFieldWithValueScriptNotUnique() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) - .field(MULTI_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value.substring(0,3)", Collections.emptyMap())) - ).get(); - - assertNoFailures(response); - - StringTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(1)); - - StringTerms.Bucket bucket = terms.getBucketByKey("val"); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("val")); - assertThat(bucket.getDocCount(), equalTo(5L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) + .field(MULTI_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value.substring(0,3)", Collections.emptyMap())) + ), + response -> { + StringTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(1)); + + StringTerms.Bucket bucket = terms.getBucketByKey("val"); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("val")); + assertThat(bucket.getDocCount(), equalTo(5L)); + } + ); } public void testMultiValuedScript() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) - .script( - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['" + MULTI_VALUED_FIELD_NAME + "']", Collections.emptyMap()) - ) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(response); - - StringTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(6)); - - for (int i = 0; i < 6; i++) { - Terms.Bucket bucket = terms.getBucketByKey("val" + i); - 
assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("val" + i)); - if (i == 0 || i == 5) { - assertThat(bucket.getDocCount(), equalTo(1L)); - } else { - assertThat(bucket.getDocCount(), equalTo(2L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) + .script( + new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "doc['" + MULTI_VALUED_FIELD_NAME + "']", + Collections.emptyMap() + ) + ) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + response -> { + StringTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(6)); + + for (int i = 0; i < 6; i++) { + Terms.Bucket bucket = terms.getBucketByKey("val" + i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("val" + i)); + if (i == 0 || i == 5) { + assertThat(bucket.getDocCount(), equalTo(1L)); + } else { + assertThat(bucket.getDocCount(), equalTo(2L)); + } + } } - } + ); } public void testMultiValuedFieldWithValueScript() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) - .field(MULTI_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "'foo_' + _value", Collections.emptyMap())) - ).get(); - - assertNoFailures(response); - - StringTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(6)); - - for (int i = 0; i < 6; i++) { - StringTerms.Bucket bucket = terms.getBucketByKey("foo_val" + i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("foo_val" + i)); - if (i == 0 || i == 5) { - assertThat(bucket.getDocCount(), equalTo(1L)); - } else { - assertThat(bucket.getDocCount(), equalTo(2L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) + .field(MULTI_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "'foo_' + _value", Collections.emptyMap())) + ), + response -> { + StringTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(6)); + + for (int i = 0; i < 6; i++) { + StringTerms.Bucket bucket = terms.getBucketByKey("foo_val" + i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("foo_val" + i)); + if (i == 0 || i == 5) { + assertThat(bucket.getDocCount(), equalTo(1L)); + } else { + assertThat(bucket.getDocCount(), equalTo(2L)); + } + } } - } + ); } /* @@ -443,25 +459,26 @@ public void testScriptSingleValue() throws Exception { Collections.emptyMap() ); - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values())) - .executionHint(randomExecutionHint()) - .script(script) - ).get(); - - assertNoFailures(response); - - StringTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - 
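The StringTermsIT conversions above, and those that follow, are all the same mechanical transformation; schematically, with "agg" standing for whatever aggregation the test builds:

// Before: execute eagerly, then assert on the local response.
// SearchResponse response = prepareSearch("idx").addAggregation(agg).get();
// assertNoFailures(response);
// StringTerms terms = response.getAggregations().get("terms");
// ... assertions on terms ...
//
// After: pass the un-executed builder and move the assertions into the consumer.
// assertNoFailuresAndResponse(
//     prepareSearch("idx").addAggregation(agg),
//     response -> {
//         StringTerms terms = response.getAggregations().get("terms");
//         ... assertions on terms ...
//     }
// );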
assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - for (int i = 0; i < 5; i++) { - StringTerms.Bucket bucket = terms.getBucketByKey("val" + i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("val" + i)); - assertThat(bucket.getDocCount(), equalTo(1L)); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values())) + .executionHint(randomExecutionHint()) + .script(script) + ), + response -> { + StringTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + for (int i = 0; i < 5; i++) { + StringTerms.Bucket bucket = terms.getBucketByKey("val" + i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("val" + i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + } + } + ); } public void testScriptSingleValueExplicitSingleValue() throws Exception { @@ -472,108 +489,114 @@ public void testScriptSingleValueExplicitSingleValue() throws Exception { Collections.emptyMap() ); - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values())) - .executionHint(randomExecutionHint()) - .script(script) - ).get(); - - assertNoFailures(response); - - StringTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - for (int i = 0; i < 5; i++) { - StringTerms.Bucket bucket = terms.getBucketByKey("val" + i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("val" + i)); - assertThat(bucket.getDocCount(), equalTo(1L)); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values())) + .executionHint(randomExecutionHint()) + .script(script) + ), + response -> { + StringTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + for (int i = 0; i < 5; i++) { + StringTerms.Bucket bucket = terms.getBucketByKey("val" + i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("val" + i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + } + } + ); } public void testScriptMultiValued() throws Exception { - SearchResponse response = prepareSearch("idx") - - .addAggregation( - new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values())) - .executionHint(randomExecutionHint()) - .script( - new Script( - ScriptType.INLINE, - CustomScriptPlugin.NAME, - "doc['" + MULTI_VALUED_FIELD_NAME + "']", - Collections.emptyMap() + assertNoFailuresAndResponse( + prepareSearch("idx") + + .addAggregation( + new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values())) + .executionHint(randomExecutionHint()) + .script( + new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "doc['" + MULTI_VALUED_FIELD_NAME + "']", + Collections.emptyMap() + ) ) - ) - ) - .get(); - - assertNoFailures(response); - - StringTerms terms = response.getAggregations().get("terms"); - assertThat(terms, 
notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(6)); - - for (int i = 0; i < 6; i++) { - StringTerms.Bucket bucket = terms.getBucketByKey("val" + i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("val" + i)); - if (i == 0 || i == 5) { - assertThat(bucket.getDocCount(), equalTo(1L)); - } else { - assertThat(bucket.getDocCount(), equalTo(2L)); + ), + response -> { + StringTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(6)); + + for (int i = 0; i < 6; i++) { + StringTerms.Bucket bucket = terms.getBucketByKey("val" + i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("val" + i)); + if (i == 0 || i == 5) { + assertThat(bucket.getDocCount(), equalTo(1L)); + } else { + assertThat(bucket.getDocCount(), equalTo(2L)); + } + } } - } + ); } public void testPartiallyUnmapped() throws Exception { - SearchResponse response = prepareSearch("idx", "idx_unmapped").addAggregation( - new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) - .field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(response); - - StringTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - for (int i = 0; i < 5; i++) { - StringTerms.Bucket bucket = terms.getBucketByKey("val" + i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("val" + i)); - assertThat(bucket.getDocCount(), equalTo(1L)); - } + assertNoFailuresAndResponse( + prepareSearch("idx", "idx_unmapped").addAggregation( + new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) + .field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + response -> { + StringTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + for (int i = 0; i < 5; i++) { + StringTerms.Bucket bucket = terms.getBucketByKey("val" + i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("val" + i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + } + } + ); } public void testStringTermsNestedIntoPerBucketAggregator() throws Exception { // no execution hint so that the logic that decides whether or not to use ordinals is executed - SearchResponse response = prepareSearch("idx").addAggregation( - filter("filter", termQuery(MULTI_VALUED_FIELD_NAME, "val3")).subAggregation( - new TermsAggregationBuilder("terms").field(MULTI_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) - ) - ).get(); - - assertThat(response.getFailedShards(), equalTo(0)); - - Filter filter = response.getAggregations().get("filter"); - - StringTerms terms = filter.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(3)); - - for (int i = 2; i <= 4; i++) { - StringTerms.Bucket bucket = terms.getBucketByKey("val" + i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("val" + i)); - 
assertThat(bucket.getDocCount(), equalTo(i == 3 ? 2L : 1L)); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + filter("filter", termQuery(MULTI_VALUED_FIELD_NAME, "val3")).subAggregation( + new TermsAggregationBuilder("terms").field(MULTI_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + ), + response -> { + assertThat(response.getFailedShards(), equalTo(0)); + + Filter filter = response.getAggregations().get("filter"); + + StringTerms terms = filter.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(3)); + + for (int i = 2; i <= 4; i++) { + StringTerms.Bucket bucket = terms.getBucketByKey("val" + i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("val" + i)); + assertThat(bucket.getDocCount(), equalTo(i == 3 ? 2L : 1L)); + } + } + ); } public void testSingleValuedFieldOrderedByIllegalAgg() throws Exception { @@ -612,93 +635,95 @@ public void testSingleValuedFieldOrderedByIllegalAgg() throws Exception { public void testSingleValuedFieldOrderedBySingleBucketSubAggregationAsc() throws Exception { boolean asc = randomBoolean(); - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("tags").executionHint(randomExecutionHint()) - .field("tag") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("filter", asc)) - .subAggregation(filter("filter", QueryBuilders.matchAllQuery())) - ).get(); - - assertNoFailures(response); - - StringTerms tags = response.getAggregations().get("tags"); - assertThat(tags, notNullValue()); - assertThat(tags.getName(), equalTo("tags")); - assertThat(tags.getBuckets().size(), equalTo(2)); - - Iterator iters = tags.getBuckets().iterator(); - - Terms.Bucket tag = iters.next(); - assertThat(tag, notNullValue()); - assertThat(tag.getKeyAsString(), equalTo(asc ? "less" : "more")); - assertThat(tag.getDocCount(), equalTo(asc ? 2L : 3L)); - Filter filter = tag.getAggregations().get("filter"); - assertThat(filter, notNullValue()); - assertThat(filter.getDocCount(), equalTo(asc ? 2L : 3L)); - - tag = iters.next(); - assertThat(tag, notNullValue()); - assertThat(tag.getKeyAsString(), equalTo(asc ? "more" : "less")); - assertThat(tag.getDocCount(), equalTo(asc ? 3L : 2L)); - filter = tag.getAggregations().get("filter"); - assertThat(filter, notNullValue()); - assertThat(filter.getDocCount(), equalTo(asc ? 3L : 2L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("tags").executionHint(randomExecutionHint()) + .field("tag") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("filter", asc)) + .subAggregation(filter("filter", QueryBuilders.matchAllQuery())) + ), + response -> { + StringTerms tags = response.getAggregations().get("tags"); + assertThat(tags, notNullValue()); + assertThat(tags.getName(), equalTo("tags")); + assertThat(tags.getBuckets().size(), equalTo(2)); + + Iterator iters = tags.getBuckets().iterator(); + + Terms.Bucket tag = iters.next(); + assertThat(tag, notNullValue()); + assertThat(tag.getKeyAsString(), equalTo(asc ? "less" : "more")); + assertThat(tag.getDocCount(), equalTo(asc ? 2L : 3L)); + Filter filter = tag.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(asc ? 
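testSingleValuedFieldOrderedBySingleBucketSubAggregationAsc above exercises ordering terms buckets by a sub-aggregation rather than by key or count. Condensed from the test itself (this builder only compiles inside the test class, where filter() and the relevant imports are in scope):

boolean asc = randomBoolean();
TermsAggregationBuilder tags = new TermsAggregationBuilder("tags")
    .field("tag")
    .order(BucketOrder.aggregation("filter", asc))                    // sort key: the sub-agg's doc count
    .subAggregation(filter("filter", QueryBuilders.matchAllQuery()));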
2L : 3L)); + + tag = iters.next(); + assertThat(tag, notNullValue()); + assertThat(tag.getKeyAsString(), equalTo(asc ? "more" : "less")); + assertThat(tag.getDocCount(), equalTo(asc ? 3L : 2L)); + filter = tag.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(asc ? 3L : 2L)); + } + ); } public void testSingleValuedFieldOrderedBySubAggregationAscMultiHierarchyLevels() throws Exception { boolean asc = randomBoolean(); - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("tags").executionHint(randomExecutionHint()) - .field("tag") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("filter1>filter2>stats.max", asc)) - .subAggregation( - filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( - filter("filter2", QueryBuilders.matchAllQuery()).subAggregation(stats("stats").field("i")) + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("tags").executionHint(randomExecutionHint()) + .field("tag") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("filter1>filter2>stats.max", asc)) + .subAggregation( + filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( + filter("filter2", QueryBuilders.matchAllQuery()).subAggregation(stats("stats").field("i")) + ) ) - ) - ).get(); - - assertNoFailures(response); - - StringTerms tags = response.getAggregations().get("tags"); - assertThat(tags, notNullValue()); - assertThat(tags.getName(), equalTo("tags")); - assertThat(tags.getBuckets().size(), equalTo(2)); - - Iterator iters = tags.getBuckets().iterator(); - - // the max for "more" is 2 - // the max for "less" is 4 - - StringTerms.Bucket tag = iters.next(); - assertThat(tag, notNullValue()); - assertThat(tag.getKeyAsString(), equalTo(asc ? "more" : "less")); - assertThat(tag.getDocCount(), equalTo(asc ? 3L : 2L)); - Filter filter1 = tag.getAggregations().get("filter1"); - assertThat(filter1, notNullValue()); - assertThat(filter1.getDocCount(), equalTo(asc ? 3L : 2L)); - Filter filter2 = filter1.getAggregations().get("filter2"); - assertThat(filter2, notNullValue()); - assertThat(filter2.getDocCount(), equalTo(asc ? 3L : 2L)); - Stats stats = filter2.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getMax(), equalTo(asc ? 2.0 : 4.0)); - - tag = iters.next(); - assertThat(tag, notNullValue()); - assertThat(tag.getKeyAsString(), equalTo(asc ? "less" : "more")); - assertThat(tag.getDocCount(), equalTo(asc ? 2L : 3L)); - filter1 = tag.getAggregations().get("filter1"); - assertThat(filter1, notNullValue()); - assertThat(filter1.getDocCount(), equalTo(asc ? 2L : 3L)); - filter2 = filter1.getAggregations().get("filter2"); - assertThat(filter2, notNullValue()); - assertThat(filter2.getDocCount(), equalTo(asc ? 2L : 3L)); - stats = filter2.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getMax(), equalTo(asc ? 4.0 : 2.0)); + ), + response -> { + StringTerms tags = response.getAggregations().get("tags"); + assertThat(tags, notNullValue()); + assertThat(tags.getName(), equalTo("tags")); + assertThat(tags.getBuckets().size(), equalTo(2)); + + Iterator iters = tags.getBuckets().iterator(); + + // the max for "more" is 2 + // the max for "less" is 4 + + StringTerms.Bucket tag = iters.next(); + assertThat(tag, notNullValue()); + assertThat(tag.getKeyAsString(), equalTo(asc ? 
"more" : "less")); + assertThat(tag.getDocCount(), equalTo(asc ? 3L : 2L)); + Filter filter1 = tag.getAggregations().get("filter1"); + assertThat(filter1, notNullValue()); + assertThat(filter1.getDocCount(), equalTo(asc ? 3L : 2L)); + Filter filter2 = filter1.getAggregations().get("filter2"); + assertThat(filter2, notNullValue()); + assertThat(filter2.getDocCount(), equalTo(asc ? 3L : 2L)); + Stats stats = filter2.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getMax(), equalTo(asc ? 2.0 : 4.0)); + + tag = iters.next(); + assertThat(tag, notNullValue()); + assertThat(tag.getKeyAsString(), equalTo(asc ? "less" : "more")); + assertThat(tag.getDocCount(), equalTo(asc ? 2L : 3L)); + filter1 = tag.getAggregations().get("filter1"); + assertThat(filter1, notNullValue()); + assertThat(filter1.getDocCount(), equalTo(asc ? 2L : 3L)); + filter2 = filter1.getAggregations().get("filter2"); + assertThat(filter2, notNullValue()); + assertThat(filter2.getDocCount(), equalTo(asc ? 2L : 3L)); + stats = filter2.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getMax(), equalTo(asc ? 4.0 : 2.0)); + } + ); } public void testSingleValuedFieldOrderedBySubAggregationAscMultiHierarchyLevelsSpecialChars() throws Exception { @@ -709,57 +734,58 @@ public void testSingleValuedFieldOrderedBySubAggregationAscMultiHierarchyLevelsS statsNameBuilder.append(randomAlphaOfLengthBetween(3, 10).replace("[", "").replace("]", "").replace(">", "")); String statsName = statsNameBuilder.toString(); boolean asc = randomBoolean(); - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("tags").executionHint(randomExecutionHint()) - .field("tag") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("filter1>" + filter2Name + ">" + statsName + ".max", asc)) - .subAggregation( - filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( - filter(filter2Name, QueryBuilders.matchAllQuery()).subAggregation(stats(statsName).field("i")) + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("tags").executionHint(randomExecutionHint()) + .field("tag") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("filter1>" + filter2Name + ">" + statsName + ".max", asc)) + .subAggregation( + filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( + filter(filter2Name, QueryBuilders.matchAllQuery()).subAggregation(stats(statsName).field("i")) + ) ) - ) - ).get(); - - assertNoFailures(response); - - StringTerms tags = response.getAggregations().get("tags"); - assertThat(tags, notNullValue()); - assertThat(tags.getName(), equalTo("tags")); - assertThat(tags.getBuckets().size(), equalTo(2)); - - Iterator iters = tags.getBuckets().iterator(); - - // the max for "more" is 2 - // the max for "less" is 4 - - StringTerms.Bucket tag = iters.next(); - assertThat(tag, notNullValue()); - assertThat(tag.getKeyAsString(), equalTo(asc ? "more" : "less")); - assertThat(tag.getDocCount(), equalTo(asc ? 3L : 2L)); - Filter filter1 = tag.getAggregations().get("filter1"); - assertThat(filter1, notNullValue()); - assertThat(filter1.getDocCount(), equalTo(asc ? 3L : 2L)); - Filter filter2 = filter1.getAggregations().get(filter2Name); - assertThat(filter2, notNullValue()); - assertThat(filter2.getDocCount(), equalTo(asc ? 
3L : 2L)); - Stats stats = filter2.getAggregations().get(statsName); - assertThat(stats, notNullValue()); - assertThat(stats.getMax(), equalTo(asc ? 2.0 : 4.0)); - - tag = iters.next(); - assertThat(tag, notNullValue()); - assertThat(tag.getKeyAsString(), equalTo(asc ? "less" : "more")); - assertThat(tag.getDocCount(), equalTo(asc ? 2L : 3L)); - filter1 = tag.getAggregations().get("filter1"); - assertThat(filter1, notNullValue()); - assertThat(filter1.getDocCount(), equalTo(asc ? 2L : 3L)); - filter2 = filter1.getAggregations().get(filter2Name); - assertThat(filter2, notNullValue()); - assertThat(filter2.getDocCount(), equalTo(asc ? 2L : 3L)); - stats = filter2.getAggregations().get(statsName); - assertThat(stats, notNullValue()); - assertThat(stats.getMax(), equalTo(asc ? 4.0 : 2.0)); + ), + response -> { + StringTerms tags = response.getAggregations().get("tags"); + assertThat(tags, notNullValue()); + assertThat(tags.getName(), equalTo("tags")); + assertThat(tags.getBuckets().size(), equalTo(2)); + + Iterator iters = tags.getBuckets().iterator(); + + // the max for "more" is 2 + // the max for "less" is 4 + + StringTerms.Bucket tag = iters.next(); + assertThat(tag, notNullValue()); + assertThat(tag.getKeyAsString(), equalTo(asc ? "more" : "less")); + assertThat(tag.getDocCount(), equalTo(asc ? 3L : 2L)); + Filter filter1 = tag.getAggregations().get("filter1"); + assertThat(filter1, notNullValue()); + assertThat(filter1.getDocCount(), equalTo(asc ? 3L : 2L)); + Filter filter2 = filter1.getAggregations().get(filter2Name); + assertThat(filter2, notNullValue()); + assertThat(filter2.getDocCount(), equalTo(asc ? 3L : 2L)); + Stats stats = filter2.getAggregations().get(statsName); + assertThat(stats, notNullValue()); + assertThat(stats.getMax(), equalTo(asc ? 2.0 : 4.0)); + + tag = iters.next(); + assertThat(tag, notNullValue()); + assertThat(tag.getKeyAsString(), equalTo(asc ? "less" : "more")); + assertThat(tag.getDocCount(), equalTo(asc ? 2L : 3L)); + filter1 = tag.getAggregations().get("filter1"); + assertThat(filter1, notNullValue()); + assertThat(filter1.getDocCount(), equalTo(asc ? 2L : 3L)); + filter2 = filter1.getAggregations().get(filter2Name); + assertThat(filter2, notNullValue()); + assertThat(filter2.getDocCount(), equalTo(asc ? 2L : 3L)); + stats = filter2.getAggregations().get(statsName); + assertThat(stats, notNullValue()); + assertThat(stats.getMax(), equalTo(asc ? 
4.0 : 2.0)); + } + ); } public void testSingleValuedFieldOrderedBySubAggregationAscMultiHierarchyLevelsSpecialCharsNoDotNotation() throws Exception { @@ -770,57 +796,58 @@ public void testSingleValuedFieldOrderedBySubAggregationAscMultiHierarchyLevelsS statsNameBuilder.append(randomAlphaOfLengthBetween(3, 10).replace("[", "").replace("]", "").replace(">", "")); String statsName = statsNameBuilder.toString(); boolean asc = randomBoolean(); - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("tags").executionHint(randomExecutionHint()) - .field("tag") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("filter1>" + filter2Name + ">" + statsName + "[max]", asc)) - .subAggregation( - filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( - filter(filter2Name, QueryBuilders.matchAllQuery()).subAggregation(stats(statsName).field("i")) + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("tags").executionHint(randomExecutionHint()) + .field("tag") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("filter1>" + filter2Name + ">" + statsName + "[max]", asc)) + .subAggregation( + filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( + filter(filter2Name, QueryBuilders.matchAllQuery()).subAggregation(stats(statsName).field("i")) + ) ) - ) - ).get(); - - assertNoFailures(response); - - StringTerms tags = response.getAggregations().get("tags"); - assertThat(tags, notNullValue()); - assertThat(tags.getName(), equalTo("tags")); - assertThat(tags.getBuckets().size(), equalTo(2)); - - Iterator iters = tags.getBuckets().iterator(); - - // the max for "more" is 2 - // the max for "less" is 4 - - StringTerms.Bucket tag = iters.next(); - assertThat(tag, notNullValue()); - assertThat(tag.getKeyAsString(), equalTo(asc ? "more" : "less")); - assertThat(tag.getDocCount(), equalTo(asc ? 3L : 2L)); - Filter filter1 = tag.getAggregations().get("filter1"); - assertThat(filter1, notNullValue()); - assertThat(filter1.getDocCount(), equalTo(asc ? 3L : 2L)); - Filter filter2 = filter1.getAggregations().get(filter2Name); - assertThat(filter2, notNullValue()); - assertThat(filter2.getDocCount(), equalTo(asc ? 3L : 2L)); - Stats stats = filter2.getAggregations().get(statsName); - assertThat(stats, notNullValue()); - assertThat(stats.getMax(), equalTo(asc ? 2.0 : 4.0)); - - tag = iters.next(); - assertThat(tag, notNullValue()); - assertThat(tag.getKeyAsString(), equalTo(asc ? "less" : "more")); - assertThat(tag.getDocCount(), equalTo(asc ? 2L : 3L)); - filter1 = tag.getAggregations().get("filter1"); - assertThat(filter1, notNullValue()); - assertThat(filter1.getDocCount(), equalTo(asc ? 2L : 3L)); - filter2 = filter1.getAggregations().get(filter2Name); - assertThat(filter2, notNullValue()); - assertThat(filter2.getDocCount(), equalTo(asc ? 2L : 3L)); - stats = filter2.getAggregations().get(statsName); - assertThat(stats, notNullValue()); - assertThat(stats.getMax(), equalTo(asc ? 4.0 : 2.0)); + ), + response -> { + StringTerms tags = response.getAggregations().get("tags"); + assertThat(tags, notNullValue()); + assertThat(tags.getName(), equalTo("tags")); + assertThat(tags.getBuckets().size(), equalTo(2)); + + Iterator iters = tags.getBuckets().iterator(); + + // the max for "more" is 2 + // the max for "less" is 4 + + StringTerms.Bucket tag = iters.next(); + assertThat(tag, notNullValue()); + assertThat(tag.getKeyAsString(), equalTo(asc ? 
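The three multi-hierarchy tests above pin down the order-path syntax: '>' descends through single-bucket sub-aggregations, and the final metric of a multi-value aggregation can be selected either with dot notation or, as the NoDotNotation variant suggests, with brackets; the bracket form appears intended for aggregation names that could themselves contain reserved characters, which is also why the tests strip '[', ']' and '>' from their random names. Both paths below should address the same value:

BucketOrder dotForm     = BucketOrder.aggregation("filter1>filter2>stats.max", true);
BucketOrder bracketForm = BucketOrder.aggregation("filter1>filter2>stats[max]", true);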
"more" : "less")); + assertThat(tag.getDocCount(), equalTo(asc ? 3L : 2L)); + Filter filter1 = tag.getAggregations().get("filter1"); + assertThat(filter1, notNullValue()); + assertThat(filter1.getDocCount(), equalTo(asc ? 3L : 2L)); + Filter filter2 = filter1.getAggregations().get(filter2Name); + assertThat(filter2, notNullValue()); + assertThat(filter2.getDocCount(), equalTo(asc ? 3L : 2L)); + Stats stats = filter2.getAggregations().get(statsName); + assertThat(stats, notNullValue()); + assertThat(stats.getMax(), equalTo(asc ? 2.0 : 4.0)); + + tag = iters.next(); + assertThat(tag, notNullValue()); + assertThat(tag.getKeyAsString(), equalTo(asc ? "less" : "more")); + assertThat(tag.getDocCount(), equalTo(asc ? 2L : 3L)); + filter1 = tag.getAggregations().get("filter1"); + assertThat(filter1, notNullValue()); + assertThat(filter1.getDocCount(), equalTo(asc ? 2L : 3L)); + filter2 = filter1.getAggregations().get(filter2Name); + assertThat(filter2, notNullValue()); + assertThat(filter2.getDocCount(), equalTo(asc ? 2L : 3L)); + stats = filter2.getAggregations().get(statsName); + assertThat(stats, notNullValue()); + assertThat(stats.getMax(), equalTo(asc ? 4.0 : 2.0)); + } + ); } public void testSingleValuedFieldOrderedByMissingSubAggregation() throws Exception { @@ -868,7 +895,7 @@ public void testSingleValuedFieldOrderedByNonMetricsOrMultiBucketSubAggregation( public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithUnknownMetric() throws Exception { for (String index : Arrays.asList("idx", "idx_unmapped")) { try { - SearchResponse response = prepareSearch(index).addAggregation( + prepareSearch(index).addAggregation( new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) @@ -877,9 +904,7 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithUnknownMe ).get(); fail( "Expected search to fail when trying to sort terms aggregation by multi-valued sug-aggregation " - + "with an unknown specified metric to order by. response had " - + response.getFailedShards() - + " failed shards." 
+ + "with an unknown specified metric to order by" ); } catch (ElasticsearchException e) { @@ -912,139 +937,140 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithoutMetric public void testSingleValuedFieldOrderedByMultiValueSubAggregationAsc() throws Exception { boolean asc = true; - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) - .field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("stats.avg", asc)) - .subAggregation(stats("stats").field("i")) - ).get(); - - assertNoFailures(response); - - StringTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - int i = 0; - for (StringTerms.Bucket bucket : terms.getBuckets()) { - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("val" + i)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - Stats stats = bucket.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getMax(), equalTo((double) i)); - i++; - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) + .field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("stats.avg", asc)) + .subAggregation(stats("stats").field("i")) + ), + response -> { + StringTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + int i = 0; + for (StringTerms.Bucket bucket : terms.getBuckets()) { + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("val" + i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + + Stats stats = bucket.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getMax(), equalTo((double) i)); + i++; + } + } + ); } public void testSingleValuedFieldOrderedByMultiValueSubAggregationDesc() throws Exception { boolean asc = false; - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) - .field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("stats.avg", asc)) - .subAggregation(stats("stats").field("i")) - ).get(); - - assertNoFailures(response); - - StringTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - int i = 4; - for (StringTerms.Bucket bucket : terms.getBuckets()) { - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("val" + i)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - Stats stats = bucket.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getMax(), equalTo((double) i)); - i--; - } - + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) + .field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("stats.avg", 
asc)) + .subAggregation(stats("stats").field("i")) + ), + response -> { + StringTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + int i = 4; + for (StringTerms.Bucket bucket : terms.getBuckets()) { + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("val" + i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + + Stats stats = bucket.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getMax(), equalTo((double) i)); + i--; + } + } + ); } public void testSingleValuedFieldOrderedByMultiValueExtendedStatsAsc() throws Exception { boolean asc = true; - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) - .field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("stats.sum_of_squares", asc)) - .subAggregation(extendedStats("stats").field("i")) - ).get(); - - assertNoFailures(response); - - StringTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - int i = 0; - for (StringTerms.Bucket bucket : terms.getBuckets()) { - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("val" + i)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - ExtendedStats stats = bucket.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getMax(), equalTo((double) i)); - i++; - } - + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) + .field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("stats.sum_of_squares", asc)) + .subAggregation(extendedStats("stats").field("i")) + ), + response -> { + StringTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + int i = 0; + for (StringTerms.Bucket bucket : terms.getBuckets()) { + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("val" + i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + + ExtendedStats stats = bucket.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getMax(), equalTo((double) i)); + i++; + } + } + ); } public void testSingleValuedFieldOrderedByStatsAggAscWithTermsSubAgg() throws Exception { boolean asc = true; - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) - .field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("stats.sum_of_squares", asc)) - .subAggregation(extendedStats("stats").field("i")) - .subAggregation( - new TermsAggregationBuilder("subTerms").field("s_values").collectMode(randomFrom(SubAggCollectionMode.values())) - ) - ).get(); - - assertNoFailures(response); - - StringTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), 
equalTo(5)); - - int i = 0; - for (StringTerms.Bucket bucket : terms.getBuckets()) { - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("val" + i)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - ExtendedStats stats = bucket.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getMax(), equalTo((double) i)); - - StringTerms subTermsAgg = bucket.getAggregations().get("subTerms"); - assertThat(subTermsAgg, notNullValue()); - assertThat(subTermsAgg.getBuckets().size(), equalTo(2)); - int j = i; - for (StringTerms.Bucket subBucket : subTermsAgg.getBuckets()) { - assertThat(subBucket, notNullValue()); - assertThat(subBucket.getKeyAsString(), equalTo("val" + j)); - assertThat(subBucket.getDocCount(), equalTo(1L)); - j++; + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) + .field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("stats.sum_of_squares", asc)) + .subAggregation(extendedStats("stats").field("i")) + .subAggregation( + new TermsAggregationBuilder("subTerms").field("s_values").collectMode(randomFrom(SubAggCollectionMode.values())) + ) + ), + response -> { + StringTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + int i = 0; + for (StringTerms.Bucket bucket : terms.getBuckets()) { + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("val" + i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + + ExtendedStats stats = bucket.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getMax(), equalTo((double) i)); + + StringTerms subTermsAgg = bucket.getAggregations().get("subTerms"); + assertThat(subTermsAgg, notNullValue()); + assertThat(subTermsAgg.getBuckets().size(), equalTo(2)); + int j = i; + for (StringTerms.Bucket subBucket : subTermsAgg.getBuckets()) { + assertThat(subBucket, notNullValue()); + assertThat(subBucket.getKeyAsString(), equalTo("val" + j)); + assertThat(subBucket.getDocCount(), equalTo(1L)); + j++; + } + i++; + } } - i++; - } - + ); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndTermsDesc() throws Exception { @@ -1088,57 +1114,60 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound } private void assertMultiSortResponse(String[] expectedKeys, BucketOrder... 
order) { - SearchResponse response = prepareSearch("sort_idx").addAggregation( - new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) - .field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.compound(order)) - .subAggregation(avg("avg_l").field("l")) - .subAggregation(sum("sum_d").field("d")) - ).get(); - - assertNoFailures(response); - - StringTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(expectedKeys.length)); - - int i = 0; - for (StringTerms.Bucket bucket : terms.getBuckets()) { - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(expectedKeys[i])); - assertThat(bucket.getDocCount(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("_count"))); - Avg avg = bucket.getAggregations().get("avg_l"); - assertThat(avg, notNullValue()); - assertThat(avg.getValue(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("avg_l"))); - Sum sum = bucket.getAggregations().get("sum_d"); - assertThat(sum, notNullValue()); - assertThat(sum.value(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("sum_d"))); - i++; - } + assertNoFailuresAndResponse( + prepareSearch("sort_idx").addAggregation( + new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) + .field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.compound(order)) + .subAggregation(avg("avg_l").field("l")) + .subAggregation(sum("sum_d").field("d")) + ), + response -> { + StringTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(expectedKeys.length)); + + int i = 0; + for (StringTerms.Bucket bucket : terms.getBuckets()) { + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(expectedKeys[i])); + assertThat(bucket.getDocCount(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("_count"))); + Avg avg = bucket.getAggregations().get("avg_l"); + assertThat(avg, notNullValue()); + assertThat(avg.getValue(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("avg_l"))); + Sum sum = bucket.getAggregations().get("sum_d"); + assertThat(sum, notNullValue()); + assertThat(sum.value(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("sum_d"))); + i++; + } + } + ); } public void testIndexMetaField() throws Exception { - SearchResponse response = prepareSearch("idx", "empty_bucket_idx").addAggregation( - new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values())) - .executionHint(randomExecutionHint()) - .field(IndexFieldMapper.NAME) - ).get(); - - assertNoFailures(response); - StringTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(2)); - - int i = 0; - for (StringTerms.Bucket bucket : terms.getBuckets()) { - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(i == 0 ? "idx" : "empty_bucket_idx")); - assertThat(bucket.getDocCount(), equalTo(i == 0 ? 
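assertMultiSortResponse above feeds BucketOrder.compound(order) with whatever orders each caller supplies; a compound order compares buckets by the first criterion and falls through to the next on ties. An illustrative combination (not one of the exact orders the callers use):

BucketOrder order = BucketOrder.compound(
    BucketOrder.aggregation("avg_l", true),   // primary: average of field "l", ascending
    BucketOrder.key(false)                    // tie-break: term key, descending
);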
5L : 2L)); - i++; - } + assertNoFailuresAndResponse( + prepareSearch("idx", "empty_bucket_idx").addAggregation( + new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values())) + .executionHint(randomExecutionHint()) + .field(IndexFieldMapper.NAME) + ), + response -> { + StringTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(2)); + + int i = 0; + for (StringTerms.Bucket bucket : terms.getBuckets()) { + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(i == 0 ? "idx" : "empty_bucket_idx")); + assertThat(bucket.getDocCount(), equalTo(i == 0 ? 5L : 2L)); + i++; + } + } + ); } public void testOtherDocCount() { @@ -1171,13 +1200,13 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - new TermsAggregationBuilder("terms").field("d") - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + new TermsAggregationBuilder("terms").field("d") + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -1189,13 +1218,13 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a deterministic script gets cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - new TermsAggregationBuilder("terms").field("d") - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "'foo_' + _value", Collections.emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + new TermsAggregationBuilder("terms").field("d") + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "'foo_' + _value", Collections.emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -1207,8 +1236,7 @@ public void testScriptCaching() throws Exception { ); // Ensure that non-scripted requests are cached as normal - r = prepareSearch("cache_test_idx").setSize(0).addAggregation(new TermsAggregationBuilder("terms").field("d")).get(); - assertNoFailures(r); + assertNoFailures(prepareSearch("cache_test_idx").setSize(0).addAggregation(new TermsAggregationBuilder("terms").field("d"))); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -1230,13 +1258,12 @@ public void testScriptWithValueType() throws Exception { String source = builder.toString(); try (XContentParser parser = createParser(JsonXContent.jsonXContent, source)) { - SearchResponse response = prepareSearch("idx").setSource(new SearchSourceBuilder().parseXContent(parser, true)).get(); - - assertNoFailures(response); - LongTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(1)); + 
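testScriptCaching above keeps its three-phase structure: a request with a nondeterministic script (Math.random()) must leave the request cache untouched, while a deterministic script and a plain non-scripted request are cacheable. The counter it asserts on comes from the index stats API, as in the diff:

long hits = indicesAdmin().prepareStats("cache_test_idx")
    .setRequestCache(true)
    .get()
    .getTotal()
    .getRequestCache()
    .getHitCount();   // compared before and after each search phase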
assertNoFailuresAndResponse(prepareSearch("idx").setSource(new SearchSourceBuilder().parseXContent(parser, true)), response -> { + LongTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(1)); + }); } String invalidValueType = source.replaceAll("\"value_type\":\"n.*\"", "\"value_type\":\"foobar\""); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java index 64a97bf0f6f16..295486fba2e56 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java @@ -7,7 +7,6 @@ */ package org.elasticsearch.search.aggregations.metrics; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; @@ -39,6 +38,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -91,310 +91,325 @@ private static double varianceSampling(int... vals) { @Override public void testEmptyAggregation() throws Exception { - SearchResponse searchResponse = prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field("value").interval(1L).minDocCount(0).subAggregation(extendedStats("stats").field("value")) - ) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - Histogram histo = searchResponse.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - Histogram.Bucket bucket = histo.getBuckets().get(1); - assertThat(bucket, notNullValue()); - - ExtendedStats stats = bucket.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getSumOfSquares(), equalTo(0.0)); - assertThat(stats.getCount(), equalTo(0L)); - assertThat(stats.getSum(), equalTo(0.0)); - assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY)); - assertThat(Double.isNaN(stats.getStdDeviation()), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationPopulation()), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationSampling()), is(true)); - assertThat(Double.isNaN(stats.getAvg()), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER)), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER)), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER_POPULATION)), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER_POPULATION)), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER_SAMPLING)), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER_SAMPLING)), is(true)); + 
assertResponse( + prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value").interval(1L).minDocCount(0).subAggregation(extendedStats("stats").field("value")) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + Histogram.Bucket bucket = histo.getBuckets().get(1); + assertThat(bucket, notNullValue()); + + ExtendedStats stats = bucket.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat(stats.getSumOfSquares(), equalTo(0.0)); + assertThat(stats.getCount(), equalTo(0L)); + assertThat(stats.getSum(), equalTo(0.0)); + assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY)); + assertThat(Double.isNaN(stats.getStdDeviation()), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationPopulation()), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationSampling()), is(true)); + assertThat(Double.isNaN(stats.getAvg()), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER)), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER)), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER_POPULATION)), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER_POPULATION)), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER_SAMPLING)), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER_SAMPLING)), is(true)); + } + ); } @Override public void testUnmapped() throws Exception { - SearchResponse searchResponse = prepareSearch("idx_unmapped").setQuery(matchAllQuery()) - .addAggregation(extendedStats("stats").field("value")) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); - - ExtendedStats stats = searchResponse.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo(Double.NaN)); - assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY)); - assertThat(stats.getSum(), equalTo(0.0)); - assertThat(stats.getCount(), equalTo(0L)); - assertThat(stats.getSumOfSquares(), equalTo(0.0)); - assertThat(stats.getVariance(), equalTo(Double.NaN)); - assertThat(stats.getVariancePopulation(), equalTo(Double.NaN)); - assertThat(stats.getVarianceSampling(), equalTo(Double.NaN)); - assertThat(stats.getStdDeviation(), equalTo(Double.NaN)); - assertThat(stats.getStdDeviationPopulation(), equalTo(Double.NaN)); - assertThat(stats.getStdDeviationSampling(), equalTo(Double.NaN)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER)), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER)), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER_POPULATION)), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER_POPULATION)), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER_SAMPLING)), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER_SAMPLING)), is(true)); + assertResponse( + prepareSearch("idx_unmapped").setQuery(matchAllQuery()).addAggregation(extendedStats("stats").field("value")), + response -> { + 
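The empty-bucket expectations in testEmptyAggregation above are the reduction identities: count 0 and sum 0.0, min seeded with positive infinity and max with negative infinity (so any real value replaces them), and NaN for every statistic that divides by the count. A two-line demonstration of the NaN case:

double sum = 0.0;
long count = 0;
double avg = sum / count;   // 0.0 / 0 -> NaN, matching stats.getAvg() for an empty bucket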
assertThat(response.getHits().getTotalHits().value, equalTo(0L)); + + ExtendedStats stats = response.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat(stats.getAvg(), equalTo(Double.NaN)); + assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY)); + assertThat(stats.getSum(), equalTo(0.0)); + assertThat(stats.getCount(), equalTo(0L)); + assertThat(stats.getSumOfSquares(), equalTo(0.0)); + assertThat(stats.getVariance(), equalTo(Double.NaN)); + assertThat(stats.getVariancePopulation(), equalTo(Double.NaN)); + assertThat(stats.getVarianceSampling(), equalTo(Double.NaN)); + assertThat(stats.getStdDeviation(), equalTo(Double.NaN)); + assertThat(stats.getStdDeviationPopulation(), equalTo(Double.NaN)); + assertThat(stats.getStdDeviationSampling(), equalTo(Double.NaN)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER)), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER)), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER_POPULATION)), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER_POPULATION)), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER_SAMPLING)), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER_SAMPLING)), is(true)); + } + ); } public void testPartiallyUnmapped() { double sigma = randomDouble() * 5; - ExtendedStats s1 = prepareSearch("idx").addAggregation(extendedStats("stats").field("value").sigma(sigma)) - .get() - .getAggregations() - .get("stats"); - ExtendedStats s2 = prepareSearch("idx", "idx_unmapped").addAggregation(extendedStats("stats").field("value").sigma(sigma)) - .get() - .getAggregations() - .get("stats"); - assertEquals(s1.getAvg(), s2.getAvg(), 1e-10); - assertEquals(s1.getCount(), s2.getCount()); - assertEquals(s1.getMin(), s2.getMin(), 0d); - assertEquals(s1.getMax(), s2.getMax(), 0d); - assertEquals(s1.getStdDeviation(), s2.getStdDeviation(), 1e-10); - assertEquals(s1.getStdDeviationPopulation(), s2.getStdDeviationPopulation(), 1e-10); - assertEquals(s1.getStdDeviationSampling(), s2.getStdDeviationSampling(), 1e-10); - assertEquals(s1.getSumOfSquares(), s2.getSumOfSquares(), 1e-10); - assertEquals(s1.getStdDeviationBound(Bounds.LOWER), s2.getStdDeviationBound(Bounds.LOWER), 1e-10); - assertEquals(s1.getStdDeviationBound(Bounds.UPPER), s2.getStdDeviationBound(Bounds.UPPER), 1e-10); - assertEquals(s1.getStdDeviationBound(Bounds.LOWER_POPULATION), s2.getStdDeviationBound(Bounds.LOWER_POPULATION), 1e-10); - assertEquals(s1.getStdDeviationBound(Bounds.UPPER_POPULATION), s2.getStdDeviationBound(Bounds.UPPER_POPULATION), 1e-10); - assertEquals(s1.getStdDeviationBound(Bounds.LOWER_SAMPLING), s2.getStdDeviationBound(Bounds.LOWER_SAMPLING), 1e-10); - assertEquals(s1.getStdDeviationBound(Bounds.UPPER_SAMPLING), s2.getStdDeviationBound(Bounds.UPPER_SAMPLING), 1e-10); + assertResponse(prepareSearch("idx").addAggregation(extendedStats("stats").field("value").sigma(sigma)), response1 -> { + ExtendedStats s1 = response1.getAggregations().get("stats"); + assertResponse( + prepareSearch("idx", "idx_unmapped").addAggregation(extendedStats("stats").field("value").sigma(sigma)), + response2 -> { + ExtendedStats s2 = response2.getAggregations().get("stats"); + assertEquals(s1.getAvg(), s2.getAvg(), 1e-10); + assertEquals(s1.getCount(), s2.getCount()); + assertEquals(s1.getMin(), 
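testPartiallyUnmapped needs both responses alive at once to compare their stats, so the second assertResponse is nested inside the first consumer, as the diff shows; each response is released as its consumer returns, innermost first. The shape, with request1 and request2 as placeholders for the two prepareSearch calls:

assertResponse(request1, response1 -> {
    ExtendedStats s1 = response1.getAggregations().get("stats");
    assertResponse(request2, response2 -> {
        ExtendedStats s2 = response2.getAggregations().get("stats");
        assertEquals(s1.getAvg(), s2.getAvg(), 1e-10);   // s1 is still live here
    });
});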
s2.getMin(), 0d); + assertEquals(s1.getMax(), s2.getMax(), 0d); + assertEquals(s1.getStdDeviation(), s2.getStdDeviation(), 1e-10); + assertEquals(s1.getStdDeviationPopulation(), s2.getStdDeviationPopulation(), 1e-10); + assertEquals(s1.getStdDeviationSampling(), s2.getStdDeviationSampling(), 1e-10); + assertEquals(s1.getSumOfSquares(), s2.getSumOfSquares(), 1e-10); + assertEquals(s1.getStdDeviationBound(Bounds.LOWER), s2.getStdDeviationBound(Bounds.LOWER), 1e-10); + assertEquals(s1.getStdDeviationBound(Bounds.UPPER), s2.getStdDeviationBound(Bounds.UPPER), 1e-10); + assertEquals(s1.getStdDeviationBound(Bounds.LOWER_POPULATION), s2.getStdDeviationBound(Bounds.LOWER_POPULATION), 1e-10); + assertEquals(s1.getStdDeviationBound(Bounds.UPPER_POPULATION), s2.getStdDeviationBound(Bounds.UPPER_POPULATION), 1e-10); + assertEquals(s1.getStdDeviationBound(Bounds.LOWER_SAMPLING), s2.getStdDeviationBound(Bounds.LOWER_SAMPLING), 1e-10); + assertEquals(s1.getStdDeviationBound(Bounds.UPPER_SAMPLING), s2.getStdDeviationBound(Bounds.UPPER_SAMPLING), 1e-10); + } + ); + }); } @Override public void testSingleValuedField() throws Exception { double sigma = randomDouble() * randomIntBetween(1, 10); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(extendedStats("stats").field("value").sigma(sigma)) - .get(); - - assertHitCount(searchResponse, 10); - - ExtendedStats stats = searchResponse.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10)); - assertThat(stats.getMin(), equalTo(1.0)); - assertThat(stats.getMax(), equalTo(10.0)); - assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); - assertThat(stats.getCount(), equalTo(10L)); - assertThat(stats.getSumOfSquares(), equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100)); - assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - checkUpperLowerBounds(stats, sigma); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(extendedStats("stats").field("value").sigma(sigma)), + response -> { + assertHitCount(response, 10); + + ExtendedStats stats = response.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10)); + assertThat(stats.getMin(), equalTo(1.0)); + assertThat(stats.getMax(), equalTo(10.0)); + assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); + assertThat(stats.getCount(), equalTo(10L)); + assertThat(stats.getSumOfSquares(), equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100)); + assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + 
assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + checkUpperLowerBounds(stats, sigma); + } + ); } public void testSingleValuedFieldDefaultSigma() throws Exception { // Same as previous test, but uses a default value for sigma - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(extendedStats("stats").field("value")) - .get(); - - assertHitCount(searchResponse, 10); - - ExtendedStats stats = searchResponse.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10)); - assertThat(stats.getMin(), equalTo(1.0)); - assertThat(stats.getMax(), equalTo(10.0)); - assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); - assertThat(stats.getCount(), equalTo(10L)); - assertThat(stats.getSumOfSquares(), equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100)); - assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - checkUpperLowerBounds(stats, 2); + assertResponse(prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(extendedStats("stats").field("value")), response -> { + + assertHitCount(response, 10); + + ExtendedStats stats = response.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10)); + assertThat(stats.getMin(), equalTo(1.0)); + assertThat(stats.getMax(), equalTo(10.0)); + assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); + assertThat(stats.getCount(), equalTo(10L)); + assertThat(stats.getSumOfSquares(), equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100)); + assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + checkUpperLowerBounds(stats, 2); + }); } public void testSingleValuedField_WithFormatter() throws Exception { double sigma = randomDouble() * randomIntBetween(1, 10); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - 
.addAggregation(extendedStats("stats").format("0000.0").field("value").sigma(sigma)) - .get(); - - assertHitCount(searchResponse, 10); - - ExtendedStats stats = searchResponse.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10)); - assertThat(stats.getAvgAsString(), equalTo("0005.5")); - assertThat(stats.getMin(), equalTo(1.0)); - assertThat(stats.getMinAsString(), equalTo("0001.0")); - assertThat(stats.getMax(), equalTo(10.0)); - assertThat(stats.getMaxAsString(), equalTo("0010.0")); - assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); - assertThat(stats.getSumAsString(), equalTo("0055.0")); - assertThat(stats.getCount(), equalTo(10L)); - assertThat(stats.getSumOfSquares(), equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100)); - assertThat(stats.getSumOfSquaresAsString(), equalTo("0385.0")); - assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getVarianceAsString(), equalTo("0008.2")); - assertThat(stats.getVariancePopulationAsString(), equalTo("0008.2")); - assertThat(stats.getVarianceSamplingAsString(), equalTo("0009.2")); - assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getStdDeviationAsString(), equalTo("0002.9")); - assertThat(stats.getStdDeviationPopulationAsString(), equalTo("0002.9")); - assertThat(stats.getStdDeviationSamplingAsString(), equalTo("0003.0")); - checkUpperLowerBounds(stats, sigma); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(extendedStats("stats").format("0000.0").field("value").sigma(sigma)), + response -> { + assertHitCount(response, 10); + + ExtendedStats stats = response.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10)); + assertThat(stats.getAvgAsString(), equalTo("0005.5")); + assertThat(stats.getMin(), equalTo(1.0)); + assertThat(stats.getMinAsString(), equalTo("0001.0")); + assertThat(stats.getMax(), equalTo(10.0)); + assertThat(stats.getMaxAsString(), equalTo("0010.0")); + assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); + assertThat(stats.getSumAsString(), equalTo("0055.0")); + assertThat(stats.getCount(), equalTo(10L)); + assertThat(stats.getSumOfSquares(), equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100)); + assertThat(stats.getSumOfSquaresAsString(), equalTo("0385.0")); + assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getVarianceAsString(), equalTo("0008.2")); + assertThat(stats.getVariancePopulationAsString(), equalTo("0008.2")); 
+ assertThat(stats.getVarianceSamplingAsString(), equalTo("0009.2")); + assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getStdDeviationAsString(), equalTo("0002.9")); + assertThat(stats.getStdDeviationPopulationAsString(), equalTo("0002.9")); + assertThat(stats.getStdDeviationSamplingAsString(), equalTo("0003.0")); + checkUpperLowerBounds(stats, sigma); + } + ); } @Override public void testSingleValuedFieldGetProperty() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(global("global").subAggregation(extendedStats("stats").field("value"))) - .get(); - - assertHitCount(searchResponse, 10); - - Global global = searchResponse.getAggregations().get("global"); - assertThat(global, notNullValue()); - assertThat(global.getName(), equalTo("global")); - assertThat(global.getDocCount(), equalTo(10L)); - assertThat(global.getAggregations(), notNullValue()); - assertThat(global.getAggregations().asMap().size(), equalTo(1)); - - ExtendedStats stats = global.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - ExtendedStats statsFromProperty = (ExtendedStats) ((InternalAggregation) global).getProperty("stats"); - assertThat(statsFromProperty, notNullValue()); - assertThat(statsFromProperty, sameInstance(stats)); - double expectedAvgValue = (double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10; - assertThat(stats.getAvg(), equalTo(expectedAvgValue)); - assertThat((double) ((InternalAggregation) global).getProperty("stats.avg"), equalTo(expectedAvgValue)); - double expectedMinValue = 1.0; - assertThat(stats.getMin(), equalTo(expectedMinValue)); - assertThat((double) ((InternalAggregation) global).getProperty("stats.min"), equalTo(expectedMinValue)); - double expectedMaxValue = 10.0; - assertThat(stats.getMax(), equalTo(expectedMaxValue)); - assertThat((double) ((InternalAggregation) global).getProperty("stats.max"), equalTo(expectedMaxValue)); - double expectedSumValue = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10; - assertThat(stats.getSum(), equalTo(expectedSumValue)); - assertThat((double) ((InternalAggregation) global).getProperty("stats.sum"), equalTo(expectedSumValue)); - long expectedCountValue = 10; - assertThat(stats.getCount(), equalTo(expectedCountValue)); - assertThat((double) ((InternalAggregation) global).getProperty("stats.count"), equalTo((double) expectedCountValue)); - double expectedSumOfSquaresValue = (double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100; - assertThat(stats.getSumOfSquares(), equalTo(expectedSumOfSquaresValue)); - assertThat((double) ((InternalAggregation) global).getProperty("stats.sum_of_squares"), equalTo(expectedSumOfSquaresValue)); - double expectedVarianceValue = variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); - assertThat(stats.getVariance(), equalTo(expectedVarianceValue)); - assertThat((double) ((InternalAggregation) global).getProperty("stats.variance"), equalTo(expectedVarianceValue)); - double expectedVariancePopulationValue = variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); - assertThat(stats.getVariancePopulation(), equalTo(expectedVariancePopulationValue)); - assertThat( - (double) ((InternalAggregation) global).getProperty("stats.variance_population"), - 
equalTo(expectedVariancePopulationValue) - ); - double expectedVarianceSamplingValue = varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); - assertThat(stats.getVarianceSampling(), equalTo(expectedVarianceSamplingValue)); - assertThat((double) ((InternalAggregation) global).getProperty("stats.variance_sampling"), equalTo(expectedVarianceSamplingValue)); - double expectedStdDevValue = stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); - assertThat(stats.getStdDeviation(), equalTo(expectedStdDevValue)); - assertThat((double) ((InternalAggregation) global).getProperty("stats.std_deviation"), equalTo(expectedStdDevValue)); - double expectedStdDevPopulationValue = stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); - assertThat(stats.getStdDeviationPopulation(), equalTo(expectedStdDevValue)); - assertThat( - (double) ((InternalAggregation) global).getProperty("stats.std_deviation_population"), - equalTo(expectedStdDevPopulationValue) - ); - double expectedStdDevSamplingValue = stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); - assertThat(stats.getStdDeviationSampling(), equalTo(expectedStdDevSamplingValue)); - assertThat( - (double) ((InternalAggregation) global).getProperty("stats.std_deviation_sampling"), - equalTo(expectedStdDevSamplingValue) + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(global("global").subAggregation(extendedStats("stats").field("value"))), + response -> { + assertHitCount(response, 10); + + Global global = response.getAggregations().get("global"); + assertThat(global, notNullValue()); + assertThat(global.getName(), equalTo("global")); + assertThat(global.getDocCount(), equalTo(10L)); + assertThat(global.getAggregations(), notNullValue()); + assertThat(global.getAggregations().asMap().size(), equalTo(1)); + + ExtendedStats stats = global.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + ExtendedStats statsFromProperty = (ExtendedStats) ((InternalAggregation) global).getProperty("stats"); + assertThat(statsFromProperty, notNullValue()); + assertThat(statsFromProperty, sameInstance(stats)); + double expectedAvgValue = (double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10; + assertThat(stats.getAvg(), equalTo(expectedAvgValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.avg"), equalTo(expectedAvgValue)); + double expectedMinValue = 1.0; + assertThat(stats.getMin(), equalTo(expectedMinValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.min"), equalTo(expectedMinValue)); + double expectedMaxValue = 10.0; + assertThat(stats.getMax(), equalTo(expectedMaxValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.max"), equalTo(expectedMaxValue)); + double expectedSumValue = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10; + assertThat(stats.getSum(), equalTo(expectedSumValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.sum"), equalTo(expectedSumValue)); + long expectedCountValue = 10; + assertThat(stats.getCount(), equalTo(expectedCountValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.count"), equalTo((double) expectedCountValue)); + double expectedSumOfSquaresValue = (double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100; + assertThat(stats.getSumOfSquares(), equalTo(expectedSumOfSquaresValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.sum_of_squares"), equalTo(expectedSumOfSquaresValue)); + double expectedVarianceValue = 
variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); + assertThat(stats.getVariance(), equalTo(expectedVarianceValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.variance"), equalTo(expectedVarianceValue)); + double expectedVariancePopulationValue = variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); + assertThat(stats.getVariancePopulation(), equalTo(expectedVariancePopulationValue)); + assertThat( + (double) ((InternalAggregation) global).getProperty("stats.variance_population"), + equalTo(expectedVariancePopulationValue) + ); + double expectedVarianceSamplingValue = varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); + assertThat(stats.getVarianceSampling(), equalTo(expectedVarianceSamplingValue)); + assertThat( + (double) ((InternalAggregation) global).getProperty("stats.variance_sampling"), + equalTo(expectedVarianceSamplingValue) + ); + double expectedStdDevValue = stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); + assertThat(stats.getStdDeviation(), equalTo(expectedStdDevValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.std_deviation"), equalTo(expectedStdDevValue)); + double expectedStdDevPopulationValue = stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); + assertThat(stats.getStdDeviationPopulation(), equalTo(expectedStdDevValue)); + assertThat( + (double) ((InternalAggregation) global).getProperty("stats.std_deviation_population"), + equalTo(expectedStdDevPopulationValue) + ); + double expectedStdDevSamplingValue = stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); + assertThat(stats.getStdDeviationSampling(), equalTo(expectedStdDevSamplingValue)); + assertThat( + (double) ((InternalAggregation) global).getProperty("stats.std_deviation_sampling"), + equalTo(expectedStdDevSamplingValue) + ); + } ); } @Override public void testSingleValuedFieldPartiallyUnmapped() throws Exception { double sigma = randomDouble() * randomIntBetween(1, 10); - SearchResponse searchResponse = prepareSearch("idx", "idx_unmapped").setQuery(matchAllQuery()) - .addAggregation(extendedStats("stats").field("value").sigma(sigma)) - .get(); - - assertHitCount(searchResponse, 10); - - ExtendedStats stats = searchResponse.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10)); - assertThat(stats.getMin(), equalTo(1.0)); - assertThat(stats.getMax(), equalTo(10.0)); - assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); - assertThat(stats.getCount(), equalTo(10L)); - assertThat(stats.getSumOfSquares(), equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100)); - assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - checkUpperLowerBounds(stats, sigma); + assertResponse( + prepareSearch("idx", "idx_unmapped").setQuery(matchAllQuery()) + .addAggregation(extendedStats("stats").field("value").sigma(sigma)), + response -> { + assertHitCount(response, 10); + + ExtendedStats 
stats = response.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10)); + assertThat(stats.getMin(), equalTo(1.0)); + assertThat(stats.getMax(), equalTo(10.0)); + assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); + assertThat(stats.getCount(), equalTo(10L)); + assertThat(stats.getSumOfSquares(), equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100)); + assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + checkUpperLowerBounds(stats, sigma); + } + ); } @Override public void testSingleValuedFieldWithValueScript() throws Exception { double sigma = randomDouble() * randomIntBetween(1, 10); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - extendedStats("stats").field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap())) - .sigma(sigma) - ) - .get(); - - assertHitCount(searchResponse, 10); - - ExtendedStats stats = searchResponse.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11) / 10)); - assertThat(stats.getMin(), equalTo(2.0)); - assertThat(stats.getMax(), equalTo(11.0)); - assertThat(stats.getSum(), equalTo((double) 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11)); - assertThat(stats.getCount(), equalTo(10L)); - assertThat(stats.getSumOfSquares(), equalTo((double) 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121)); - assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - checkUpperLowerBounds(stats, sigma); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + extendedStats("stats").field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap())) + .sigma(sigma) + ), + response -> { + assertHitCount(response, 10); + + ExtendedStats stats = response.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat(stats.getAvg(), equalTo((double) (2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11) / 10)); + assertThat(stats.getMin(), equalTo(2.0)); + assertThat(stats.getMax(), equalTo(11.0)); + assertThat(stats.getSum(), 
equalTo((double) 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11)); + assertThat(stats.getCount(), equalTo(10L)); + assertThat(stats.getSumOfSquares(), equalTo((double) 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121)); + assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + checkUpperLowerBounds(stats, sigma); + } + ); } @Override @@ -402,118 +417,139 @@ public void testSingleValuedFieldWithValueScriptWithParams() throws Exception { Map<String, Object> params = new HashMap<>(); params.put("inc", 1); double sigma = randomDouble() * randomIntBetween(1, 10); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - extendedStats("stats").field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + inc", params)) - .sigma(sigma) - ) - .get(); - - assertHitCount(searchResponse, 10); - - ExtendedStats stats = searchResponse.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11) / 10)); - assertThat(stats.getMin(), equalTo(2.0)); - assertThat(stats.getMax(), equalTo(11.0)); - assertThat(stats.getSum(), equalTo((double) 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11)); - assertThat(stats.getCount(), equalTo(10L)); - assertThat(stats.getSumOfSquares(), equalTo((double) 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121)); - assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - checkUpperLowerBounds(stats, sigma); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + extendedStats("stats").field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + inc", params)) + .sigma(sigma) + ), + response -> { + assertHitCount(response, 10); + + ExtendedStats stats = response.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat(stats.getAvg(), equalTo((double) (2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11) / 10)); + assertThat(stats.getMin(), equalTo(2.0)); + assertThat(stats.getMax(), equalTo(11.0)); + assertThat(stats.getSum(), equalTo((double) 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11)); + assertThat(stats.getCount(), equalTo(10L)); + assertThat(stats.getSumOfSquares(), equalTo((double) 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121)); + assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8,
9, 10, 11))); + assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + checkUpperLowerBounds(stats, sigma); + } + ); } @Override public void testMultiValuedField() throws Exception { double sigma = randomDouble() * randomIntBetween(1, 10); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(extendedStats("stats").field("values").sigma(sigma)) - .get(); - - assertHitCount(searchResponse, 10); - - ExtendedStats stats = searchResponse.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat( - stats.getAvg(), - equalTo((double) (2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12) / 20) + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(extendedStats("stats").field("values").sigma(sigma)), + response -> { + + assertHitCount(response, 10); + + ExtendedStats stats = response.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat( + stats.getAvg(), + equalTo((double) (2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12) / 20) + ); + assertThat(stats.getMin(), equalTo(2.0)); + assertThat(stats.getMax(), equalTo(12.0)); + assertThat( + stats.getSum(), + equalTo((double) 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12) + ); + assertThat(stats.getCount(), equalTo(20L)); + assertThat( + stats.getSumOfSquares(), + equalTo((double) 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121 + 144) + ); + assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))); + assertThat( + stats.getVariancePopulation(), + equalTo(variancePopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) + ); + assertThat( + stats.getVarianceSampling(), + equalTo(varianceSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) + ); + assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))); + assertThat( + stats.getStdDeviationPopulation(), + equalTo(stdDevPopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) + ); + assertThat( + stats.getStdDeviationSampling(), + equalTo(stdDevSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) + ); + checkUpperLowerBounds(stats, sigma); + } ); - assertThat(stats.getMin(), equalTo(2.0)); - assertThat(stats.getMax(), equalTo(12.0)); - assertThat(stats.getSum(), equalTo((double) 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12)); - assertThat(stats.getCount(), equalTo(20L)); - assertThat( - stats.getSumOfSquares(), - equalTo((double) 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121 + 144) - ); - assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))); - 
assertThat( - stats.getVariancePopulation(), - equalTo(variancePopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) - ); - assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))); - assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))); - assertThat( - stats.getStdDeviationPopulation(), - equalTo(stdDevPopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) - ); - assertThat( - stats.getStdDeviationSampling(), - equalTo(stdDevSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) - ); - checkUpperLowerBounds(stats, sigma); } @Override public void testMultiValuedFieldWithValueScript() throws Exception { double sigma = randomDouble() * randomIntBetween(1, 10); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - extendedStats("stats").field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", Collections.emptyMap())) - .sigma(sigma) - ) - .get(); - - assertHitCount(searchResponse, 10); - - ExtendedStats stats = searchResponse.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat( - stats.getAvg(), - equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11) / 20) + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + extendedStats("stats").field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", Collections.emptyMap())) + .sigma(sigma) + ), + response -> { + assertHitCount(response, 10); + + ExtendedStats stats = response.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat( + stats.getAvg(), + equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11) / 20) + ); + assertThat(stats.getMin(), equalTo(1.0)); + assertThat(stats.getMax(), equalTo(11.0)); + assertThat( + stats.getSum(), + equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11) + ); + assertThat(stats.getCount(), equalTo(20L)); + assertThat( + stats.getSumOfSquares(), + equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121) + ); + assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat( + stats.getVariancePopulation(), + equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)) + ); + assertThat( + stats.getVarianceSampling(), + equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)) + ); + assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat( + stats.getStdDeviationPopulation(), + equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)) + ); + assertThat( + stats.getStdDeviationSampling(), + equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)) + ); + + checkUpperLowerBounds(stats, sigma); + } ); - assertThat(stats.getMin(), equalTo(1.0)); - assertThat(stats.getMax(), equalTo(11.0)); - assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 
6 + 7 + 8 + 9 + 10 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11)); - assertThat(stats.getCount(), equalTo(20L)); - assertThat( - stats.getSumOfSquares(), - equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121) - ); - assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat( - stats.getVariancePopulation(), - equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)) - ); - assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat( - stats.getStdDeviationPopulation(), - equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)) - ); - assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - - checkUpperLowerBounds(stats, sigma); } @Override @@ -521,75 +557,88 @@ public void testMultiValuedFieldWithValueScriptWithParams() throws Exception { Map<String, Object> params = new HashMap<>(); params.put("dec", 1); double sigma = randomDouble() * randomIntBetween(1, 10); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - extendedStats("stats").field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) - .sigma(sigma) - ) - .get(); - - assertHitCount(searchResponse, 10); - - ExtendedStats stats = searchResponse.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat( - stats.getAvg(), - equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11) / 20) + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + extendedStats("stats").field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) + .sigma(sigma) + ), + response -> { + assertHitCount(response, 10); + + ExtendedStats stats = response.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat( + stats.getAvg(), + equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11) / 20) + ); + assertThat(stats.getMin(), equalTo(1.0)); + assertThat(stats.getMax(), equalTo(11.0)); + assertThat( + stats.getSum(), + equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11) + ); + assertThat(stats.getCount(), equalTo(20L)); + assertThat( + stats.getSumOfSquares(), + equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121) + ); + assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat( + stats.getVariancePopulation(), + equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)) + ); + assertThat( + stats.getVarianceSampling(), + equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)) + ); + assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat( + stats.getStdDeviationPopulation(), +
equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)) + ); + assertThat( + stats.getStdDeviationSampling(), + equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)) + ); + checkUpperLowerBounds(stats, sigma); + } ); - assertThat(stats.getMin(), equalTo(1.0)); - assertThat(stats.getMax(), equalTo(11.0)); - assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11)); - assertThat(stats.getCount(), equalTo(20L)); - assertThat( - stats.getSumOfSquares(), - equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121) - ); - assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat( - stats.getVariancePopulation(), - equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)) - ); - assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat( - stats.getStdDeviationPopulation(), - equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)) - ); - assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - checkUpperLowerBounds(stats, sigma); } @Override public void testScriptSingleValued() throws Exception { double sigma = randomDouble() * randomIntBetween(1, 10); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - extendedStats("stats").script( - new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", Collections.emptyMap()) - ).sigma(sigma) - ) - .get(); - - assertHitCount(searchResponse, 10); - - ExtendedStats stats = searchResponse.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10)); - assertThat(stats.getMin(), equalTo(1.0)); - assertThat(stats.getMax(), equalTo(10.0)); - assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); - assertThat(stats.getCount(), equalTo(10L)); - assertThat(stats.getSumOfSquares(), equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100)); - assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - checkUpperLowerBounds(stats, sigma); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + extendedStats("stats").script( + new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", Collections.emptyMap()) + ).sigma(sigma) + ), + response -> { + assertHitCount(response, 10); + + ExtendedStats stats = 
response.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10)); + assertThat(stats.getMin(), equalTo(1.0)); + assertThat(stats.getMax(), equalTo(10.0)); + assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); + assertThat(stats.getCount(), equalTo(10L)); + assertThat(stats.getSumOfSquares(), equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100)); + assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + checkUpperLowerBounds(stats, sigma); + } + ); } @Override @@ -600,74 +649,83 @@ public void testScriptSingleValuedWithParams() throws Exception { Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value + inc", params); double sigma = randomDouble() * randomIntBetween(1, 10); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(extendedStats("stats").script(script).sigma(sigma)) - .get(); - - assertHitCount(searchResponse, 10); - - ExtendedStats stats = searchResponse.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11) / 10)); - assertThat(stats.getMin(), equalTo(2.0)); - assertThat(stats.getMax(), equalTo(11.0)); - assertThat(stats.getSum(), equalTo((double) 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11)); - assertThat(stats.getCount(), equalTo(10L)); - assertThat(stats.getSumOfSquares(), equalTo((double) 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121)); - assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - checkUpperLowerBounds(stats, sigma); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(extendedStats("stats").script(script).sigma(sigma)), + response -> { + assertHitCount(response, 10); + + ExtendedStats stats = response.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat(stats.getAvg(), equalTo((double) (2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11) / 10)); + assertThat(stats.getMin(), equalTo(2.0)); + assertThat(stats.getMax(), equalTo(11.0)); + assertThat(stats.getSum(), equalTo((double) 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11)); + assertThat(stats.getCount(), equalTo(10L)); + 
assertThat(stats.getSumOfSquares(), equalTo((double) 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121)); + assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + checkUpperLowerBounds(stats, sigma); + } + ); } @Override public void testScriptMultiValued() throws Exception { double sigma = randomDouble() * randomIntBetween(1, 10); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - extendedStats("stats").script( - new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['values']", Collections.emptyMap()) - ).sigma(sigma) - ) - .get(); - - assertHitCount(searchResponse, 10); - - ExtendedStats stats = searchResponse.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat( - stats.getAvg(), - equalTo((double) (2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12) / 20) - ); - assertThat(stats.getMin(), equalTo(2.0)); - assertThat(stats.getMax(), equalTo(12.0)); - assertThat(stats.getSum(), equalTo((double) 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12)); - assertThat(stats.getCount(), equalTo(20L)); - assertThat( - stats.getSumOfSquares(), - equalTo((double) 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121 + 144) - ); - assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))); - assertThat( - stats.getVariancePopulation(), - equalTo(variancePopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) - ); - assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))); - assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))); - assertThat( - stats.getStdDeviationPopulation(), - equalTo(stdDevPopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) - ); - assertThat( - stats.getStdDeviationSampling(), - equalTo(stdDevSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + extendedStats("stats").script( + new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['values']", Collections.emptyMap()) + ).sigma(sigma) + ), + response -> { + assertHitCount(response, 10); + + ExtendedStats stats = response.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat( + stats.getAvg(), + equalTo((double) (2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12) / 20) + ); + assertThat(stats.getMin(), equalTo(2.0)); + assertThat(stats.getMax(), equalTo(12.0)); + assertThat( + stats.getSum(), + equalTo((double) 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12) + 
); + assertThat(stats.getCount(), equalTo(20L)); + assertThat( + stats.getSumOfSquares(), + equalTo((double) 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121 + 144) + ); + assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))); + assertThat( + stats.getVariancePopulation(), + equalTo(variancePopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) + ); + assertThat( + stats.getVarianceSampling(), + equalTo(varianceSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) + ); + assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))); + assertThat( + stats.getStdDeviationPopulation(), + equalTo(stdDevPopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) + ); + assertThat( + stats.getStdDeviationSampling(), + equalTo(stdDevSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) + ); + checkUpperLowerBounds(stats, sigma); + } ); - checkUpperLowerBounds(stats, sigma); } @Override @@ -683,125 +741,147 @@ public void testScriptMultiValuedWithParams() throws Exception { ); double sigma = randomDouble() * randomIntBetween(1, 10); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(extendedStats("stats").script(script).sigma(sigma)) - .get(); - - assertHitCount(searchResponse, 10); - - ExtendedStats stats = searchResponse.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9) / 20)); - assertThat(stats.getMin(), equalTo(0.0)); - assertThat(stats.getMax(), equalTo(10.0)); - assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9)); - assertThat(stats.getCount(), equalTo(20L)); - assertThat( - stats.getSumOfSquares(), - equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 0 + 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81) - ); - assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9))); - assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9))); - assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9))); - assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9))); - assertThat( - stats.getStdDeviationPopulation(), - equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9)) + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(extendedStats("stats").script(script).sigma(sigma)), + response -> { + assertHitCount(response, 10); + + ExtendedStats stats = response.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat( + stats.getAvg(), + equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9) / 20) + ); + assertThat(stats.getMin(), equalTo(0.0)); + assertThat(stats.getMax(), equalTo(10.0)); + assertThat( + stats.getSum(), + equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9) + ); + 
assertThat(stats.getCount(), equalTo(20L)); + assertThat( + stats.getSumOfSquares(), + equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 0 + 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81) + ); + assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9))); + assertThat( + stats.getVariancePopulation(), + equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9)) + ); + assertThat( + stats.getVarianceSampling(), + equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9)) + ); + assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9))); + assertThat( + stats.getStdDeviationPopulation(), + equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9)) + ); + assertThat( + stats.getStdDeviationSampling(), + equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9)) + ); + checkUpperLowerBounds(stats, sigma); + } ); - assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9))); - checkUpperLowerBounds(stats, sigma); } public void testEmptySubAggregation() { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - terms("value").field("value") - .subAggregation(missing("values").field("values").subAggregation(extendedStats("stats").field("value"))) - ) - .get(); - - assertHitCount(searchResponse, 10); - - Terms terms = searchResponse.getAggregations().get("value"); - assertThat(terms, notNullValue()); - assertThat(terms.getBuckets().size(), equalTo(10)); - - for (Terms.Bucket bucket : terms.getBuckets()) { - assertThat(bucket.getDocCount(), equalTo(1L)); - - Missing missing = bucket.getAggregations().get("values"); - assertThat(missing, notNullValue()); - assertThat(missing.getDocCount(), equalTo(0L)); - - ExtendedStats stats = missing.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getSumOfSquares(), equalTo(0.0)); - assertThat(stats.getCount(), equalTo(0L)); - assertThat(stats.getSum(), equalTo(0.0)); - assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY)); - assertThat(Double.isNaN(stats.getStdDeviation()), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationPopulation()), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationSampling()), is(true)); - assertThat(Double.isNaN(stats.getAvg()), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER)), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER)), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER_POPULATION)), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER_POPULATION)), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER_SAMPLING)), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER_SAMPLING)), is(true)); - } + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + terms("value").field("value") + .subAggregation(missing("values").field("values").subAggregation(extendedStats("stats").field("value"))) + ), + response -> { + assertHitCount(response, 10); + + Terms terms = response.getAggregations().get("value"); + assertThat(terms, notNullValue()); + 
assertThat(terms.getBuckets().size(), equalTo(10)); + + for (Terms.Bucket bucket : terms.getBuckets()) { + assertThat(bucket.getDocCount(), equalTo(1L)); + + Missing missing = bucket.getAggregations().get("values"); + assertThat(missing, notNullValue()); + assertThat(missing.getDocCount(), equalTo(0L)); + + ExtendedStats stats = missing.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat(stats.getSumOfSquares(), equalTo(0.0)); + assertThat(stats.getCount(), equalTo(0L)); + assertThat(stats.getSum(), equalTo(0.0)); + assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY)); + assertThat(Double.isNaN(stats.getStdDeviation()), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationPopulation()), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationSampling()), is(true)); + assertThat(Double.isNaN(stats.getAvg()), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER)), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER)), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER_POPULATION)), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER_POPULATION)), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER_SAMPLING)), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER_SAMPLING)), is(true)); + } + } + ); } @Override public void testOrderByEmptyAggregation() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - terms("terms").field("value") - .order(BucketOrder.compound(BucketOrder.aggregation("filter>extendedStats.avg", true))) - .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(extendedStats("extendedStats").field("value"))) - ) - .get(); - - assertHitCount(searchResponse, 10); - - Terms terms = searchResponse.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - List<? extends Terms.Bucket> buckets = terms.getBuckets(); - assertThat(buckets, notNullValue()); - assertThat(buckets.size(), equalTo(10)); - - for (int i = 0; i < 10; i++) { - Terms.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); - assertThat(bucket.getDocCount(), equalTo(1L)); - Filter filter = bucket.getAggregations().get("filter"); - assertThat(filter, notNullValue()); - assertThat(filter.getDocCount(), equalTo(0L)); - ExtendedStats extendedStats = filter.getAggregations().get("extendedStats"); - assertThat(extendedStats, notNullValue()); - assertThat(extendedStats.getMin(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(extendedStats.getMax(), equalTo(Double.NEGATIVE_INFINITY)); - assertThat(extendedStats.getAvg(), equalTo(Double.NaN)); - assertThat(extendedStats.getSum(), equalTo(0.0)); - assertThat(extendedStats.getCount(), equalTo(0L)); - assertThat(extendedStats.getStdDeviation(), equalTo(Double.NaN)); - assertThat(extendedStats.getStdDeviationPopulation(), equalTo(Double.NaN)); - assertThat(extendedStats.getStdDeviationSampling(), equalTo(Double.NaN)); - assertThat(extendedStats.getSumOfSquares(), equalTo(0.0)); - assertThat(extendedStats.getVariance(), equalTo(Double.NaN)); - assertThat(extendedStats.getVariancePopulation(), equalTo(Double.NaN)); - assertThat(extendedStats.getVarianceSampling(), equalTo(Double.NaN)); -
assertThat(extendedStats.getStdDeviationBound(Bounds.LOWER), equalTo(Double.NaN)); - assertThat(extendedStats.getStdDeviationBound(Bounds.UPPER), equalTo(Double.NaN)); - assertThat(extendedStats.getStdDeviationBound(Bounds.LOWER_POPULATION), equalTo(Double.NaN)); - assertThat(extendedStats.getStdDeviationBound(Bounds.UPPER_POPULATION), equalTo(Double.NaN)); - assertThat(extendedStats.getStdDeviationBound(Bounds.LOWER_SAMPLING), equalTo(Double.NaN)); - assertThat(extendedStats.getStdDeviationBound(Bounds.UPPER_SAMPLING), equalTo(Double.NaN)); - } + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + terms("terms").field("value") + .order(BucketOrder.compound(BucketOrder.aggregation("filter>extendedStats.avg", true))) + .subAggregation( + filter("filter", termQuery("value", 100)).subAggregation(extendedStats("extendedStats").field("value")) + ) + ), + response -> { + assertHitCount(response, 10); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); + assertThat(buckets, notNullValue()); + assertThat(buckets.size(), equalTo(10)); + + for (int i = 0; i < 10; i++) { + Terms.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); + assertThat(bucket.getDocCount(), equalTo(1L)); + Filter filter = bucket.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(0L)); + ExtendedStats extendedStats = filter.getAggregations().get("extendedStats"); + assertThat(extendedStats, notNullValue()); + assertThat(extendedStats.getMin(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(extendedStats.getMax(), equalTo(Double.NEGATIVE_INFINITY)); + assertThat(extendedStats.getAvg(), equalTo(Double.NaN)); + assertThat(extendedStats.getSum(), equalTo(0.0)); + assertThat(extendedStats.getCount(), equalTo(0L)); + assertThat(extendedStats.getStdDeviation(), equalTo(Double.NaN)); + assertThat(extendedStats.getStdDeviationPopulation(), equalTo(Double.NaN)); + assertThat(extendedStats.getStdDeviationSampling(), equalTo(Double.NaN)); + assertThat(extendedStats.getSumOfSquares(), equalTo(0.0)); + assertThat(extendedStats.getVariance(), equalTo(Double.NaN)); + assertThat(extendedStats.getVariancePopulation(), equalTo(Double.NaN)); + assertThat(extendedStats.getVarianceSampling(), equalTo(Double.NaN)); + assertThat(extendedStats.getStdDeviationBound(Bounds.LOWER), equalTo(Double.NaN)); + assertThat(extendedStats.getStdDeviationBound(Bounds.UPPER), equalTo(Double.NaN)); + assertThat(extendedStats.getStdDeviationBound(Bounds.LOWER_POPULATION), equalTo(Double.NaN)); + assertThat(extendedStats.getStdDeviationBound(Bounds.UPPER_POPULATION), equalTo(Double.NaN)); + assertThat(extendedStats.getStdDeviationBound(Bounds.LOWER_SAMPLING), equalTo(Double.NaN)); + assertThat(extendedStats.getStdDeviationBound(Bounds.UPPER_SAMPLING), equalTo(Double.NaN)); + } + } + ); } private void checkUpperLowerBounds(ExtendedStats stats, double sigma) { @@ -845,13 +925,13 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - extendedStats("foo").field("d") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", Collections.emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( +
prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + extendedStats("foo").field("d") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", Collections.emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -863,13 +943,13 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a deterministic script gets cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - extendedStats("foo").field("d") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + extendedStats("foo").field("d") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -881,8 +961,7 @@ public void testScriptCaching() throws Exception { ); // Ensure that non-scripted requests are cached as normal - r = prepareSearch("cache_test_idx").setSize(0).addAggregation(extendedStats("foo").field("d")).get(); - assertNoFailures(r); + assertNoFailures(prepareSearch("cache_test_idx").setSize(0).addAggregation(extendedStats("foo").field("d"))); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -893,5 +972,4 @@ public void testScriptCaching() throws Exception { equalTo(2L) ); } - } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsIT.java index 3aebbce43e1e1..f8b633dca1a10 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsIT.java @@ -8,13 +8,12 @@ package org.elasticsearch.search.aggregations.metrics; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.SpatialPoint; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.geo.RandomGeoGenerator; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; @@ -26,45 +25,42 @@ public class GeoBoundsIT extends SpatialBoundsAggregationTestBase { public void testSingleValuedFieldNearDateLine() { - SearchResponse response = prepareSearch(DATELINE_IDX_NAME).addAggregation( - boundsAgg(aggName(), SINGLE_VALUED_FIELD_NAME).wrapLongitude(false) - ).get(); - - assertNoFailures(response); - - GeoPoint geoValuesTopLeft = new GeoPoint(38, -179); - GeoPoint geoValuesBottomRight = new GeoPoint(-24, 178); - - GeoBounds geoBounds = response.getAggregations().get(aggName()); - assertThat(geoBounds, notNullValue()); - assertThat(geoBounds.getName(), equalTo(aggName())); - GeoPoint topLeft = geoBounds.topLeft(); - GeoPoint bottomRight = geoBounds.bottomRight(); - assertThat(topLeft.getY(), 
closeTo(geoValuesTopLeft.getY(), GEOHASH_TOLERANCE)); - assertThat(topLeft.getX(), closeTo(geoValuesTopLeft.getX(), GEOHASH_TOLERANCE)); - assertThat(bottomRight.getY(), closeTo(geoValuesBottomRight.getY(), GEOHASH_TOLERANCE)); - assertThat(bottomRight.getX(), closeTo(geoValuesBottomRight.getX(), GEOHASH_TOLERANCE)); + assertNoFailuresAndResponse( + prepareSearch(DATELINE_IDX_NAME).addAggregation(boundsAgg(aggName(), SINGLE_VALUED_FIELD_NAME).wrapLongitude(false)), + response -> { + GeoPoint geoValuesTopLeft = new GeoPoint(38, -179); + GeoPoint geoValuesBottomRight = new GeoPoint(-24, 178); + + GeoBounds geoBounds = response.getAggregations().get(aggName()); + assertThat(geoBounds, notNullValue()); + assertThat(geoBounds.getName(), equalTo(aggName())); + GeoPoint topLeft = geoBounds.topLeft(); + GeoPoint bottomRight = geoBounds.bottomRight(); + assertThat(topLeft.getY(), closeTo(geoValuesTopLeft.getY(), GEOHASH_TOLERANCE)); + assertThat(topLeft.getX(), closeTo(geoValuesTopLeft.getX(), GEOHASH_TOLERANCE)); + assertThat(bottomRight.getY(), closeTo(geoValuesBottomRight.getY(), GEOHASH_TOLERANCE)); + assertThat(bottomRight.getX(), closeTo(geoValuesBottomRight.getX(), GEOHASH_TOLERANCE)); + } + ); } public void testSingleValuedFieldNearDateLineWrapLongitude() { - GeoPoint geoValuesTopLeft = new GeoPoint(38, 170); GeoPoint geoValuesBottomRight = new GeoPoint(-24, -175); - SearchResponse response = prepareSearch(DATELINE_IDX_NAME).addAggregation( - boundsAgg(aggName(), SINGLE_VALUED_FIELD_NAME).wrapLongitude(true) - ).get(); - - assertNoFailures(response); - - GeoBounds geoBounds = response.getAggregations().get(aggName()); - assertThat(geoBounds, notNullValue()); - assertThat(geoBounds.getName(), equalTo(aggName())); - GeoPoint topLeft = geoBounds.topLeft(); - GeoPoint bottomRight = geoBounds.bottomRight(); - assertThat(topLeft.getY(), closeTo(geoValuesTopLeft.getY(), GEOHASH_TOLERANCE)); - assertThat(topLeft.getX(), closeTo(geoValuesTopLeft.getX(), GEOHASH_TOLERANCE)); - assertThat(bottomRight.getY(), closeTo(geoValuesBottomRight.getY(), GEOHASH_TOLERANCE)); - assertThat(bottomRight.getX(), closeTo(geoValuesBottomRight.getX(), GEOHASH_TOLERANCE)); + assertNoFailuresAndResponse( + prepareSearch(DATELINE_IDX_NAME).addAggregation(boundsAgg(aggName(), SINGLE_VALUED_FIELD_NAME).wrapLongitude(true)), + response -> { + GeoBounds geoBounds = response.getAggregations().get(aggName()); + assertThat(geoBounds, notNullValue()); + assertThat(geoBounds.getName(), equalTo(aggName())); + GeoPoint topLeft = geoBounds.topLeft(); + GeoPoint bottomRight = geoBounds.bottomRight(); + assertThat(topLeft.getY(), closeTo(geoValuesTopLeft.getY(), GEOHASH_TOLERANCE)); + assertThat(topLeft.getX(), closeTo(geoValuesTopLeft.getX(), GEOHASH_TOLERANCE)); + assertThat(bottomRight.getY(), closeTo(geoValuesBottomRight.getY(), GEOHASH_TOLERANCE)); + assertThat(bottomRight.getX(), closeTo(geoValuesBottomRight.getX(), GEOHASH_TOLERANCE)); + } + ); } @Override diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidIT.java index 4b12cddde691f..a7d32863718e3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.metrics; -import 
org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.SpatialPoint; import org.elasticsearch.search.aggregations.bucket.geogrid.GeoGrid; @@ -18,7 +17,7 @@ import java.util.List; import static org.elasticsearch.search.aggregations.AggregationBuilders.geohashGrid; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; @@ -29,21 +28,24 @@ public class GeoCentroidIT extends CentroidAggregationTestBase { public void testSingleValueFieldAsSubAggToGeohashGrid() { - SearchResponse response = prepareSearch(HIGH_CARD_IDX_NAME).addAggregation( - geohashGrid("geoGrid").field(SINGLE_VALUED_FIELD_NAME).subAggregation(centroidAgg(aggName()).field(SINGLE_VALUED_FIELD_NAME)) - ).get(); - assertNoFailures(response); - - GeoGrid grid = response.getAggregations().get("geoGrid"); - assertThat(grid, notNullValue()); - assertThat(grid.getName(), equalTo("geoGrid")); - List<? extends GeoGrid.Bucket> buckets = grid.getBuckets(); - for (GeoGrid.Bucket cell : buckets) { - String geohash = cell.getKeyAsString(); - SpatialPoint expectedCentroid = expectedCentroidsForGeoHash.get(geohash); - GeoCentroid centroidAgg = cell.getAggregations().get(aggName()); - assertSameCentroid(centroidAgg.centroid(), expectedCentroid); - } + assertNoFailuresAndResponse( + prepareSearch(HIGH_CARD_IDX_NAME).addAggregation( + geohashGrid("geoGrid").field(SINGLE_VALUED_FIELD_NAME) + .subAggregation(centroidAgg(aggName()).field(SINGLE_VALUED_FIELD_NAME)) + ), + response -> { + GeoGrid grid = response.getAggregations().get("geoGrid"); + assertThat(grid, notNullValue()); + assertThat(grid.getName(), equalTo("geoGrid")); + List<? extends GeoGrid.Bucket> buckets = grid.getBuckets(); + for (GeoGrid.Bucket cell : buckets) { + String geohash = cell.getKeyAsString(); + SpatialPoint expectedCentroid = expectedCentroidsForGeoHash.get(geohash); + GeoCentroid centroidAgg = cell.getAggregations().get(aggName()); + assertSameCentroid(centroidAgg.centroid(), expectedCentroid); + } + } + ); } @Override diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java index 7d5e446d591bb..d4b5be3045cdf 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.metrics; import org.apache.logging.log4j.LogManager; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; @@ -40,6 +39,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -99,70
+99,76 @@ private void assertConsistent(double[] pcts, PercentileRanks values, long minVal @Override public void testEmptyAggregation() throws Exception { int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field("value") - .interval(1L) - .minDocCount(0) - .subAggregation( - percentileRanks("percentile_ranks", new double[] { 10, 15 }).field("value") - .method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - ) - ) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - Histogram histo = searchResponse.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - Histogram.Bucket bucket = histo.getBuckets().get(1); - assertThat(bucket, notNullValue()); - - PercentileRanks reversePercentiles = bucket.getAggregations().get("percentile_ranks"); - assertThat(reversePercentiles, notNullValue()); - assertThat(reversePercentiles.getName(), equalTo("percentile_ranks")); - assertThat(reversePercentiles.percent(10), equalTo(Double.NaN)); - assertThat(reversePercentiles.percent(15), equalTo(Double.NaN)); + assertResponse( + prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(1L) + .minDocCount(0) + .subAggregation( + percentileRanks("percentile_ranks", new double[] { 10, 15 }).field("value") + .method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + ) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + Histogram.Bucket bucket = histo.getBuckets().get(1); + assertThat(bucket, notNullValue()); + + PercentileRanks reversePercentiles = bucket.getAggregations().get("percentile_ranks"); + assertThat(reversePercentiles, notNullValue()); + assertThat(reversePercentiles.getName(), equalTo("percentile_ranks")); + assertThat(reversePercentiles.percent(10), equalTo(Double.NaN)); + assertThat(reversePercentiles.percent(15), equalTo(Double.NaN)); + } + ); } @Override public void testUnmapped() throws Exception { int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = prepareSearch("idx_unmapped").setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", new double[] { 0, 10, 15, 100 }).method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .field("value") - ) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); - - PercentileRanks reversePercentiles = searchResponse.getAggregations().get("percentile_ranks"); - assertThat(reversePercentiles, notNullValue()); - assertThat(reversePercentiles.getName(), equalTo("percentile_ranks")); - assertThat(reversePercentiles.percent(0), equalTo(Double.NaN)); - assertThat(reversePercentiles.percent(10), equalTo(Double.NaN)); - assertThat(reversePercentiles.percent(15), equalTo(Double.NaN)); - assertThat(reversePercentiles.percent(100), equalTo(Double.NaN)); + assertResponse( + prepareSearch("idx_unmapped").setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", new double[] { 0, 10, 15, 100 }).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .field("value") + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(0L)); + + PercentileRanks reversePercentiles = 
response.getAggregations().get("percentile_ranks"); + assertThat(reversePercentiles, notNullValue()); + assertThat(reversePercentiles.getName(), equalTo("percentile_ranks")); + assertThat(reversePercentiles.percent(0), equalTo(Double.NaN)); + assertThat(reversePercentiles.percent(10), equalTo(Double.NaN)); + assertThat(reversePercentiles.percent(15), equalTo(Double.NaN)); + assertThat(reversePercentiles.percent(100), equalTo(Double.NaN)); + } + ); } @Override public void testSingleValuedField() throws Exception { int sigDigits = randomSignificantDigits(); final double[] pcts = randomPercents(minValue, maxValue); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .field("value") - ) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValue, maxValue, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .field("value") + ), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValue, maxValue, sigDigits); + } + ); } public void testNullValuesField() throws Exception { @@ -201,84 +207,91 @@ public void testEmptyValuesField() throws Exception { public void testSingleValuedFieldGetProperty() throws Exception { int sigDigits = randomSignificantDigits(); final double[] pcts = randomPercents(minValue, maxValue); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - global("global").subAggregation( - percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .field("value") - ) - ) - .get(); - - assertHitCount(searchResponse, 10); - - Global global = searchResponse.getAggregations().get("global"); - assertThat(global, notNullValue()); - assertThat(global.getName(), equalTo("global")); - assertThat(global.getDocCount(), equalTo(10L)); - assertThat(global.getAggregations(), notNullValue()); - assertThat(global.getAggregations().asMap().size(), equalTo(1)); - - PercentileRanks values = global.getAggregations().get("percentile_ranks"); - assertThat(values, notNullValue()); - assertThat(values.getName(), equalTo("percentile_ranks")); - assertThat(((InternalAggregation) global).getProperty("percentile_ranks"), sameInstance(values)); - + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + global("global").subAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .field("value") + ) + ), + response -> { + assertHitCount(response, 10); + + Global global = response.getAggregations().get("global"); + assertThat(global, notNullValue()); + assertThat(global.getName(), equalTo("global")); + assertThat(global.getDocCount(), equalTo(10L)); + assertThat(global.getAggregations(), notNullValue()); + assertThat(global.getAggregations().asMap().size(), equalTo(1)); + + PercentileRanks values = global.getAggregations().get("percentile_ranks"); + assertThat(values, notNullValue()); + assertThat(values.getName(), 
equalTo("percentile_ranks")); + assertThat(((InternalAggregation) global).getProperty("percentile_ranks"), sameInstance(values)); + } + ); } public void testSingleValuedFieldOutsideRange() throws Exception { int sigDigits = randomSignificantDigits(); final double[] pcts = new double[] { minValue - 1, maxValue + 1 }; - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .field("value") - ) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValue, maxValue, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .field("value") + ), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValue, maxValue, sigDigits); + } + ); } @Override public void testSingleValuedFieldPartiallyUnmapped() throws Exception { int sigDigits = randomSignificantDigits(); final double[] pcts = randomPercents(minValue, maxValue); - SearchResponse searchResponse = prepareSearch("idx", "idx_unmapped").setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .field("value") - ) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValue, maxValue, sigDigits); + assertResponse( + prepareSearch("idx", "idx_unmapped").setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .field("value") + ), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValue, maxValue, sigDigits); + } + ); } @Override public void testSingleValuedFieldWithValueScript() throws Exception { int sigDigits = randomSignificantDigits(); final double[] pcts = randomPercents(minValue - 1, maxValue - 1); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValue - 1, maxValue - 1, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + ), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, 
minValue - 1, maxValue - 1, sigDigits); + } + ); } @Override @@ -287,74 +300,82 @@ public void testSingleValuedFieldWithValueScriptWithParams() throws Exception { Map<String, Object> params = new HashMap<>(); params.put("dec", 1); final double[] pcts = randomPercents(minValue - 1, maxValue - 1); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValue - 1, maxValue - 1, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) + ), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValue - 1, maxValue - 1, sigDigits); + } + ); } @Override public void testMultiValuedField() throws Exception { int sigDigits = randomSignificantDigits(); final double[] pcts = randomPercents(minValues, maxValues); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .field("values") - ) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValues, maxValues, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .field("values") + ), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValues, maxValues, sigDigits); + } + ); } @Override public void testMultiValuedFieldWithValueScript() throws Exception { int sigDigits = randomSignificantDigits(); final double[] pcts = randomPercents(minValues - 1, maxValues - 1); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValues - 1, maxValues - 1, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + ), + response -> { +
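
// For reference: every hunk in this migration is the same mechanical rewrite -
// "SearchResponse r = builder.get(); <assertions>" becomes
// "assertResponse(builder, response -> { <assertions> })". A simplified sketch of the
// contract being relied on; the real helper lives in
// org.elasticsearch.test.hamcrest.ElasticsearchAssertions, and the try/finally
// release shown here is the assumption that motivates the rewrite, not a copy of it.
import java.util.function.Consumer;

import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionResponse;

final class AssertResponseSketch {
    static <R extends ActionResponse> void assertResponse(ActionRequestBuilder<?, R> builder, Consumer<R> consumer) {
        R response = builder.get();      // executes the request and takes ownership of the response
        try {
            consumer.accept(response);   // assertions run while the response is still live
        } finally {
            response.decRef();           // released even when an assertion throws (assumes a ref-counted response)
        }
    }
}
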
assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValues - 1, maxValues - 1, sigDigits); + } + ); } public void testMultiValuedFieldWithValueScriptReverse() throws Exception { int sigDigits = randomSignificantDigits(); final double[] pcts = randomPercents(20 - maxValues, 20 - minValues); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "20 - _value", emptyMap())) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, 20 - maxValues, 20 - minValues, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "20 - _value", emptyMap())) + ), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, 20 - maxValues, 20 - minValues, sigDigits); + } + ); } @Override @@ -363,37 +384,41 @@ public void testMultiValuedFieldWithValueScriptWithParams() throws Exception { Map params = new HashMap<>(); params.put("dec", 1); final double[] pcts = randomPercents(minValues - 1, maxValues - 1); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValues - 1, maxValues - 1, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) + ), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValues - 1, maxValues - 1, sigDigits); + } + ); } @Override public void testScriptSingleValued() throws Exception { int sigDigits = randomSignificantDigits(); final double[] pcts = randomPercents(minValue, maxValue); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", emptyMap())) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValue, maxValue, 
sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", emptyMap())) + ), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValue, maxValue, sigDigits); + } + ); } @Override @@ -405,18 +430,20 @@ public void testScriptSingleValuedWithParams() throws Exception { Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value - dec", params); final double[] pcts = randomPercents(minValue - 1, maxValue - 1); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .script(script) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValue - 1, maxValue - 1, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .script(script) + ), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValue - 1, maxValue - 1, sigDigits); + } + ); } @Override @@ -426,18 +453,20 @@ public void testScriptMultiValued() throws Exception { Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['values']", emptyMap()); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .script(script) - ) - .get(); - - assertHitCount(searchResponse, 10); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .script(script) + ), + response -> { + assertHitCount(response, 10); - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValues, maxValues, sigDigits); + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValues, maxValues, sigDigits); + } + ); } @Override @@ -446,87 +475,93 @@ public void testScriptMultiValuedWithParams() throws Exception { Script script = AggregationTestScriptsPlugin.DECREMENT_ALL_VALUES; final double[] pcts = randomPercents(minValues - 1, maxValues - 1); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .script(script) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValues - 1, maxValues - 1, sigDigits); + assertResponse( + 
prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .script(script) + ), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValues - 1, maxValues - 1, sigDigits); + } + ); } public void testOrderBySubAggregation() { int sigDigits = randomSignificantDigits(); boolean asc = randomBoolean(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field("value") - .interval(2L) - .subAggregation( - percentileRanks("percentile_ranks", new double[] { 99 }).field("value") - .method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - ) - .order(BucketOrder.aggregation("percentile_ranks", "99", asc)) - ) - .get(); - - assertHitCount(searchResponse, 10); - - Histogram histo = searchResponse.getAggregations().get("histo"); - double previous = asc ? Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY; - for (Histogram.Bucket bucket : histo.getBuckets()) { - PercentileRanks values = bucket.getAggregations().get("percentile_ranks"); - double p99 = values.percent(99); - if (asc) { - assertThat(p99, greaterThanOrEqualTo(previous)); - } else { - assertThat(p99, lessThanOrEqualTo(previous)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(2L) + .subAggregation( + percentileRanks("percentile_ranks", new double[] { 99 }).field("value") + .method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + ) + .order(BucketOrder.aggregation("percentile_ranks", "99", asc)) + ), + response -> { + assertHitCount(response, 10); + + Histogram histo = response.getAggregations().get("histo"); + double previous = asc ? 
Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY; + for (Histogram.Bucket bucket : histo.getBuckets()) { + PercentileRanks values = bucket.getAggregations().get("percentile_ranks"); + double p99 = values.percent(99); + if (asc) { + assertThat(p99, greaterThanOrEqualTo(previous)); + } else { + assertThat(p99, lessThanOrEqualTo(previous)); + } + previous = p99; + } } - previous = p99; - } + ); } @Override public void testOrderByEmptyAggregation() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - terms("terms").field("value") - .order(BucketOrder.compound(BucketOrder.aggregation("filter>ranks.99", true))) - .subAggregation( - filter("filter", termQuery("value", 100)).subAggregation( - percentileRanks("ranks", new double[] { 99 }).method(PercentilesMethod.HDR).field("value") + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + terms("terms").field("value") + .order(BucketOrder.compound(BucketOrder.aggregation("filter>ranks.99", true))) + .subAggregation( + filter("filter", termQuery("value", 100)).subAggregation( + percentileRanks("ranks", new double[] { 99 }).method(PercentilesMethod.HDR).field("value") + ) ) - ) - ) - .get(); - - assertHitCount(searchResponse, 10); - - Terms terms = searchResponse.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - List<? extends Terms.Bucket> buckets = terms.getBuckets(); - assertThat(buckets, notNullValue()); - assertThat(buckets.size(), equalTo(10)); - - for (int i = 0; i < 10; i++) { - Terms.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); - assertThat(bucket.getDocCount(), equalTo(1L)); - Filter filter = bucket.getAggregations().get("filter"); - assertThat(filter, notNullValue()); - assertThat(filter.getDocCount(), equalTo(0L)); - PercentileRanks ranks = filter.getAggregations().get("ranks"); - assertThat(ranks, notNullValue()); - assertThat(ranks.percent(99), equalTo(Double.NaN)); - - } + ), + response -> { + assertHitCount(response, 10); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); + assertThat(buckets, notNullValue()); + assertThat(buckets.size(), equalTo(10)); + + for (int i = 0; i < 10; i++) { + Terms.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); + assertThat(bucket.getDocCount(), equalTo(1L)); + Filter filter = bucket.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(0L)); + PercentileRanks ranks = filter.getAggregations().get("ranks"); + assertThat(ranks, notNullValue()); + assertThat(ranks.percent(99), equalTo(Double.NaN)); + + } + } + ); } /** @@ -555,14 +590,14 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - percentileRanks("foo", new double[] { 50.0 }).method(PercentilesMethod.HDR) - .field("d") - .script(new Script(ScriptType.INLINE,
AggregationTestScriptsPlugin.NAME, "Math.random()", emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -574,14 +609,14 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a deterministic script gets cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - percentileRanks("foo", new double[] { 50.0 }).method(PercentilesMethod.HDR) - .field("d") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + percentileRanks("foo", new double[] { 50.0 }).method(PercentilesMethod.HDR) + .field("d") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -593,10 +628,10 @@ public void testScriptCaching() throws Exception { ); // Ensure that non-scripted requests are cached as normal - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation(percentileRanks("foo", new double[] { 50.0 }).method(PercentilesMethod.HDR).field("d")) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation(percentileRanks("foo", new double[] { 50.0 }).method(PercentilesMethod.HDR).field("d")) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -607,5 +642,4 @@ public void testScriptCaching() throws Exception { equalTo(2L) ); } - } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java index 3ac50c7b5e104..9eac8d4a06a43 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.metrics; import org.apache.logging.log4j.LogManager; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.plugins.Plugin; @@ -42,6 +41,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -102,143 +102,154 @@ private void assertConsistent(double[] pcts, Percentiles percentiles, long minVa @Override public void testEmptyAggregation() throws Exception { int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field("value") - .interval(1L) - .minDocCount(0) - .subAggregation( - percentiles("percentiles").field("value") - 
.numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .percentiles(10, 15) - ) - ) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - Histogram histo = searchResponse.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - Histogram.Bucket bucket = histo.getBuckets().get(1); - assertThat(bucket, notNullValue()); - - Percentiles percentiles = bucket.getAggregations().get("percentiles"); - assertThat(percentiles, notNullValue()); - assertThat(percentiles.getName(), equalTo("percentiles")); - assertThat(percentiles.percentile(10), equalTo(Double.NaN)); - assertThat(percentiles.percentile(15), equalTo(Double.NaN)); + assertResponse( + prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(1L) + .minDocCount(0) + .subAggregation( + percentiles("percentiles").field("value") + .numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .percentiles(10, 15) + ) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + Histogram.Bucket bucket = histo.getBuckets().get(1); + assertThat(bucket, notNullValue()); + + Percentiles percentiles = bucket.getAggregations().get("percentiles"); + assertThat(percentiles, notNullValue()); + assertThat(percentiles.getName(), equalTo("percentiles")); + assertThat(percentiles.percentile(10), equalTo(Double.NaN)); + assertThat(percentiles.percentile(15), equalTo(Double.NaN)); + } + ); } @Override public void testUnmapped() throws Exception { int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = prepareSearch("idx_unmapped").setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .field("value") - .percentiles(0, 10, 15, 100) - ) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); - - Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertThat(percentiles, notNullValue()); - assertThat(percentiles.getName(), equalTo("percentiles")); - assertThat(percentiles.percentile(0), equalTo(Double.NaN)); - assertThat(percentiles.percentile(10), equalTo(Double.NaN)); - assertThat(percentiles.percentile(15), equalTo(Double.NaN)); - assertThat(percentiles.percentile(100), equalTo(Double.NaN)); + assertResponse( + prepareSearch("idx_unmapped").setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .field("value") + .percentiles(0, 10, 15, 100) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(0L)); + + Percentiles percentiles = response.getAggregations().get("percentiles"); + assertThat(percentiles, notNullValue()); + assertThat(percentiles.getName(), equalTo("percentiles")); + assertThat(percentiles.percentile(0), equalTo(Double.NaN)); + assertThat(percentiles.percentile(10), equalTo(Double.NaN)); + assertThat(percentiles.percentile(15), equalTo(Double.NaN)); + assertThat(percentiles.percentile(100), equalTo(Double.NaN)); + } + ); } @Override public void testSingleValuedField() throws Exception { final double[] pcts = randomPercentiles(); int sigDigits = randomIntBetween(1, 5); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - 
percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .field("value") - .percentiles(pcts) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValue, maxValue, sigDigits); - } - - @Override - public void testSingleValuedFieldGetProperty() throws Exception { - final double[] pcts = randomPercentiles(); - int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - global("global").subAggregation( + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) .method(PercentilesMethod.HDR) .field("value") .percentiles(pcts) - ) - ) - .get(); - - assertHitCount(searchResponse, 10); - - Global global = searchResponse.getAggregations().get("global"); - assertThat(global, notNullValue()); - assertThat(global.getName(), equalTo("global")); - assertThat(global.getDocCount(), equalTo(10L)); - assertThat(global.getAggregations(), notNullValue()); - assertThat(global.getAggregations().asMap().size(), equalTo(1)); + ), + response -> { + assertHitCount(response, 10); - Percentiles percentiles = global.getAggregations().get("percentiles"); - assertThat(percentiles, notNullValue()); - assertThat(percentiles.getName(), equalTo("percentiles")); - assertThat(((InternalAggregation) global).getProperty("percentiles"), sameInstance(percentiles)); + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValue, maxValue, sigDigits); + } + ); + } + @Override + public void testSingleValuedFieldGetProperty() throws Exception { + final double[] pcts = randomPercentiles(); + int sigDigits = randomSignificantDigits(); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + global("global").subAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .field("value") + .percentiles(pcts) + ) + ), + response -> { + assertHitCount(response, 10); + + Global global = response.getAggregations().get("global"); + assertThat(global, notNullValue()); + assertThat(global.getName(), equalTo("global")); + assertThat(global.getDocCount(), equalTo(10L)); + assertThat(global.getAggregations(), notNullValue()); + assertThat(global.getAggregations().asMap().size(), equalTo(1)); + + Percentiles percentiles = global.getAggregations().get("percentiles"); + assertThat(percentiles, notNullValue()); + assertThat(percentiles.getName(), equalTo("percentiles")); + assertThat(((InternalAggregation) global).getProperty("percentiles"), sameInstance(percentiles)); + } + ); } @Override public void testSingleValuedFieldPartiallyUnmapped() throws Exception { final double[] pcts = randomPercentiles(); int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = prepareSearch("idx", "idx_unmapped").setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .field("value") - .percentiles(pcts) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValue, maxValue, sigDigits); + assertResponse( + prepareSearch("idx", 
"idx_unmapped").setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .field("value") + .percentiles(pcts) + ), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValue, maxValue, sigDigits); + } + ); } @Override public void testSingleValuedFieldWithValueScript() throws Exception { final double[] pcts = randomPercentiles(); int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) - .percentiles(pcts) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValue - 1, maxValue - 1, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + .percentiles(pcts) + ), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValue - 1, maxValue - 1, sigDigits); + } + ); } @Override @@ -248,78 +259,86 @@ public void testSingleValuedFieldWithValueScriptWithParams() throws Exception { final double[] pcts = randomPercentiles(); int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) - .percentiles(pcts) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValue - 1, maxValue - 1, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) + .percentiles(pcts) + ), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValue - 1, maxValue - 1, sigDigits); + } + ); } @Override public void testMultiValuedField() throws Exception { final double[] pcts = randomPercentiles(); int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .field("values") - .percentiles(pcts) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = 
searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValues, maxValues, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .field("values") + .percentiles(pcts) + ), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValues, maxValues, sigDigits); + } + ); } @Override public void testMultiValuedFieldWithValueScript() throws Exception { final double[] pcts = randomPercentiles(); int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) - .percentiles(pcts) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValues - 1, maxValues - 1, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + .percentiles(pcts) + ), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValues - 1, maxValues - 1, sigDigits); + } + ); } public void testMultiValuedFieldWithValueScriptReverse() throws Exception { final double[] pcts = randomPercentiles(); int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "20 - _value", emptyMap())) - .percentiles(pcts) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, 20 - maxValues, 20 - minValues, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "20 - _value", emptyMap())) + .percentiles(pcts) + ), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, 20 - maxValues, 20 - minValues, sigDigits); + } + ); } @Override @@ -329,39 +348,43 @@ public void testMultiValuedFieldWithValueScriptWithParams() throws Exception { final double[] pcts = randomPercentiles(); int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) - 
.method(PercentilesMethod.HDR) - .field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) - .percentiles(pcts) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValues - 1, maxValues - 1, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) + .percentiles(pcts) + ), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValues - 1, maxValues - 1, sigDigits); + } + ); } @Override public void testScriptSingleValued() throws Exception { final double[] pcts = randomPercentiles(); int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", emptyMap())) - .percentiles(pcts) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValue, maxValue, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", emptyMap())) + .percentiles(pcts) + ), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValue, maxValue, sigDigits); + } + ); } @Override @@ -373,19 +396,21 @@ public void testScriptSingleValuedWithParams() throws Exception { final double[] pcts = randomPercentiles(); int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .script(script) - .percentiles(pcts) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValue - 1, maxValue - 1, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .script(script) + .percentiles(pcts) + ), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValue - 1, maxValue - 1, sigDigits); + } + ); } @Override @@ -395,19 +420,21 @@ public void testScriptMultiValued() throws Exception { Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['values']", emptyMap()); - SearchResponse searchResponse = 
prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .script(script) - .percentiles(pcts) - ) - .get(); - - assertHitCount(searchResponse, 10); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .script(script) + .percentiles(pcts) + ), + response -> { + assertHitCount(response, 10); - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValues, maxValues, sigDigits); + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValues, maxValues, sigDigits); + } + ); } @Override @@ -416,89 +443,96 @@ public void testScriptMultiValuedWithParams() throws Exception { final double[] pcts = randomPercentiles(); int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .script(script) - .percentiles(pcts) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValues - 1, maxValues - 1, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .script(script) + .percentiles(pcts) + ), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValues - 1, maxValues - 1, sigDigits); + + } + ); } public void testOrderBySubAggregation() { int sigDigits = randomSignificantDigits(); boolean asc = randomBoolean(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field("value") - .interval(2L) - .subAggregation( - percentiles("percentiles").field("value") - .method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .percentiles(99) - ) - .order(BucketOrder.aggregation("percentiles", "99", asc)) - ) - .get(); - - assertHitCount(searchResponse, 10); - - Histogram histo = searchResponse.getAggregations().get("histo"); - double previous = asc ? Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY; - for (Histogram.Bucket bucket : histo.getBuckets()) { - Percentiles percentiles = bucket.getAggregations().get("percentiles"); - double p99 = percentiles.percentile(99); - if (asc) { - assertThat(p99, greaterThanOrEqualTo(previous)); - } else { - assertThat(p99, lessThanOrEqualTo(previous)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(2L) + .subAggregation( + percentiles("percentiles").field("value") + .method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .percentiles(99) + ) + .order(BucketOrder.aggregation("percentiles", "99", asc)) + ), + response -> { + assertHitCount(response, 10); + + Histogram histo = response.getAggregations().get("histo"); + double previous = asc ? 
Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY; + for (Histogram.Bucket bucket : histo.getBuckets()) { + Percentiles percentiles = bucket.getAggregations().get("percentiles"); + double p99 = percentiles.percentile(99); + if (asc) { + assertThat(p99, greaterThanOrEqualTo(previous)); + } else { + assertThat(p99, lessThanOrEqualTo(previous)); + } + previous = p99; + } } - previous = p99; - } + ); } @Override public void testOrderByEmptyAggregation() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - terms("terms").field("value") - .order(BucketOrder.compound(BucketOrder.aggregation("filter>percentiles.99", true))) - .subAggregation( - filter("filter", termQuery("value", 100)).subAggregation( - percentiles("percentiles").method(PercentilesMethod.HDR).field("value") + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + terms("terms").field("value") + .order(BucketOrder.compound(BucketOrder.aggregation("filter>percentiles.99", true))) + .subAggregation( + filter("filter", termQuery("value", 100)).subAggregation( + percentiles("percentiles").method(PercentilesMethod.HDR).field("value") + ) ) - ) - ) - .get(); - - assertHitCount(searchResponse, 10); - - Terms terms = searchResponse.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - List buckets = terms.getBuckets(); - assertThat(buckets, notNullValue()); - assertThat(buckets.size(), equalTo(10)); - - for (int i = 0; i < 10; i++) { - Terms.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); - assertThat(bucket.getDocCount(), equalTo(1L)); - Filter filter = bucket.getAggregations().get("filter"); - assertThat(filter, notNullValue()); - assertThat(filter.getDocCount(), equalTo(0L)); - Percentiles percentiles = filter.getAggregations().get("percentiles"); - assertThat(percentiles, notNullValue()); - assertThat(percentiles.percentile(99), equalTo(Double.NaN)); - - } + ), + response -> { + assertHitCount(response, 10); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + List buckets = terms.getBuckets(); + assertThat(buckets, notNullValue()); + assertThat(buckets.size(), equalTo(10)); + + for (int i = 0; i < 10; i++) { + Terms.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); + assertThat(bucket.getDocCount(), equalTo(1L)); + Filter filter = bucket.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(0L)); + Percentiles percentiles = filter.getAggregations().get("percentiles"); + assertThat(percentiles, notNullValue()); + assertThat(percentiles.percentile(99), equalTo(Double.NaN)); + + } + } + ); } /** @@ -527,15 +561,15 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - percentiles("foo").method(PercentilesMethod.HDR) - .field("d") - .percentiles(50.0) - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + percentiles("foo").method(PercentilesMethod.HDR) + .field("d") + .percentiles(50.0) + .script(new Script(ScriptType.INLINE, 
AggregationTestScriptsPlugin.NAME, "Math.random()", emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -547,15 +581,15 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a deterministic script gets cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - percentiles("foo").method(PercentilesMethod.HDR) - .field("d") - .percentiles(50.0) - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + percentiles("foo").method(PercentilesMethod.HDR) + .field("d") + .percentiles(50.0) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -567,10 +601,10 @@ public void testScriptCaching() throws Exception { ); // Ensure that non-scripted requests are cached as normal - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation(percentiles("foo").method(PercentilesMethod.HDR).field("d").percentiles(50.0)) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation(percentiles("foo").method(PercentilesMethod.HDR).field("d").percentiles(50.0)) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -581,5 +615,4 @@ public void testScriptCaching() throws Exception { equalTo(2L) ); } - } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java index dae90424495a3..f494a339a7a71 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; @@ -46,6 +45,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; @@ -138,21 +138,24 @@ private static MedianAbsoluteDeviationAggregationBuilder randomBuilder() { @Override public void testEmptyAggregation() throws Exception { - final SearchResponse response = prepareSearch("empty_bucket_idx").addAggregation( - histogram("histogram").field("value").interval(1).minDocCount(0).subAggregation(randomBuilder().field("value")) - ).get(); - - assertHitCount(response, 2); - - final Histogram histogram = 
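The testScriptCaching hunks above verify cache behavior indirectly: a search whose script is nondeterministic (`Math.random()`) must not be stored in the request cache, while deterministic-script and script-free searches must be, and the proof is read off the index's request-cache counters. A hypothetical helper condensing that repeated assertion, built on the same `indicesAdmin().prepareStats(...)` call the hunks use; the helper name and signature are illustrative, not part of the diff:

```java
import org.elasticsearch.index.cache.request.RequestCacheStats;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;

// Illustrative condensation of the cache assertions repeated in testScriptCaching.
private void assertRequestCacheCounts(String index, long expectedHits, long expectedMisses) {
    RequestCacheStats cache = indicesAdmin().prepareStats(index)
        .setRequestCache(true)
        .get()
        .getTotal()
        .getRequestCache();
    assertThat(cache.getHitCount(), equalTo(expectedHits));    // searches served from cache
    assertThat(cache.getMissCount(), equalTo(expectedMisses)); // cacheable searches stored
}
```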
response.getAggregations().get("histogram"); - assertThat(histogram, notNullValue()); - final Histogram.Bucket bucket = histogram.getBuckets().get(1); - assertThat(bucket, notNullValue()); - - final MedianAbsoluteDeviation mad = bucket.getAggregations().get("mad"); - assertThat(mad, notNullValue()); - assertThat(mad.getName(), is("mad")); - assertThat(mad.getMedianAbsoluteDeviation(), is(Double.NaN)); + assertResponse( + prepareSearch("empty_bucket_idx").addAggregation( + histogram("histogram").field("value").interval(1).minDocCount(0).subAggregation(randomBuilder().field("value")) + ), + response -> { + assertHitCount(response, 2); + + final Histogram histogram = response.getAggregations().get("histogram"); + assertThat(histogram, notNullValue()); + final Histogram.Bucket bucket = histogram.getBuckets().get(1); + assertThat(bucket, notNullValue()); + + final MedianAbsoluteDeviation mad = bucket.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getName(), is("mad")); + assertThat(mad.getMedianAbsoluteDeviation(), is(Double.NaN)); + } + ); } @Override @@ -162,68 +165,72 @@ public void testUnmapped() throws Exception { @Override public void testSingleValuedField() throws Exception { - final SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(randomBuilder().field("value")).get(); + assertResponse(prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(randomBuilder().field("value")), response -> { + assertHitCount(response, NUMBER_OF_DOCS); - assertHitCount(response, NUMBER_OF_DOCS); - - final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); - assertThat(mad, notNullValue()); - assertThat(mad.getName(), is("mad")); - assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(singleValueExactMAD)); + final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getName(), is("mad")); + assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(singleValueExactMAD)); + }); } @Override public void testSingleValuedFieldGetProperty() throws Exception { - final SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(global("global").subAggregation(randomBuilder().field("value"))) - .get(); - - assertHitCount(response, NUMBER_OF_DOCS); - - final Global global = response.getAggregations().get("global"); - assertThat(global, notNullValue()); - assertThat(global.getName(), is("global")); - assertThat(global.getDocCount(), is((long) NUMBER_OF_DOCS)); - assertThat(global.getAggregations(), notNullValue()); - assertThat(global.getAggregations().asMap().entrySet(), hasSize(1)); - - final MedianAbsoluteDeviation mad = global.getAggregations().get("mad"); - assertThat(mad, notNullValue()); - assertThat(mad.getName(), is("mad")); - assertThat(((InternalAggregation) global).getProperty("mad"), sameInstance(mad)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(global("global").subAggregation(randomBuilder().field("value"))), + response -> { + assertHitCount(response, NUMBER_OF_DOCS); + + final Global global = response.getAggregations().get("global"); + assertThat(global, notNullValue()); + assertThat(global.getName(), is("global")); + assertThat(global.getDocCount(), is((long) NUMBER_OF_DOCS)); + assertThat(global.getAggregations(), notNullValue()); + assertThat(global.getAggregations().asMap().entrySet(), hasSize(1)); + + final MedianAbsoluteDeviation mad = 
global.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getName(), is("mad")); + assertThat(((InternalAggregation) global).getProperty("mad"), sameInstance(mad)); + } + ); } @Override public void testSingleValuedFieldPartiallyUnmapped() throws Exception { - final SearchResponse response = prepareSearch("idx", "idx_unmapped").setQuery(matchAllQuery()) - .addAggregation(randomBuilder().field("value")) - .get(); - - assertHitCount(response, NUMBER_OF_DOCS); - - final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); - assertThat(mad, notNullValue()); - assertThat(mad.getName(), is("mad")); - assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(singleValueExactMAD)); + assertResponse( + prepareSearch("idx", "idx_unmapped").setQuery(matchAllQuery()).addAggregation(randomBuilder().field("value")), + response -> { + assertHitCount(response, NUMBER_OF_DOCS); + + final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getName(), is("mad")); + assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(singleValueExactMAD)); + } + ); } @Override public void testSingleValuedFieldWithValueScript() throws Exception { - final SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomBuilder().field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap())) - ) - .get(); - - assertHitCount(response, NUMBER_OF_DOCS); - - final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); - assertThat(mad, notNullValue()); - assertThat(mad.getName(), is("mad")); - - final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(singleValueSample).map(point -> point + 1).toArray()); - assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(fromIncrementedSampleMAD)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomBuilder().field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap())) + ), + response -> { + assertHitCount(response, NUMBER_OF_DOCS); + + final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getName(), is("mad")); + + final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(singleValueSample).map(point -> point + 1).toArray()); + assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(fromIncrementedSampleMAD)); + } + ); } @Override @@ -231,53 +238,55 @@ public void testSingleValuedFieldWithValueScriptWithParams() throws Exception { final Map<String, Object> params = new HashMap<>(); params.put("inc", 1); - final SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - 
randomBuilder().field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + inc", params)) + ), + response -> { + assertHitCount(response, NUMBER_OF_DOCS); + + final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getName(), is("mad")); + + final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(singleValueSample).map(point -> point + 1).toArray()); + assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(fromIncrementedSampleMAD)); + } + ); } @Override public void testMultiValuedField() throws Exception { - final SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(randomBuilder().field("values")) - .get(); - - assertHitCount(response, NUMBER_OF_DOCS); + assertResponse(prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(randomBuilder().field("values")), response -> { + assertHitCount(response, NUMBER_OF_DOCS); - final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); - assertThat(mad, notNullValue()); - assertThat(mad.getName(), is("mad")); - assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(multiValueExactMAD)); + final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getName(), is("mad")); + assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(multiValueExactMAD)); + }); } @Override public void testMultiValuedFieldWithValueScript() throws Exception { - final SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomBuilder().field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap())) - ) - .get(); - - assertHitCount(response, NUMBER_OF_DOCS); - - final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); - assertThat(mad, notNullValue()); - - final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(multiValueSample).map(point -> point + 1).toArray()); - assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(fromIncrementedSampleMAD)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomBuilder().field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap())) + ), + response -> { + assertHitCount(response, NUMBER_OF_DOCS); + + final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + + final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(multiValueSample).map(point -> point + 1).toArray()); + assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(fromIncrementedSampleMAD)); + } + ); } @Override @@ -285,38 +294,42 @@ public void testMultiValuedFieldWithValueScriptWithParams() throws Exception { final Map<String, Object> params = new HashMap<>(); params.put("inc", 1); - final SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomBuilder().field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + inc", params)) - ) - .get(); - - assertHitCount(response, NUMBER_OF_DOCS); - - final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); - assertThat(mad, notNullValue()); - - final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(multiValueSample).map(point -> point + 1).toArray()); - 
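These hunks check the aggregation against `calculateMAD`, the test class's exact reference implementation, recomputed over a shifted copy of the sample for the `_value + 1` / `_value + inc` script variants. For reference, median absolute deviation is the median of each point's absolute distance from the sample median; a stand-alone version of that computation, assuming `calculateMAD` follows the textbook definition (its actual body sits outside this diff):

```java
import java.util.Arrays;

// MAD = median(|x_i - median(x)|); sketch of what calculateMAD is assumed to compute.
static double medianAbsoluteDeviation(long[] sample) {
    final double median = median(Arrays.stream(sample).asDoubleStream().toArray());
    final double[] deviations = Arrays.stream(sample)
        .mapToDouble(point -> Math.abs(point - median))
        .toArray();
    return median(deviations);
}

static double median(double[] values) {
    final double[] sorted = values.clone();
    Arrays.sort(sorted);
    final int mid = sorted.length / 2;
    return sorted.length % 2 == 1 ? sorted[mid] : (sorted[mid - 1] + sorted[mid]) / 2.0;
}
```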
assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(fromIncrementedSampleMAD)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomBuilder().field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + inc", params)) + ), + response -> { + assertHitCount(response, NUMBER_OF_DOCS); + + final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + + final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(multiValueSample).map(point -> point + 1).toArray()); + assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(fromIncrementedSampleMAD)); + } + ); } @Override public void testScriptSingleValued() throws Exception { - final SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomBuilder().script( - new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", Collections.emptyMap()) - ) - ) - .get(); - - assertHitCount(response, NUMBER_OF_DOCS); - - final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); - assertThat(mad, notNullValue()); - assertThat(mad.getName(), is("mad")); - assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(singleValueExactMAD)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomBuilder().script( + new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", Collections.emptyMap()) + ) + ), + response -> { + assertHitCount(response, NUMBER_OF_DOCS); + + final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getName(), is("mad")); + assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(singleValueExactMAD)); + } + ); } @Override @@ -324,38 +337,44 @@ public void testScriptSingleValuedWithParams() throws Exception { final Map params = new HashMap<>(); params.put("inc", 1); - final SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomBuilder().script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value + inc", params)) - ) - .get(); - - assertHitCount(response, NUMBER_OF_DOCS); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomBuilder().script( + new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value + inc", params) + ) + ), + response -> { + assertHitCount(response, NUMBER_OF_DOCS); - final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); - assertThat(mad, notNullValue()); - assertThat(mad.getName(), is("mad")); + final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getName(), is("mad")); - final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(singleValueSample).map(point -> point + 1).toArray()); - assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(fromIncrementedSampleMAD)); + final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(singleValueSample).map(point -> point + 1).toArray()); + assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(fromIncrementedSampleMAD)); + } + ); } @Override public void testScriptMultiValued() throws Exception { - final SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomBuilder().script( - new 
Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['values']", Collections.emptyMap()) - ) - ) - .get(); - - assertHitCount(response, NUMBER_OF_DOCS); - - final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); - assertThat(mad, notNullValue()); - assertThat(mad.getName(), is("mad")); - assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(multiValueExactMAD)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomBuilder().script( + new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['values']", Collections.emptyMap()) + ) + ), + response -> { + assertHitCount(response, NUMBER_OF_DOCS); + + final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getName(), is("mad")); + assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(multiValueExactMAD)); + } + ); } @Override @@ -363,107 +382,112 @@ public void testScriptMultiValuedWithParams() throws Exception { final Map params = new HashMap<>(); params.put("inc", 1); - final SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomBuilder().script( - new Script( - ScriptType.INLINE, - AggregationTestScriptsPlugin.NAME, - "[ doc['value'].value, doc['value'].value + inc ]", - params + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomBuilder().script( + new Script( + ScriptType.INLINE, + AggregationTestScriptsPlugin.NAME, + "[ doc['value'].value, doc['value'].value + inc ]", + params + ) ) - ) - ) - .get(); + ), + response -> { + assertHitCount(response, NUMBER_OF_DOCS); - assertHitCount(response, NUMBER_OF_DOCS); + final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getName(), is("mad")); - final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); - assertThat(mad, notNullValue()); - assertThat(mad.getName(), is("mad")); - - final double fromIncrementedSampleMAD = calculateMAD( - Arrays.stream(singleValueSample).flatMap(point -> LongStream.of(point, point + 1)).toArray() + final double fromIncrementedSampleMAD = calculateMAD( + Arrays.stream(singleValueSample).flatMap(point -> LongStream.of(point, point + 1)).toArray() + ); + assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(fromIncrementedSampleMAD)); + } ); - assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(fromIncrementedSampleMAD)); } public void testAsSubAggregation() throws Exception { final int rangeBoundary = (MAX_SAMPLE_VALUE + MIN_SAMPLE_VALUE) / 2; - final SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - range("range").field("value") - .addRange(MIN_SAMPLE_VALUE, rangeBoundary) - .addRange(rangeBoundary, MAX_SAMPLE_VALUE) - .subAggregation(randomBuilder().field("value")) - ) - .get(); - - assertHitCount(response, NUMBER_OF_DOCS); - - final long[] lowerBucketSample = Arrays.stream(singleValueSample) - .filter(point -> point >= MIN_SAMPLE_VALUE && point < rangeBoundary) - .toArray(); - final long[] upperBucketSample = Arrays.stream(singleValueSample) - .filter(point -> point >= rangeBoundary && point < MAX_SAMPLE_VALUE) - .toArray(); - - final Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - List buckets = range.getBuckets(); - assertThat(buckets, notNullValue()); - assertThat(buckets, hasSize(2)); - - final Range.Bucket 
lowerBucket = buckets.get(0); - assertThat(lowerBucket, notNullValue()); - - final MedianAbsoluteDeviation lowerBucketMAD = lowerBucket.getAggregations().get("mad"); - assertThat(lowerBucketMAD, notNullValue()); - assertThat(lowerBucketMAD.getMedianAbsoluteDeviation(), closeToRelative(calculateMAD(lowerBucketSample))); - - final Range.Bucket upperBucket = buckets.get(1); - assertThat(upperBucket, notNullValue()); - - final MedianAbsoluteDeviation upperBucketMAD = upperBucket.getAggregations().get("mad"); - assertThat(upperBucketMAD, notNullValue()); - assertThat(upperBucketMAD.getMedianAbsoluteDeviation(), closeToRelative(calculateMAD(upperBucketSample))); - + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + range("range").field("value") + .addRange(MIN_SAMPLE_VALUE, rangeBoundary) + .addRange(rangeBoundary, MAX_SAMPLE_VALUE) + .subAggregation(randomBuilder().field("value")) + ), + response -> { + assertHitCount(response, NUMBER_OF_DOCS); + + final long[] lowerBucketSample = Arrays.stream(singleValueSample) + .filter(point -> point >= MIN_SAMPLE_VALUE && point < rangeBoundary) + .toArray(); + final long[] upperBucketSample = Arrays.stream(singleValueSample) + .filter(point -> point >= rangeBoundary && point < MAX_SAMPLE_VALUE) + .toArray(); + + final Range range = response.getAggregations().get("range"); + assertThat(range, notNullValue()); + List buckets = range.getBuckets(); + assertThat(buckets, notNullValue()); + assertThat(buckets, hasSize(2)); + + final Range.Bucket lowerBucket = buckets.get(0); + assertThat(lowerBucket, notNullValue()); + + final MedianAbsoluteDeviation lowerBucketMAD = lowerBucket.getAggregations().get("mad"); + assertThat(lowerBucketMAD, notNullValue()); + assertThat(lowerBucketMAD.getMedianAbsoluteDeviation(), closeToRelative(calculateMAD(lowerBucketSample))); + + final Range.Bucket upperBucket = buckets.get(1); + assertThat(upperBucket, notNullValue()); + + final MedianAbsoluteDeviation upperBucketMAD = upperBucket.getAggregations().get("mad"); + assertThat(upperBucketMAD, notNullValue()); + assertThat(upperBucketMAD.getMedianAbsoluteDeviation(), closeToRelative(calculateMAD(upperBucketSample))); + } + ); } @Override public void testOrderByEmptyAggregation() throws Exception { final int numberOfBuckets = 10; - final SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - terms("terms").field("value") - .size(numberOfBuckets) - .order(BucketOrder.compound(BucketOrder.aggregation("filter>mad", true))) - .subAggregation( - filter("filter", termQuery("value", MAX_SAMPLE_VALUE + 1)).subAggregation(randomBuilder().field("value")) - ) - ) - .get(); - - assertHitCount(response, NUMBER_OF_DOCS); - - final Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - List buckets = terms.getBuckets(); - assertThat(buckets, notNullValue()); - assertThat(buckets, hasSize(numberOfBuckets)); - - for (int i = 0; i < numberOfBuckets; i++) { - Terms.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - - Filter filter = bucket.getAggregations().get("filter"); - assertThat(filter, notNullValue()); - assertThat(filter.getDocCount(), equalTo(0L)); - - MedianAbsoluteDeviation mad = filter.getAggregations().get("mad"); - assertThat(mad, notNullValue()); - assertThat(mad.getMedianAbsoluteDeviation(), equalTo(Double.NaN)); - } + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + terms("terms").field("value") + 
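The order clause being rebuilt in testOrderByEmptyAggregation sorts terms buckets by an aggregation path: `filter>mad` points through each bucket's `filter` sub-aggregation to its nested `mad` metric, and because the filter matches no documents, every bucket's metric is `Double.NaN`, which the assertions then pin down. The same ordering built in isolation; names mirror the test, and the term value here is illustrative only:

```java
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.MedianAbsoluteDeviationAggregationBuilder;

// Terms buckets ordered (ascending) by a metric reached through the "filter>mad" path.
TermsAggregationBuilder agg = AggregationBuilders.terms("terms")
    .field("value")
    .order(BucketOrder.compound(BucketOrder.aggregation("filter>mad", true)))
    .subAggregation(
        AggregationBuilders.filter("filter", QueryBuilders.termQuery("value", -1)) // matches nothing here
            .subAggregation(new MedianAbsoluteDeviationAggregationBuilder("mad").field("value"))
    );
```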
.size(numberOfBuckets) + .order(BucketOrder.compound(BucketOrder.aggregation("filter>mad", true))) + .subAggregation( + filter("filter", termQuery("value", MAX_SAMPLE_VALUE + 1)).subAggregation(randomBuilder().field("value")) + ) + ), + response -> { + assertHitCount(response, NUMBER_OF_DOCS); + + final Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + List buckets = terms.getBuckets(); + assertThat(buckets, notNullValue()); + assertThat(buckets, hasSize(numberOfBuckets)); + + for (int i = 0; i < numberOfBuckets; i++) { + Terms.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + + Filter filter = bucket.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(0L)); + + MedianAbsoluteDeviation mad = filter.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getMedianAbsoluteDeviation(), equalTo(Double.NaN)); + } + } + ); } /** @@ -493,13 +517,13 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - randomBuilder().field("d") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + randomBuilder().field("d") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -511,13 +535,13 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a deterministic script gets cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - randomBuilder().field("d") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + randomBuilder().field("d") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -529,8 +553,7 @@ public void testScriptCaching() throws Exception { ); // Ensure that non-scripted requests are cached as normal - r = prepareSearch("cache_test_idx").setSize(0).addAggregation(randomBuilder().field("d")).get(); - assertNoFailures(r); + assertNoFailures(prepareSearch("cache_test_idx").setSize(0).addAggregation(randomBuilder().field("d"))); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java index 2ea09960071f9..a6876f606ffee 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import 
org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.support.XContentMapValues; @@ -50,6 +49,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.scriptedMetric; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; @@ -359,37 +359,39 @@ public void testMap() { Script combineScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op aggregation", Collections.emptyMap()); Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op list aggregation", Collections.emptyMap()); - SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(scriptedMetric("scripted").mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript)) - .get(); - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); - - Aggregation aggregation = response.getAggregations().get("scripted"); - assertThat(aggregation, notNullValue()); - assertThat(aggregation, instanceOf(ScriptedMetric.class)); - ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; - assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); - assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); - assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); - List aggregationList = (List) scriptedMetricAggregation.aggregation(); - assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries)); - int numShardsRun = 0; - for (Object object : aggregationList) { - assertThat(object, notNullValue()); - assertThat(object, instanceOf(Map.class)); - Map map = (Map) object; - assertThat(map.size(), lessThanOrEqualTo(1)); - if (map.size() == 1) { - assertThat(map.get("count"), notNullValue()); - assertThat(map.get("count"), instanceOf(Number.class)); - assertThat(map.get("count"), equalTo(1)); - numShardsRun++; + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(scriptedMetric("scripted").mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript)), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); + + Aggregation aggregation = response.getAggregations().get("scripted"); + assertThat(aggregation, notNullValue()); + assertThat(aggregation, instanceOf(ScriptedMetric.class)); + ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; + assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); + assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); + assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); + List aggregationList = (List) scriptedMetricAggregation.aggregation(); + assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries)); + int numShardsRun = 0; + for (Object object : aggregationList) { + 
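The testMap family in this file decodes the result shape of `scripted_metric`: with a no-op combine and reduce, the aggregation value is a list holding one entry per primary shard, hence the comparison against `getNumShards("idx").numPrimaries` and the weaker requirement that at least one shard ran the map script. The phase split, shown on the builder; the Painless bodies below follow the style of the reference documentation and stand in for the `CustomScriptPlugin` mock scripts this test file actually registers:

```java
import java.util.Collections;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.search.aggregations.metrics.ScriptedMetricAggregationBuilder;
import static org.elasticsearch.search.aggregations.AggregationBuilders.scriptedMetric;

// init/map/combine run on each shard; reduce runs once on the coordinating node.
ScriptedMetricAggregationBuilder agg = scriptedMetric("scripted")
    .initScript(new Script(ScriptType.INLINE, "painless", "state.count = 0L", Collections.emptyMap()))
    .mapScript(new Script(ScriptType.INLINE, "painless", "state.count += 1", Collections.emptyMap()))       // per document
    .combineScript(new Script(ScriptType.INLINE, "painless", "return state.count", Collections.emptyMap())) // per shard
    .reduceScript(new Script(ScriptType.INLINE, "painless",
        "long total = 0; for (s in states) { total += s } return total", Collections.emptyMap()));
```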
assertThat(object, notNullValue()); + assertThat(object, instanceOf(Map.class)); + Map map = (Map) object; + assertThat(map.size(), lessThanOrEqualTo(1)); + if (map.size() == 1) { + assertThat(map.get("count"), notNullValue()); + assertThat(map.get("count"), instanceOf(Number.class)); + assertThat(map.get("count"), equalTo(1)); + numShardsRun++; + } + } + // We don't know how many shards will have documents but we need to make + // sure that at least one shard ran the map script + assertThat(numShardsRun, greaterThan(0)); } - } - // We don't know how many shards will have documents but we need to make - // sure that at least one shard ran the map script - assertThat(numShardsRun, greaterThan(0)); + ); } public void testMapWithParams() { @@ -401,45 +403,47 @@ public void testMapWithParams() { Script combineScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op aggregation", Collections.emptyMap()); Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op list aggregation", Collections.emptyMap()); - SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - scriptedMetric("scripted").params(aggregationParams) - .mapScript(mapScript) - .combineScript(combineScript) - .reduceScript(reduceScript) - ) - .get(); - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); - - Aggregation aggregation = response.getAggregations().get("scripted"); - assertThat(aggregation, notNullValue()); - assertThat(aggregation, instanceOf(ScriptedMetric.class)); - ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; - assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); - assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); - assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); - List aggregationList = (List) scriptedMetricAggregation.aggregation(); - assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries)); - int numShardsRun = 0; - for (Object object : aggregationList) { - assertThat(object, notNullValue()); - assertThat(object, instanceOf(Map.class)); - Map map = (Map) object; - for (Map.Entry entry : map.entrySet()) { - assertThat(entry, notNullValue()); - assertThat(entry.getKey(), notNullValue()); - assertThat(entry.getKey(), instanceOf(String.class)); - assertThat(entry.getValue(), notNullValue()); - assertThat(entry.getValue(), instanceOf(Number.class)); - String stringValue = (String) entry.getKey(); - assertThat(stringValue, equalTo("12")); - Number numberValue = (Number) entry.getValue(); - assertThat(numberValue, equalTo(1)); - numShardsRun++; + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + scriptedMetric("scripted").params(aggregationParams) + .mapScript(mapScript) + .combineScript(combineScript) + .reduceScript(reduceScript) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); + + Aggregation aggregation = response.getAggregations().get("scripted"); + assertThat(aggregation, notNullValue()); + assertThat(aggregation, instanceOf(ScriptedMetric.class)); + ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; + assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); + assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); + assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); + List aggregationList = (List) 
scriptedMetricAggregation.aggregation(); + assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries)); + int numShardsRun = 0; + for (Object object : aggregationList) { + assertThat(object, notNullValue()); + assertThat(object, instanceOf(Map.class)); + Map map = (Map) object; + for (Map.Entry entry : map.entrySet()) { + assertThat(entry, notNullValue()); + assertThat(entry.getKey(), notNullValue()); + assertThat(entry.getKey(), instanceOf(String.class)); + assertThat(entry.getValue(), notNullValue()); + assertThat(entry.getValue(), instanceOf(Number.class)); + String stringValue = (String) entry.getKey(); + assertThat(stringValue, equalTo("12")); + Number numberValue = (Number) entry.getValue(); + assertThat(numberValue, equalTo(1)); + numShardsRun++; + } + } + assertThat(numShardsRun, greaterThan(0)); } - } - assertThat(numShardsRun, greaterThan(0)); + ); } public void testInitMutatesParams() { @@ -449,47 +453,56 @@ public void testInitMutatesParams() { Map params = new HashMap<>(); params.put("vars", varsMap); - SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - scriptedMetric("scripted").params(params) - .initScript(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap())) - .mapScript( - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", Collections.emptyMap()) - ) - .combineScript(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op aggregation", Collections.emptyMap())) - .reduceScript(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op list aggregation", Collections.emptyMap())) - ) - .get(); - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); - - Aggregation aggregation = response.getAggregations().get("scripted"); - assertThat(aggregation, notNullValue()); - assertThat(aggregation, instanceOf(ScriptedMetric.class)); - ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; - assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); - assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); - assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); - List aggregationList = (List) scriptedMetricAggregation.aggregation(); - assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries)); - long totalCount = 0; - for (Object object : aggregationList) { - assertThat(object, notNullValue()); - assertThat(object, instanceOf(HashMap.class)); - @SuppressWarnings("unchecked") - Map map = (Map) object; - assertThat(map, hasKey("list")); - assertThat(map.get("list"), instanceOf(List.class)); - List list = (List) map.get("list"); - for (Object o : list) { - assertThat(o, notNullValue()); - assertThat(o, instanceOf(Number.class)); - Number numberValue = (Number) o; - assertThat(numberValue, equalTo(3)); - totalCount += numberValue.longValue(); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + scriptedMetric("scripted").params(params) + .initScript(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap())) + .mapScript( + new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "state.list.add(vars.multiplier)", + Collections.emptyMap() + ) + ) + .combineScript(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op aggregation", Collections.emptyMap())) + .reduceScript( + new 
Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op list aggregation", Collections.emptyMap()) + ) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); + + Aggregation aggregation = response.getAggregations().get("scripted"); + assertThat(aggregation, notNullValue()); + assertThat(aggregation, instanceOf(ScriptedMetric.class)); + ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; + assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); + assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); + assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); + List aggregationList = (List) scriptedMetricAggregation.aggregation(); + assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries)); + long totalCount = 0; + for (Object object : aggregationList) { + assertThat(object, notNullValue()); + assertThat(object, instanceOf(HashMap.class)); + @SuppressWarnings("unchecked") + Map map = (Map) object; + assertThat(map, hasKey("list")); + assertThat(map.get("list"), instanceOf(List.class)); + List list = (List) map.get("list"); + for (Object o : list) { + assertThat(o, notNullValue()); + assertThat(o, instanceOf(Number.class)); + Number numberValue = (Number) o; + assertThat(numberValue, equalTo(3)); + totalCount += numberValue.longValue(); + } + } + assertThat(totalCount, equalTo(numDocs * 3)); } - } - assertThat(totalCount, equalTo(numDocs * 3)); + ); } public void testMapCombineWithParams() { @@ -508,40 +521,42 @@ public void testMapCombineWithParams() { ); Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op list aggregation", Collections.emptyMap()); - SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - scriptedMetric("scripted").params(params).mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript) - ) - .get(); - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); - - Aggregation aggregation = response.getAggregations().get("scripted"); - assertThat(aggregation, notNullValue()); - assertThat(aggregation, instanceOf(ScriptedMetric.class)); - ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; - assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); - assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); - assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); - List aggregationList = (List) scriptedMetricAggregation.aggregation(); - assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries)); - long totalCount = 0; - for (Object object : aggregationList) { - assertThat(object, notNullValue()); - assertThat(object, instanceOf(List.class)); - List list = (List) object; - for (Object o : list) { - assertThat(o, notNullValue()); - assertThat(o, instanceOf(Number.class)); - Number numberValue = (Number) o; - // A particular shard may not have any documents stored on it so - // we have to assume the lower bound may be 0. 
The check at the - // bottom of the test method will make sure the count is correct - assertThat(numberValue.longValue(), allOf(greaterThanOrEqualTo(0L), lessThanOrEqualTo(numDocs))); - totalCount += numberValue.longValue(); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + scriptedMetric("scripted").params(params).mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); + + Aggregation aggregation = response.getAggregations().get("scripted"); + assertThat(aggregation, notNullValue()); + assertThat(aggregation, instanceOf(ScriptedMetric.class)); + ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; + assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); + assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); + assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); + List aggregationList = (List) scriptedMetricAggregation.aggregation(); + assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries)); + long totalCount = 0; + for (Object object : aggregationList) { + assertThat(object, notNullValue()); + assertThat(object, instanceOf(List.class)); + List list = (List) object; + for (Object o : list) { + assertThat(o, notNullValue()); + assertThat(o, instanceOf(Number.class)); + Number numberValue = (Number) o; + // A particular shard may not have any documents stored on it so + // we have to assume the lower bound may be 0. The check at the + // bottom of the test method will make sure the count is correct + assertThat(numberValue.longValue(), allOf(greaterThanOrEqualTo(0L), lessThanOrEqualTo(numDocs))); + totalCount += numberValue.longValue(); + } + } + assertThat(totalCount, equalTo(numDocs)); } - } - assertThat(totalCount, equalTo(numDocs)); + ); } public void testInitMapCombineWithParams() { @@ -566,44 +581,46 @@ public void testInitMapCombineWithParams() { ); Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op list aggregation", Collections.emptyMap()); - SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - scriptedMetric("scripted").params(params) - .initScript(initScript) - .mapScript(mapScript) - .combineScript(combineScript) - .reduceScript(reduceScript) - ) - .get(); - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); - - Aggregation aggregation = response.getAggregations().get("scripted"); - assertThat(aggregation, notNullValue()); - assertThat(aggregation, instanceOf(ScriptedMetric.class)); - ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; - assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); - assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); - assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); - List aggregationList = (List) scriptedMetricAggregation.aggregation(); - assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries)); - long totalCount = 0; - for (Object object : aggregationList) { - assertThat(object, notNullValue()); - assertThat(object, instanceOf(List.class)); - List list = (List) object; - for (Object o : list) { - assertThat(o, notNullValue()); - assertThat(o, instanceOf(Number.class)); - Number numberValue = (Number) o; - // A particular shard may not have any documents stored 
on it so - // we have to assume the lower bound may be 0. The check at the - // bottom of the test method will make sure the count is correct - assertThat(numberValue.longValue(), allOf(greaterThanOrEqualTo(0L), lessThanOrEqualTo(numDocs * 3))); - totalCount += numberValue.longValue(); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + scriptedMetric("scripted").params(params) + .initScript(initScript) + .mapScript(mapScript) + .combineScript(combineScript) + .reduceScript(reduceScript) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); + + Aggregation aggregation = response.getAggregations().get("scripted"); + assertThat(aggregation, notNullValue()); + assertThat(aggregation, instanceOf(ScriptedMetric.class)); + ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; + assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); + assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); + assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); + List aggregationList = (List) scriptedMetricAggregation.aggregation(); + assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries)); + long totalCount = 0; + for (Object object : aggregationList) { + assertThat(object, notNullValue()); + assertThat(object, instanceOf(List.class)); + List list = (List) object; + for (Object o : list) { + assertThat(o, notNullValue()); + assertThat(o, instanceOf(Number.class)); + Number numberValue = (Number) o; + // A particular shard may not have any documents stored on it so + // we have to assume the lower bound may be 0. The check at the + // bottom of the test method will make sure the count is correct + assertThat(numberValue.longValue(), allOf(greaterThanOrEqualTo(0L), lessThanOrEqualTo(numDocs * 3))); + totalCount += numberValue.longValue(); + } + } + assertThat(totalCount, equalTo(numDocs * 3)); } - } - assertThat(totalCount, equalTo(numDocs * 3)); + ); } public void testInitMapCombineReduceWithParams() { @@ -633,31 +650,33 @@ public void testInitMapCombineReduceWithParams() { Collections.emptyMap() ); - SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - scriptedMetric("scripted").params(params) - .initScript(initScript) - .mapScript(mapScript) - .combineScript(combineScript) - .reduceScript(reduceScript) - ) - .get(); - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); - - Aggregation aggregation = response.getAggregations().get("scripted"); - assertThat(aggregation, notNullValue()); - assertThat(aggregation, instanceOf(ScriptedMetric.class)); - ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; - assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); - assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); - assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); - List aggregationList = (List) scriptedMetricAggregation.aggregation(); - assertThat(aggregationList.size(), equalTo(1)); - Object object = aggregationList.get(0); - assertThat(object, notNullValue()); - assertThat(object, instanceOf(Number.class)); - assertThat(((Number) object).longValue(), equalTo(numDocs * 3)); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + scriptedMetric("scripted").params(params) + .initScript(initScript) + .mapScript(mapScript) 
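ScriptedMetricIT migrates to `assertNoFailuresAndResponse` rather than plain `assertResponse`: the old standalone `assertNoFailures(response)` line is folded into the helper, so shard failures are checked before any aggregation assertions run. A sketch of the combined helper, inferred from the call sites; the real one lives in `ElasticsearchAssertions`, and the body and matcher choice here are assumptions:

```java
import java.util.function.Consumer;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.emptyArray;

// Sketch: fail fast on shard failures, then run the assertions, then release.
public static void assertNoFailuresAndResponse(SearchRequestBuilder request, Consumer<SearchResponse> consumer) {
    SearchResponse response = request.get();
    try {
        assertThat(response.getShardFailures(), emptyArray());
        consumer.accept(response);
    } finally {
        response.decRef();
    }
}
```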
+ .combineScript(combineScript) + .reduceScript(reduceScript) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); + + Aggregation aggregation = response.getAggregations().get("scripted"); + assertThat(aggregation, notNullValue()); + assertThat(aggregation, instanceOf(ScriptedMetric.class)); + ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; + assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); + assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); + assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); + List aggregationList = (List) scriptedMetricAggregation.aggregation(); + assertThat(aggregationList.size(), equalTo(1)); + Object object = aggregationList.get(0); + assertThat(object, notNullValue()); + assertThat(object, instanceOf(Number.class)); + assertThat(((Number) object).longValue(), equalTo(numDocs * 3)); + } + ); } @SuppressWarnings("rawtypes") @@ -688,42 +707,43 @@ public void testInitMapCombineReduceGetProperty() throws Exception { Collections.emptyMap() ); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - global("global").subAggregation( - scriptedMetric("scripted").params(params) - .initScript(initScript) - .mapScript(mapScript) - .combineScript(combineScript) - .reduceScript(reduceScript) - ) - ) - .get(); - - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(numDocs)); - - Global global = searchResponse.getAggregations().get("global"); - assertThat(global, notNullValue()); - assertThat(global.getName(), equalTo("global")); - assertThat(global.getDocCount(), equalTo(numDocs)); - assertThat(global.getAggregations(), notNullValue()); - assertThat(global.getAggregations().asMap().size(), equalTo(1)); - - ScriptedMetric scriptedMetricAggregation = global.getAggregations().get("scripted"); - assertThat(scriptedMetricAggregation, notNullValue()); - assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); - assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); - assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); - List aggregationList = (List) scriptedMetricAggregation.aggregation(); - assertThat(aggregationList.size(), equalTo(1)); - Object object = aggregationList.get(0); - assertThat(object, notNullValue()); - assertThat(object, instanceOf(Number.class)); - assertThat(((Number) object).longValue(), equalTo(numDocs * 3)); - assertThat(((InternalAggregation) global).getProperty("scripted"), sameInstance(scriptedMetricAggregation)); - assertThat((List) ((InternalAggregation) global).getProperty("scripted.value"), sameInstance(aggregationList)); - assertThat((List) ((InternalAggregation) scriptedMetricAggregation).getProperty("value"), sameInstance(aggregationList)); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + global("global").subAggregation( + scriptedMetric("scripted").params(params) + .initScript(initScript) + .mapScript(mapScript) + .combineScript(combineScript) + .reduceScript(reduceScript) + ) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); + + Global global = response.getAggregations().get("global"); + assertThat(global, notNullValue()); + assertThat(global.getName(), equalTo("global")); + assertThat(global.getDocCount(), equalTo(numDocs)); + assertThat(global.getAggregations(), 
notNullValue()); + assertThat(global.getAggregations().asMap().size(), equalTo(1)); + + ScriptedMetric scriptedMetricAggregation = global.getAggregations().get("scripted"); + assertThat(scriptedMetricAggregation, notNullValue()); + assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); + assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); + assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); + List aggregationList = (List) scriptedMetricAggregation.aggregation(); + assertThat(aggregationList.size(), equalTo(1)); + Object object = aggregationList.get(0); + assertThat(object, notNullValue()); + assertThat(object, instanceOf(Number.class)); + assertThat(((Number) object).longValue(), equalTo(numDocs * 3)); + assertThat(((InternalAggregation) global).getProperty("scripted"), sameInstance(scriptedMetricAggregation)); + assertThat((List) ((InternalAggregation) global).getProperty("scripted.value"), sameInstance(aggregationList)); + assertThat((List) ((InternalAggregation) scriptedMetricAggregation).getProperty("value"), sameInstance(aggregationList)); + } + ); } public void testMapCombineReduceWithParams() { @@ -752,27 +772,29 @@ public void testMapCombineReduceWithParams() { Collections.emptyMap() ); - SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - scriptedMetric("scripted").params(params).mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript) - ) - .get(); - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); - - Aggregation aggregation = response.getAggregations().get("scripted"); - assertThat(aggregation, notNullValue()); - assertThat(aggregation, instanceOf(ScriptedMetric.class)); - ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; - assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); - assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); - assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); - List aggregationList = (List) scriptedMetricAggregation.aggregation(); - assertThat(aggregationList.size(), equalTo(1)); - Object object = aggregationList.get(0); - assertThat(object, notNullValue()); - assertThat(object, instanceOf(Number.class)); - assertThat(((Number) object).longValue(), equalTo(numDocs)); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + scriptedMetric("scripted").params(params).mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); + + Aggregation aggregation = response.getAggregations().get("scripted"); + assertThat(aggregation, notNullValue()); + assertThat(aggregation, instanceOf(ScriptedMetric.class)); + ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; + assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); + assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); + assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); + List aggregationList = (List) scriptedMetricAggregation.aggregation(); + assertThat(aggregationList.size(), equalTo(1)); + Object object = aggregationList.get(0); + assertThat(object, notNullValue()); + assertThat(object, instanceOf(Number.class)); + assertThat(((Number) object).longValue(), equalTo(numDocs)); + } + ); } public void 
testInitMapReduceWithParams() { @@ -797,31 +819,33 @@ public void testInitMapReduceWithParams() { Collections.emptyMap() ); - SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - scriptedMetric("scripted").params(params) - .initScript(initScript) - .mapScript(mapScript) - .combineScript(combineScript) - .reduceScript(reduceScript) - ) - .get(); - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); - - Aggregation aggregation = response.getAggregations().get("scripted"); - assertThat(aggregation, notNullValue()); - assertThat(aggregation, instanceOf(ScriptedMetric.class)); - ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; - assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); - assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); - assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); - List aggregationList = (List) scriptedMetricAggregation.aggregation(); - assertThat(aggregationList.size(), equalTo(1)); - Object object = aggregationList.get(0); - assertThat(object, notNullValue()); - assertThat(object, instanceOf(Number.class)); - assertThat(((Number) object).longValue(), equalTo(numDocs * 3)); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + scriptedMetric("scripted").params(params) + .initScript(initScript) + .mapScript(mapScript) + .combineScript(combineScript) + .reduceScript(reduceScript) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); + + Aggregation aggregation = response.getAggregations().get("scripted"); + assertThat(aggregation, notNullValue()); + assertThat(aggregation, instanceOf(ScriptedMetric.class)); + ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; + assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); + assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); + assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); + List aggregationList = (List) scriptedMetricAggregation.aggregation(); + assertThat(aggregationList.size(), equalTo(1)); + Object object = aggregationList.get(0); + assertThat(object, notNullValue()); + assertThat(object, instanceOf(Number.class)); + assertThat(((Number) object).longValue(), equalTo(numDocs * 3)); + } + ); } public void testMapReduceWithParams() { @@ -844,27 +868,29 @@ public void testMapReduceWithParams() { Collections.emptyMap() ); - SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - scriptedMetric("scripted").params(params).mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript) - ) - .get(); - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); - - Aggregation aggregation = response.getAggregations().get("scripted"); - assertThat(aggregation, notNullValue()); - assertThat(aggregation, instanceOf(ScriptedMetric.class)); - ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; - assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); - assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); - assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); - List aggregationList = (List) scriptedMetricAggregation.aggregation(); - assertThat(aggregationList.size(), equalTo(1)); - Object object = aggregationList.get(0); - 
assertThat(object, notNullValue()); - assertThat(object, instanceOf(Number.class)); - assertThat(((Number) object).longValue(), equalTo(numDocs)); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + scriptedMetric("scripted").params(params).mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); + + Aggregation aggregation = response.getAggregations().get("scripted"); + assertThat(aggregation, notNullValue()); + assertThat(aggregation, instanceOf(ScriptedMetric.class)); + ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; + assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); + assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); + assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); + List aggregationList = (List) scriptedMetricAggregation.aggregation(); + assertThat(aggregationList.size(), equalTo(1)); + Object object = aggregationList.get(0); + assertThat(object, notNullValue()); + assertThat(object, instanceOf(Number.class)); + assertThat(((Number) object).longValue(), equalTo(numDocs)); + } + ); } public void testInitMapCombineReduceWithParamsAndReduceParams() { @@ -897,31 +923,33 @@ public void testInitMapCombineReduceWithParamsAndReduceParams() { reduceParams ); - SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - scriptedMetric("scripted").params(params) - .initScript(initScript) - .mapScript(mapScript) - .combineScript(combineScript) - .reduceScript(reduceScript) - ) - .get(); - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); - - Aggregation aggregation = response.getAggregations().get("scripted"); - assertThat(aggregation, notNullValue()); - assertThat(aggregation, instanceOf(ScriptedMetric.class)); - ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; - assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); - assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); - assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); - List aggregationList = (List) scriptedMetricAggregation.aggregation(); - assertThat(aggregationList.size(), equalTo(1)); - Object object = aggregationList.get(0); - assertThat(object, notNullValue()); - assertThat(object, instanceOf(Number.class)); - assertThat(((Number) object).longValue(), equalTo(numDocs * 12)); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + scriptedMetric("scripted").params(params) + .initScript(initScript) + .mapScript(mapScript) + .combineScript(combineScript) + .reduceScript(reduceScript) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); + + Aggregation aggregation = response.getAggregations().get("scripted"); + assertThat(aggregation, notNullValue()); + assertThat(aggregation, instanceOf(ScriptedMetric.class)); + ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; + assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); + assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); + assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); + List aggregationList = (List) scriptedMetricAggregation.aggregation(); + assertThat(aggregationList.size(), equalTo(1)); 
+ Object object = aggregationList.get(0); + assertThat(object, notNullValue()); + assertThat(object, instanceOf(Number.class)); + assertThat(((Number) object).longValue(), equalTo(numDocs * 12)); + } + ); } public void testInitMapCombineReduceWithParamsStored() { @@ -931,31 +959,33 @@ public void testInitMapCombineReduceWithParamsStored() { Map params = new HashMap<>(); params.put("vars", varsMap); - SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - scriptedMetric("scripted").params(params) - .initScript(new Script(ScriptType.STORED, null, "initScript_stored", Collections.emptyMap())) - .mapScript(new Script(ScriptType.STORED, null, "mapScript_stored", Collections.emptyMap())) - .combineScript(new Script(ScriptType.STORED, null, "combineScript_stored", Collections.emptyMap())) - .reduceScript(new Script(ScriptType.STORED, null, "reduceScript_stored", Collections.emptyMap())) - ) - .get(); - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); - - Aggregation aggregation = response.getAggregations().get("scripted"); - assertThat(aggregation, notNullValue()); - assertThat(aggregation, instanceOf(ScriptedMetric.class)); - ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; - assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); - assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); - assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); - List aggregationList = (List) scriptedMetricAggregation.aggregation(); - assertThat(aggregationList.size(), equalTo(1)); - Object object = aggregationList.get(0); - assertThat(object, notNullValue()); - assertThat(object, instanceOf(Number.class)); - assertThat(((Number) object).longValue(), equalTo(numDocs * 3)); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + scriptedMetric("scripted").params(params) + .initScript(new Script(ScriptType.STORED, null, "initScript_stored", Collections.emptyMap())) + .mapScript(new Script(ScriptType.STORED, null, "mapScript_stored", Collections.emptyMap())) + .combineScript(new Script(ScriptType.STORED, null, "combineScript_stored", Collections.emptyMap())) + .reduceScript(new Script(ScriptType.STORED, null, "reduceScript_stored", Collections.emptyMap())) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); + + Aggregation aggregation = response.getAggregations().get("scripted"); + assertThat(aggregation, notNullValue()); + assertThat(aggregation, instanceOf(ScriptedMetric.class)); + ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; + assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); + assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); + assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); + List aggregationList = (List) scriptedMetricAggregation.aggregation(); + assertThat(aggregationList.size(), equalTo(1)); + Object object = aggregationList.get(0); + assertThat(object, notNullValue()); + assertThat(object, instanceOf(Number.class)); + assertThat(((Number) object).longValue(), equalTo(numDocs * 3)); + } + ); } public void testInitMapCombineReduceWithParamsAsSubAgg() { @@ -985,49 +1015,51 @@ public void testInitMapCombineReduceWithParamsAsSubAgg() { Collections.emptyMap() ); - SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .setSize(1000) - 
.addAggregation( - histogram("histo").field("l_value") - .interval(1) - .subAggregation( - scriptedMetric("scripted").params(params) - .initScript(initScript) - .mapScript(mapScript) - .combineScript(combineScript) - .reduceScript(reduceScript) - ) - ) - .get(); - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); - Aggregation aggregation = response.getAggregations().get("histo"); - assertThat(aggregation, notNullValue()); - assertThat(aggregation, instanceOf(Histogram.class)); - Histogram histoAgg = (Histogram) aggregation; - assertThat(histoAgg.getName(), equalTo("histo")); - List buckets = histoAgg.getBuckets(); - assertThat(buckets, notNullValue()); - for (Bucket b : buckets) { - assertThat(b, notNullValue()); - assertThat(b.getDocCount(), equalTo(1L)); - Aggregations subAggs = b.getAggregations(); - assertThat(subAggs, notNullValue()); - assertThat(subAggs.asList().size(), equalTo(1)); - Aggregation subAgg = subAggs.get("scripted"); - assertThat(subAgg, notNullValue()); - assertThat(subAgg, instanceOf(ScriptedMetric.class)); - ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) subAgg; - assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); - assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); - assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); - List aggregationList = (List) scriptedMetricAggregation.aggregation(); - assertThat(aggregationList.size(), equalTo(1)); - Object object = aggregationList.get(0); - assertThat(object, notNullValue()); - assertThat(object, instanceOf(Number.class)); - assertThat(((Number) object).longValue(), equalTo(3L)); - } + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .setSize(1000) + .addAggregation( + histogram("histo").field("l_value") + .interval(1) + .subAggregation( + scriptedMetric("scripted").params(params) + .initScript(initScript) + .mapScript(mapScript) + .combineScript(combineScript) + .reduceScript(reduceScript) + ) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); + Aggregation aggregation = response.getAggregations().get("histo"); + assertThat(aggregation, notNullValue()); + assertThat(aggregation, instanceOf(Histogram.class)); + Histogram histoAgg = (Histogram) aggregation; + assertThat(histoAgg.getName(), equalTo("histo")); + List buckets = histoAgg.getBuckets(); + assertThat(buckets, notNullValue()); + for (Bucket b : buckets) { + assertThat(b, notNullValue()); + assertThat(b.getDocCount(), equalTo(1L)); + Aggregations subAggs = b.getAggregations(); + assertThat(subAggs, notNullValue()); + assertThat(subAggs.asList().size(), equalTo(1)); + Aggregation subAgg = subAggs.get("scripted"); + assertThat(subAgg, notNullValue()); + assertThat(subAgg, instanceOf(ScriptedMetric.class)); + ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) subAgg; + assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); + assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); + assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); + List aggregationList = (List) scriptedMetricAggregation.aggregation(); + assertThat(aggregationList.size(), equalTo(1)); + Object object = aggregationList.get(0); + assertThat(object, notNullValue()); + assertThat(object, instanceOf(Number.class)); + assertThat(((Number) object).longValue(), equalTo(3L)); + } + } + ); } public void testEmptyAggregation() throws Exception { 
@@ -1057,36 +1089,38 @@ public void testEmptyAggregation() throws Exception { Collections.emptyMap() ); - SearchResponse searchResponse = prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field("value") - .interval(1L) - .minDocCount(0) - .subAggregation( - scriptedMetric("scripted").params(params) - .initScript(initScript) - .mapScript(mapScript) - .combineScript(combineScript) - .reduceScript(reduceScript) - ) - ) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - Histogram histo = searchResponse.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - Histogram.Bucket bucket = histo.getBuckets().get(1); - assertThat(bucket, notNullValue()); - - ScriptedMetric scriptedMetric = bucket.getAggregations().get("scripted"); - assertThat(scriptedMetric, notNullValue()); - assertThat(scriptedMetric.getName(), equalTo("scripted")); - assertThat(scriptedMetric.aggregation(), notNullValue()); - assertThat(scriptedMetric.aggregation(), instanceOf(List.class)); - @SuppressWarnings("unchecked") // We'll just get a ClassCastException a couple lines down if we're wrong, its ok. - List aggregationResult = (List) scriptedMetric.aggregation(); - assertThat(aggregationResult.size(), equalTo(1)); - assertThat(aggregationResult.get(0), equalTo(0)); + assertNoFailuresAndResponse( + prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(1L) + .minDocCount(0) + .subAggregation( + scriptedMetric("scripted").params(params) + .initScript(initScript) + .mapScript(mapScript) + .combineScript(combineScript) + .reduceScript(reduceScript) + ) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + Histogram.Bucket bucket = histo.getBuckets().get(1); + assertThat(bucket, notNullValue()); + + ScriptedMetric scriptedMetric = bucket.getAggregations().get("scripted"); + assertThat(scriptedMetric, notNullValue()); + assertThat(scriptedMetric.getName(), equalTo("scripted")); + assertThat(scriptedMetric.aggregation(), notNullValue()); + assertThat(scriptedMetric.aggregation(), instanceOf(List.class)); + @SuppressWarnings("unchecked") // We'll just get a ClassCastException a couple lines down if we're wrong, it's ok.
+ List aggregationResult = (List) scriptedMetric.aggregation(); + assertThat(aggregationResult.size(), equalTo(1)); + assertThat(aggregationResult.get(0), equalTo(0)); + } + ); } /** @@ -1129,12 +1163,15 @@ public void testScriptCaching() throws Exception { ); // Test that a non-deterministic init script causes the result to not be cached - SearchResponse r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - scriptedMetric("foo").initScript(ndInitScript).mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + scriptedMetric("foo").initScript(ndInitScript) + .mapScript(mapScript) + .combineScript(combineScript) + .reduceScript(reduceScript) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -1146,10 +1183,10 @@ public void testScriptCaching() throws Exception { ); // Test that a non-deterministic map script causes the result to not be cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation(scriptedMetric("foo").mapScript(ndMapScript).combineScript(combineScript).reduceScript(reduceScript)) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation(scriptedMetric("foo").mapScript(ndMapScript).combineScript(combineScript).reduceScript(reduceScript)) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -1161,10 +1198,10 @@ public void testScriptCaching() throws Exception { ); // Test that a non-deterministic combine script causes the result to not be cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation(scriptedMetric("foo").mapScript(mapScript).combineScript(ndRandom).reduceScript(reduceScript)) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation(scriptedMetric("foo").mapScript(mapScript).combineScript(ndRandom).reduceScript(reduceScript)) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -1176,10 +1213,10 @@ public void testScriptCaching() throws Exception { ); // NOTE: random reduce scripts don't hit the query shard context (they are done on the coordinator) and so can be cached. 
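// (For reference: the assertNoFailures(SearchRequestBuilder) overload used by these caching
// tests runs the request, checks for shard failures, and releases the response itself, so the
// test never touches a SearchResponse directly.)
//
// A minimal sketch of the consumer-style helper the other call sites in this patch migrate to,
// assuming a plain java.util.function.Consumer; the real assertNoFailuresAndResponse in
// org.elasticsearch.test.hamcrest.ElasticsearchAssertions may differ in detail (for example by
// accepting a checked consumer). The point is only the shape: the helper, not the test, owns
// the SearchResponse lifecycle.

import java.util.function.Consumer;

import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;

// assertNoFailures(SearchResponse) below is the existing ElasticsearchAssertions check,
// assumed statically imported as in the test files above.
public static void assertNoFailuresAndResponse(SearchRequestBuilder request, Consumer<SearchResponse> consumer) {
    SearchResponse response = request.get();
    try {
        assertNoFailures(response); // the shard-failure check old call sites performed by hand
        consumer.accept(response);  // the caller's assertions run while the response is still retained
    } finally {
        response.decRef();          // release the ref-counted response even when an assertion fails
    }
}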
- r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation(scriptedMetric("foo").mapScript(mapScript).combineScript(combineScript).reduceScript(ndRandom)) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation(scriptedMetric("foo").mapScript(mapScript).combineScript(combineScript).reduceScript(ndRandom)) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -1191,10 +1228,10 @@ public void testScriptCaching() throws Exception { ); // Test that all deterministic scripts cause the request to be cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation(scriptedMetric("foo").mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript)) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation(scriptedMetric("foo").mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript)) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java index eb4d5aa74f2a0..c27751d5c52b8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java @@ -35,6 +35,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -48,165 +49,175 @@ protected Collection> nodePlugins() { @Override public void testEmptyAggregation() throws Exception { - SearchResponse searchResponse = prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) - .addAggregation(histogram("histo").field("value").interval(1L).minDocCount(0).subAggregation(stats("stats").field("value"))) - .get(); - - assertShardExecutionState(searchResponse, 0); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - Histogram histo = searchResponse.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - Histogram.Bucket bucket = histo.getBuckets().get(1); - assertThat(bucket, notNullValue()); - - Stats stats = bucket.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getCount(), equalTo(0L)); - assertThat(stats.getSum(), equalTo(0.0)); - assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY)); - assertThat(Double.isNaN(stats.getAvg()), is(true)); + assertResponse( + prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value").interval(1L).minDocCount(0).subAggregation(stats("stats").field("value")) + ), + response -> { + assertShardExecutionState(response, 0); + assertThat(response.getHits().getTotalHits().value, 
equalTo(2L)); + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + Histogram.Bucket bucket = histo.getBuckets().get(1); + assertThat(bucket, notNullValue()); + + Stats stats = bucket.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat(stats.getCount(), equalTo(0L)); + assertThat(stats.getSum(), equalTo(0.0)); + assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY)); + assertThat(Double.isNaN(stats.getAvg()), is(true)); + } + ); } @Override public void testSingleValuedField() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(stats("stats").field("value")).get(); - - assertShardExecutionState(searchResponse, 0); + assertResponse(prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(stats("stats").field("value")), response -> { + assertShardExecutionState(response, 0); - assertHitCount(searchResponse, 10); + assertHitCount(response, 10); - Stats stats = searchResponse.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10)); - assertThat(stats.getMin(), equalTo(1.0)); - assertThat(stats.getMax(), equalTo(10.0)); - assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); - assertThat(stats.getCount(), equalTo(10L)); + Stats stats = response.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10)); + assertThat(stats.getMin(), equalTo(1.0)); + assertThat(stats.getMax(), equalTo(10.0)); + assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); + assertThat(stats.getCount(), equalTo(10L)); + }); } public void testSingleValuedField_WithFormatter() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(stats("stats").format("0000.0").field("value")) - .get(); - - assertHitCount(searchResponse, 10); - - Stats stats = searchResponse.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10)); - assertThat(stats.getAvgAsString(), equalTo("0005.5")); - assertThat(stats.getMin(), equalTo(1.0)); - assertThat(stats.getMinAsString(), equalTo("0001.0")); - assertThat(stats.getMax(), equalTo(10.0)); - assertThat(stats.getMaxAsString(), equalTo("0010.0")); - assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); - assertThat(stats.getSumAsString(), equalTo("0055.0")); - assertThat(stats.getCount(), equalTo(10L)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(stats("stats").format("0000.0").field("value")), + response -> { + assertHitCount(response, 10); + + Stats stats = response.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10)); + assertThat(stats.getAvgAsString(), equalTo("0005.5")); + assertThat(stats.getMin(), equalTo(1.0)); + 
assertThat(stats.getMinAsString(), equalTo("0001.0")); + assertThat(stats.getMax(), equalTo(10.0)); + assertThat(stats.getMaxAsString(), equalTo("0010.0")); + assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); + assertThat(stats.getSumAsString(), equalTo("0055.0")); + assertThat(stats.getCount(), equalTo(10L)); + } + ); } @Override public void testSingleValuedFieldGetProperty() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(global("global").subAggregation(stats("stats").field("value"))) - .get(); - - assertHitCount(searchResponse, 10); - - Global global = searchResponse.getAggregations().get("global"); - assertThat(global, notNullValue()); - assertThat(global.getName(), equalTo("global")); - assertThat(global.getDocCount(), equalTo(10L)); - assertThat(global.getAggregations(), notNullValue()); - assertThat(global.getAggregations().asMap().size(), equalTo(1)); - - Stats stats = global.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - Stats statsFromProperty = (Stats) ((InternalAggregation) global).getProperty("stats"); - assertThat(statsFromProperty, notNullValue()); - assertThat(statsFromProperty, sameInstance(stats)); - double expectedAvgValue = (double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10; - assertThat(stats.getAvg(), equalTo(expectedAvgValue)); - assertThat((double) ((InternalAggregation) global).getProperty("stats.avg"), equalTo(expectedAvgValue)); - double expectedMinValue = 1.0; - assertThat(stats.getMin(), equalTo(expectedMinValue)); - assertThat((double) ((InternalAggregation) global).getProperty("stats.min"), equalTo(expectedMinValue)); - double expectedMaxValue = 10.0; - assertThat(stats.getMax(), equalTo(expectedMaxValue)); - assertThat((double) ((InternalAggregation) global).getProperty("stats.max"), equalTo(expectedMaxValue)); - double expectedSumValue = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10; - assertThat(stats.getSum(), equalTo(expectedSumValue)); - assertThat((double) ((InternalAggregation) global).getProperty("stats.sum"), equalTo(expectedSumValue)); - long expectedCountValue = 10; - assertThat(stats.getCount(), equalTo(expectedCountValue)); - assertThat((double) ((InternalAggregation) global).getProperty("stats.count"), equalTo((double) expectedCountValue)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(global("global").subAggregation(stats("stats").field("value"))), + response -> { + assertHitCount(response, 10); + + Global global = response.getAggregations().get("global"); + assertThat(global, notNullValue()); + assertThat(global.getName(), equalTo("global")); + assertThat(global.getDocCount(), equalTo(10L)); + assertThat(global.getAggregations(), notNullValue()); + assertThat(global.getAggregations().asMap().size(), equalTo(1)); + + Stats stats = global.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + Stats statsFromProperty = (Stats) ((InternalAggregation) global).getProperty("stats"); + assertThat(statsFromProperty, notNullValue()); + assertThat(statsFromProperty, sameInstance(stats)); + double expectedAvgValue = (double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10; + assertThat(stats.getAvg(), equalTo(expectedAvgValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.avg"), equalTo(expectedAvgValue)); + double expectedMinValue = 1.0; + 
assertThat(stats.getMin(), equalTo(expectedMinValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.min"), equalTo(expectedMinValue)); + double expectedMaxValue = 10.0; + assertThat(stats.getMax(), equalTo(expectedMaxValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.max"), equalTo(expectedMaxValue)); + double expectedSumValue = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10; + assertThat(stats.getSum(), equalTo(expectedSumValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.sum"), equalTo(expectedSumValue)); + long expectedCountValue = 10; + assertThat(stats.getCount(), equalTo(expectedCountValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.count"), equalTo((double) expectedCountValue)); + } + ); } @Override public void testMultiValuedField() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(stats("stats").field("values")).get(); - - assertShardExecutionState(searchResponse, 0); + assertResponse(prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(stats("stats").field("values")), response -> { + assertShardExecutionState(response, 0); - assertHitCount(searchResponse, 10); + assertHitCount(response, 10); - Stats stats = searchResponse.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat( - stats.getAvg(), - equalTo((double) (2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12) / 20) - ); - assertThat(stats.getMin(), equalTo(2.0)); - assertThat(stats.getMax(), equalTo(12.0)); - assertThat(stats.getSum(), equalTo((double) 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12)); - assertThat(stats.getCount(), equalTo(20L)); + Stats stats = response.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat( + stats.getAvg(), + equalTo((double) (2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12) / 20) + ); + assertThat(stats.getMin(), equalTo(2.0)); + assertThat(stats.getMax(), equalTo(12.0)); + assertThat( + stats.getSum(), + equalTo((double) 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12) + ); + assertThat(stats.getCount(), equalTo(20L)); + }); } @Override public void testOrderByEmptyAggregation() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - terms("terms").field("value") - .order(BucketOrder.compound(BucketOrder.aggregation("filter>stats.avg", true))) - .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(stats("stats").field("value"))) - ) - .get(); - - assertHitCount(searchResponse, 10); - - Terms terms = searchResponse.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - List buckets = terms.getBuckets(); - assertThat(buckets, notNullValue()); - assertThat(buckets.size(), equalTo(10)); - - for (int i = 0; i < 10; i++) { - Terms.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); - assertThat(bucket.getDocCount(), equalTo(1L)); - Filter filter = bucket.getAggregations().get("filter"); - assertThat(filter, notNullValue()); - assertThat(filter.getDocCount(), equalTo(0L)); - Stats stats = filter.getAggregations().get("stats"); - assertThat(stats, 
notNullValue()); - assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY)); - assertThat(stats.getAvg(), equalTo(Double.NaN)); - assertThat(stats.getSum(), equalTo(0.0)); - assertThat(stats.getCount(), equalTo(0L)); - - } + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + terms("terms").field("value") + .order(BucketOrder.compound(BucketOrder.aggregation("filter>stats.avg", true))) + .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(stats("stats").field("value"))) + ), + response -> { + assertHitCount(response, 10); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + List buckets = terms.getBuckets(); + assertThat(buckets, notNullValue()); + assertThat(buckets.size(), equalTo(10)); + + for (int i = 0; i < 10; i++) { + Terms.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); + assertThat(bucket.getDocCount(), equalTo(1L)); + Filter filter = bucket.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(0L)); + Stats stats = filter.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY)); + assertThat(stats.getAvg(), equalTo(Double.NaN)); + assertThat(stats.getSum(), equalTo(0.0)); + assertThat(stats.getCount(), equalTo(0L)); + + } + } + ); } - private void assertShardExecutionState(SearchResponse response, int expectedFailures) throws Exception { + private void assertShardExecutionState(SearchResponse response, int expectedFailures) { ShardSearchFailure[] failures = response.getShardFailures(); if (failures.length != expectedFailures) { for (ShardSearchFailure failure : failures) { @@ -243,13 +254,13 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - stats("foo").field("d") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", Collections.emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + stats("foo").field("d") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", Collections.emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -261,13 +272,13 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a deterministic script gets cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - stats("foo").field("d") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + stats("foo").field("d") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -279,8 +290,7 @@ public void testScriptCaching() throws 
Exception { ); // Ensure that non-scripted requests are cached as normal - r = prepareSearch("cache_test_idx").setSize(0).addAggregation(stats("foo").field("d")).get(); - assertNoFailures(r); + assertNoFailures(prepareSearch("cache_test_idx").setSize(0).addAggregation(stats("foo").field("d"))); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/SumIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/SumIT.java index a837b22694ef5..e60f0308412cb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/SumIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/SumIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; @@ -39,6 +38,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; @@ -77,20 +78,22 @@ public void setupSuiteScopeCluster() throws Exception { @Override public void testEmptyAggregation() throws Exception { - SearchResponse searchResponse = prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) - .addAggregation(histogram("histo").field("value").interval(1L).minDocCount(0).subAggregation(sum("sum").field("value"))) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - Histogram histo = searchResponse.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - Histogram.Bucket bucket = histo.getBuckets().get(1); - assertThat(bucket, notNullValue()); - - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - assertThat(sum.getName(), equalTo("sum")); - assertThat(sum.value(), equalTo(0.0)); + assertResponse( + prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) + .addAggregation(histogram("histo").field("value").interval(1L).minDocCount(0).subAggregation(sum("sum").field("value"))), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + Histogram.Bucket bucket = histo.getBuckets().get(1); + assertThat(bucket, notNullValue()); + + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + assertThat(sum.getName(), equalTo("sum")); + assertThat(sum.value(), equalTo(0.0)); + } + ); } /** This test has been moved to {@link SumAggregatorTests#testUnmapped()} */ @@ -99,100 +102,104 @@ public void testUnmapped() throws Exception {} @Override public void testSingleValuedField() throws Exception { - SearchResponse searchResponse = 
prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(sum("sum").field("value")).get(); - - assertHitCount(searchResponse, 10); + assertResponse(prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(sum("sum").field("value")), response -> { + assertHitCount(response, 10); - Sum sum = searchResponse.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - assertThat(sum.getName(), equalTo("sum")); - assertThat(sum.value(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); + Sum sum = response.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + assertThat(sum.getName(), equalTo("sum")); + assertThat(sum.value(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); + }); } public void testSingleValuedFieldWithFormatter() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(sum("sum").format("0000.0").field("value")) - .get(); - - assertHitCount(searchResponse, 10); - - Sum sum = searchResponse.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - assertThat(sum.getName(), equalTo("sum")); - assertThat(sum.value(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); - assertThat(sum.getValueAsString(), equalTo("0055.0")); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(sum("sum").format("0000.0").field("value")), + response -> { + assertHitCount(response, 10); + + Sum sum = response.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + assertThat(sum.getName(), equalTo("sum")); + assertThat(sum.value(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); + assertThat(sum.getValueAsString(), equalTo("0055.0")); + } + ); } @Override public void testSingleValuedFieldGetProperty() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(global("global").subAggregation(sum("sum").field("value"))) - .get(); - - assertHitCount(searchResponse, 10); - - Global global = searchResponse.getAggregations().get("global"); - assertThat(global, notNullValue()); - assertThat(global.getName(), equalTo("global")); - assertThat(global.getDocCount(), equalTo(10L)); - assertThat(global.getAggregations(), notNullValue()); - assertThat(global.getAggregations().asMap().size(), equalTo(1)); - - Sum sum = global.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - assertThat(sum.getName(), equalTo("sum")); - double expectedSumValue = (double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10; - assertThat(sum.value(), equalTo(expectedSumValue)); - assertThat((Sum) ((InternalAggregation) global).getProperty("sum"), equalTo(sum)); - assertThat((double) ((InternalAggregation) global).getProperty("sum.value"), equalTo(expectedSumValue)); - assertThat((double) ((InternalAggregation) sum).getProperty("value"), equalTo(expectedSumValue)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(global("global").subAggregation(sum("sum").field("value"))), + response -> { + assertHitCount(response, 10); + + Global global = response.getAggregations().get("global"); + assertThat(global, notNullValue()); + assertThat(global.getName(), equalTo("global")); + assertThat(global.getDocCount(), equalTo(10L)); + assertThat(global.getAggregations(), notNullValue()); + assertThat(global.getAggregations().asMap().size(), equalTo(1)); + + Sum sum = global.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + assertThat(sum.getName(), equalTo("sum")); + 
double expectedSumValue = (double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10; + assertThat(sum.value(), equalTo(expectedSumValue)); + assertThat((Sum) ((InternalAggregation) global).getProperty("sum"), equalTo(sum)); + assertThat((double) ((InternalAggregation) global).getProperty("sum.value"), equalTo(expectedSumValue)); + assertThat((double) ((InternalAggregation) sum).getProperty("value"), equalTo(expectedSumValue)); + } + ); } @Override public void testMultiValuedField() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(sum("sum").field("values")).get(); + assertResponse(prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(sum("sum").field("values")), response -> { + assertHitCount(response, 10); - assertHitCount(searchResponse, 10); - - Sum sum = searchResponse.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - assertThat(sum.getName(), equalTo("sum")); - assertThat(sum.value(), equalTo((double) 2 + 3 + 3 + 4 + 4 + 5 + 5 + 6 + 6 + 7 + 7 + 8 + 8 + 9 + 9 + 10 + 10 + 11 + 11 + 12)); + Sum sum = response.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + assertThat(sum.getName(), equalTo("sum")); + assertThat(sum.value(), equalTo((double) 2 + 3 + 3 + 4 + 4 + 5 + 5 + 6 + 6 + 7 + 7 + 8 + 8 + 9 + 9 + 10 + 10 + 11 + 11 + 12)); + }); } @Override public void testOrderByEmptyAggregation() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - terms("terms").field("value") - .order(BucketOrder.compound(BucketOrder.aggregation("filter>sum", true))) - .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(sum("sum").field("value"))) - ) - .get(); - - assertHitCount(searchResponse, 10); - - Terms terms = searchResponse.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - List<? extends Terms.Bucket> buckets = terms.getBuckets(); - assertThat(buckets, notNullValue()); - assertThat(buckets.size(), equalTo(10)); - - for (int i = 0; i < 10; i++) { - Terms.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); - assertThat(bucket.getDocCount(), equalTo(1L)); - Filter filter = bucket.getAggregations().get("filter"); - assertThat(filter, notNullValue()); - assertThat(filter.getDocCount(), equalTo(0L)); - Sum sum = filter.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - assertThat(sum.value(), equalTo(0.0)); - - } + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + terms("terms").field("value") + .order(BucketOrder.compound(BucketOrder.aggregation("filter>sum", true))) + .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(sum("sum").field("value"))) + ), + response -> { + assertHitCount(response, 10); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); + assertThat(buckets, notNullValue()); + assertThat(buckets.size(), equalTo(10)); + + for (int i = 0; i < 10; i++) { + Terms.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); + assertThat(bucket.getDocCount(), equalTo(1L)); + Filter filter = bucket.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(0L)); + Sum sum = filter.getAggregations().get("sum"); + assertThat(sum, notNullValue()); +
assertThat(sum.value(), equalTo(0.0)); + + } + } + ); } /** @@ -221,12 +228,12 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - sum("foo").field("d").script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, RANDOM_SCRIPT, Collections.emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + sum("foo").field("d").script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, RANDOM_SCRIPT, Collections.emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -238,12 +245,12 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a deterministic script gets cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - sum("foo").field("d").script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, VALUE_SCRIPT, Collections.emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + sum("foo").field("d").script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, VALUE_SCRIPT, Collections.emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -255,8 +262,7 @@ public void testScriptCaching() throws Exception { ); // Ensure that non-scripted requests are cached as normal - r = prepareSearch("cache_test_idx").setSize(0).addAggregation(sum("foo").field("d")).get(); - assertNoFailures(r); + assertNoFailures(prepareSearch("cache_test_idx").setSize(0).addAggregation(sum("foo").field("d"))); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -269,46 +275,48 @@ public void testScriptCaching() throws Exception { } public void testFieldAlias() { - SearchResponse response = prepareSearch("old_index", "new_index").addAggregation(sum("sum").field("route_length_miles")).get(); - - assertNoFailures(response); - - Sum sum = response.getAggregations().get("sum"); - assertThat(sum, IsNull.notNullValue()); - assertThat(sum.getName(), equalTo("sum")); - assertThat(sum.value(), equalTo(192.7)); + assertNoFailuresAndResponse( + prepareSearch("old_index", "new_index").addAggregation(sum("sum").field("route_length_miles")), + response -> { + Sum sum = response.getAggregations().get("sum"); + assertThat(sum, IsNull.notNullValue()); + assertThat(sum.getName(), equalTo("sum")); + assertThat(sum.value(), equalTo(192.7)); + } + ); } public void testFieldAliasInSubAggregation() { - SearchResponse response = prepareSearch("old_index", "new_index").addAggregation( - terms("terms").field("transit_mode").subAggregation(sum("sum").field("route_length_miles")) - ).get(); - - assertNoFailures(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - - List<? extends Terms.Bucket> buckets = terms.getBuckets(); - assertThat(buckets.size(), equalTo(2)); - - Terms.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("train")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); -
assertThat(sum.value(), equalTo(142.2)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("bus")); - assertThat(bucket.getDocCount(), equalTo(1L)); - - sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - assertThat(sum.value(), equalTo(50.5)); + assertNoFailuresAndResponse( + prepareSearch("old_index", "new_index").addAggregation( + terms("terms").field("transit_mode").subAggregation(sum("sum").field("route_length_miles")) + ), + response -> { + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + + List<? extends Terms.Bucket> buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(2)); + + Terms.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("train")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + assertThat(sum.value(), equalTo(142.2)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("bus")); + assertThat(bucket.getDocCount(), equalTo(1L)); + + sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + assertThat(sum.value(), equalTo(50.5)); + } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java index 421d6f118c277..3156b934fdd06 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.metrics; import org.apache.logging.log4j.LogManager; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.plugins.Plugin; @@ -40,6 +39,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -96,26 +96,28 @@ private void assertConsistent(double[] pcts, PercentileRanks values, long minVal @Override public void testEmptyAggregation() throws Exception { - SearchResponse searchResponse = prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field("value") - .interval(1L) - .minDocCount(0) - .subAggregation(randomCompression(percentileRanks("percentile_ranks", new double[] { 10, 15 }).field("value"))) - ) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - Histogram histo = searchResponse.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - Histogram.Bucket bucket = histo.getBuckets().get(1); - assertThat(bucket, notNullValue()); - - PercentileRanks reversePercentiles = 
bucket.getAggregations().get("percentile_ranks"); - assertThat(reversePercentiles, notNullValue()); - assertThat(reversePercentiles.getName(), equalTo("percentile_ranks")); - assertThat(reversePercentiles.percent(10), equalTo(Double.NaN)); - assertThat(reversePercentiles.percent(15), equalTo(Double.NaN)); + assertResponse( + prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(1L) + .minDocCount(0) + .subAggregation(randomCompression(percentileRanks("percentile_ranks", new double[] { 10, 15 }).field("value"))) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + Histogram.Bucket bucket = histo.getBuckets().get(1); + assertThat(bucket, notNullValue()); + + PercentileRanks reversePercentiles = bucket.getAggregations().get("percentile_ranks"); + assertThat(reversePercentiles, notNullValue()); + assertThat(reversePercentiles.getName(), equalTo("percentile_ranks")); + assertThat(reversePercentiles.percent(10), equalTo(Double.NaN)); + assertThat(reversePercentiles.percent(15), equalTo(Double.NaN)); + } + ); } public void testNullValuesField() throws Exception { @@ -142,95 +144,109 @@ public void testEmptyValuesField() throws Exception { @Override public void testUnmapped() throws Exception { - SearchResponse searchResponse = prepareSearch("idx_unmapped").setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentileRanks("percentile_ranks", new double[] { 0, 10, 15, 100 })).field("value")) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); - - PercentileRanks reversePercentiles = searchResponse.getAggregations().get("percentile_ranks"); - assertThat(reversePercentiles, notNullValue()); - assertThat(reversePercentiles.getName(), equalTo("percentile_ranks")); - assertThat(reversePercentiles.percent(0), equalTo(Double.NaN)); - assertThat(reversePercentiles.percent(10), equalTo(Double.NaN)); - assertThat(reversePercentiles.percent(15), equalTo(Double.NaN)); - assertThat(reversePercentiles.percent(100), equalTo(Double.NaN)); + assertResponse( + prepareSearch("idx_unmapped").setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentileRanks("percentile_ranks", new double[] { 0, 10, 15, 100 })).field("value")), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(0L)); + + PercentileRanks reversePercentiles = response.getAggregations().get("percentile_ranks"); + assertThat(reversePercentiles, notNullValue()); + assertThat(reversePercentiles.getName(), equalTo("percentile_ranks")); + assertThat(reversePercentiles.percent(0), equalTo(Double.NaN)); + assertThat(reversePercentiles.percent(10), equalTo(Double.NaN)); + assertThat(reversePercentiles.percent(15), equalTo(Double.NaN)); + assertThat(reversePercentiles.percent(100), equalTo(Double.NaN)); + } + ); } @Override public void testSingleValuedField() throws Exception { final double[] pcts = randomPercents(minValue, maxValue); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).field("value")) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValue); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + 
.addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).field("value")), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValue); + } + ); } @Override public void testSingleValuedFieldGetProperty() throws Exception { final double[] pcts = randomPercents(minValue, maxValue); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(global("global").subAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).field("value"))) - .get(); - - assertHitCount(searchResponse, 10); - - Global global = searchResponse.getAggregations().get("global"); - assertThat(global, notNullValue()); - assertThat(global.getName(), equalTo("global")); - assertThat(global.getDocCount(), equalTo(10L)); - assertThat(global.getAggregations(), notNullValue()); - assertThat(global.getAggregations().asMap().size(), equalTo(1)); - - PercentileRanks values = global.getAggregations().get("percentile_ranks"); - assertThat(values, notNullValue()); - assertThat(values.getName(), equalTo("percentile_ranks")); - assertThat(((InternalAggregation) global).getProperty("percentile_ranks"), sameInstance(values)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + global("global").subAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).field("value")) + ), + response -> { + assertHitCount(response, 10); + + Global global = response.getAggregations().get("global"); + assertThat(global, notNullValue()); + assertThat(global.getName(), equalTo("global")); + assertThat(global.getDocCount(), equalTo(10L)); + assertThat(global.getAggregations(), notNullValue()); + assertThat(global.getAggregations().asMap().size(), equalTo(1)); + + PercentileRanks values = global.getAggregations().get("percentile_ranks"); + assertThat(values, notNullValue()); + assertThat(values.getName(), equalTo("percentile_ranks")); + assertThat(((InternalAggregation) global).getProperty("percentile_ranks"), sameInstance(values)); + } + ); } public void testSingleValuedFieldOutsideRange() throws Exception { final double[] pcts = new double[] { minValue - 1, maxValue + 1 }; - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).field("value")) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValue); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).field("value")), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValue); + } + ); } @Override public void testSingleValuedFieldPartiallyUnmapped() throws Exception { final double[] pcts = randomPercents(minValue, maxValue); - SearchResponse searchResponse = prepareSearch("idx", "idx_unmapped").setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).field("value")) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValue); + assertResponse( + 
prepareSearch("idx", "idx_unmapped").setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).field("value")), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValue); + } + ); } @Override public void testSingleValuedFieldWithValueScript() throws Exception { final double[] pcts = randomPercents(minValue - 1, maxValue - 1); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomCompression(percentileRanks("percentile_ranks", pcts)).field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValue - 1); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomCompression(percentileRanks("percentile_ranks", pcts)).field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + ), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValue - 1); + } + ); } @Override @@ -238,61 +254,71 @@ public void testSingleValuedFieldWithValueScriptWithParams() throws Exception { Map params = new HashMap<>(); params.put("dec", 1); final double[] pcts = randomPercents(minValue - 1, maxValue - 1); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomCompression(percentileRanks("percentile_ranks", pcts)).field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValue - 1); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomCompression(percentileRanks("percentile_ranks", pcts)).field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) + ), + response -> { + + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValue - 1); + } + ); } @Override public void testMultiValuedField() throws Exception { final double[] pcts = randomPercents(minValues, maxValues); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).field("values")) - .get(); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).field("values")), + response -> { - assertHitCount(searchResponse, 10); + assertHitCount(response, 10); - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValues); + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValues); + } + ); } @Override public void testMultiValuedFieldWithValueScript() throws Exception { final double[] pcts = 
randomPercents(minValues - 1, maxValues - 1); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomCompression(percentileRanks("percentile_ranks", pcts)).field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValues - 1); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomCompression(percentileRanks("percentile_ranks", pcts)).field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + ), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValues - 1); + } + ); } public void testMultiValuedFieldWithValueScriptReverse() throws Exception { final double[] pcts = randomPercents(-maxValues, -minValues); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomCompression(percentileRanks("percentile_ranks", pcts)).field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value * -1", emptyMap())) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, -maxValues); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomCompression(percentileRanks("percentile_ranks", pcts)).field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value * -1", emptyMap())) + ), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, -maxValues); + } + ); } @Override @@ -300,34 +326,38 @@ public void testMultiValuedFieldWithValueScriptWithParams() throws Exception { Map params = new HashMap<>(); params.put("dec", 1); final double[] pcts = randomPercents(minValues - 1, maxValues - 1); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomCompression(percentileRanks("percentile_ranks", pcts)).field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValues - 1); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomCompression(percentileRanks("percentile_ranks", pcts)).field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) + ), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValues - 1); + } + ); } @Override public void testScriptSingleValued() throws Exception { final double[] pcts = randomPercents(minValue, maxValue); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomCompression(percentileRanks("percentile_ranks", pcts)).script( - new 
Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", emptyMap()) - ) - ) - .get(); - - assertHitCount(searchResponse, 10); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomCompression(percentileRanks("percentile_ranks", pcts)).script( + new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", emptyMap()) + ) + ), + response -> { + assertHitCount(response, 10); - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValue); + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValue); + } + ); } @Override @@ -338,28 +368,32 @@ public void testScriptSingleValuedWithParams() throws Exception { Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value - dec", params); final double[] pcts = randomPercents(minValue - 1, maxValue - 1); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).script(script)) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValue - 1); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).script(script)), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValue - 1); + } + ); } @Override public void testScriptMultiValued() throws Exception { final double[] pcts = randomPercents(minValues, maxValues); Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['values']", emptyMap()); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).script(script)) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValues); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).script(script)), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValues); + } + ); } @Override @@ -367,78 +401,84 @@ public void testScriptMultiValuedWithParams() throws Exception { Script script = AggregationTestScriptsPlugin.DECREMENT_ALL_VALUES; final double[] pcts = randomPercents(minValues - 1, maxValues - 1); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).script(script)) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValues - 1); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).script(script)), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = 
response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValues - 1); + } + ); } public void testOrderBySubAggregation() { boolean asc = randomBoolean(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field("value") - .interval(2L) - .subAggregation(randomCompression(percentileRanks("percentile_ranks", new double[] { 99 }).field("value"))) - .order(BucketOrder.aggregation("percentile_ranks", "99", asc)) - ) - .get(); - - assertHitCount(searchResponse, 10); - - Histogram histo = searchResponse.getAggregations().get("histo"); - double previous = asc ? Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY; - for (Histogram.Bucket bucket : histo.getBuckets()) { - PercentileRanks values = bucket.getAggregations().get("percentile_ranks"); - double p99 = values.percent(99); - if (asc) { - assertThat(p99, greaterThanOrEqualTo(previous)); - } else { - assertThat(p99, lessThanOrEqualTo(previous)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(2L) + .subAggregation(randomCompression(percentileRanks("percentile_ranks", new double[] { 99 }).field("value"))) + .order(BucketOrder.aggregation("percentile_ranks", "99", asc)) + ), + response -> { + assertHitCount(response, 10); + + Histogram histo = response.getAggregations().get("histo"); + double previous = asc ? Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY; + for (Histogram.Bucket bucket : histo.getBuckets()) { + PercentileRanks values = bucket.getAggregations().get("percentile_ranks"); + double p99 = values.percent(99); + if (asc) { + assertThat(p99, greaterThanOrEqualTo(previous)); + } else { + assertThat(p99, lessThanOrEqualTo(previous)); + } + previous = p99; + } } - previous = p99; - } + ); } @Override public void testOrderByEmptyAggregation() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - terms("terms").field("value") - .order(BucketOrder.compound(BucketOrder.aggregation("filter>ranks.99", true))) - .subAggregation( - filter("filter", termQuery("value", 100)).subAggregation( - percentileRanks("ranks", new double[] { 99 }).method(PercentilesMethod.TDIGEST).field("value") + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + terms("terms").field("value") + .order(BucketOrder.compound(BucketOrder.aggregation("filter>ranks.99", true))) + .subAggregation( + filter("filter", termQuery("value", 100)).subAggregation( + percentileRanks("ranks", new double[] { 99 }).method(PercentilesMethod.TDIGEST).field("value") + ) ) - ) - ) - .get(); - - assertHitCount(searchResponse, 10); - - Terms terms = searchResponse.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - List buckets = terms.getBuckets(); - assertThat(buckets, notNullValue()); - assertThat(buckets.size(), equalTo(10)); - - for (int i = 0; i < 10; i++) { - Terms.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); - assertThat(bucket.getDocCount(), equalTo(1L)); - Filter filter = bucket.getAggregations().get("filter"); - assertThat(filter, notNullValue()); - assertThat(filter.getDocCount(), equalTo(0L)); - PercentileRanks ranks = filter.getAggregations().get("ranks"); - assertThat(ranks, notNullValue()); - assertThat(ranks.percent(99), equalTo(Double.NaN)); - - } + ), + response -> { + 
assertHitCount(response, 10); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + List buckets = terms.getBuckets(); + assertThat(buckets, notNullValue()); + assertThat(buckets.size(), equalTo(10)); + + for (int i = 0; i < 10; i++) { + Terms.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); + assertThat(bucket.getDocCount(), equalTo(1L)); + Filter filter = bucket.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(0L)); + PercentileRanks ranks = filter.getAggregations().get("ranks"); + assertThat(ranks, notNullValue()); + assertThat(ranks.percent(99), equalTo(Double.NaN)); + + } + } + ); } /** @@ -467,13 +507,13 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - percentileRanks("foo", new double[] { 50.0 }).field("d") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + percentileRanks("foo", new double[] { 50.0 }).field("d") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -485,13 +525,13 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a deterministic script gets cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - percentileRanks("foo", new double[] { 50.0 }).field("d") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + percentileRanks("foo", new double[] { 50.0 }).field("d") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -503,8 +543,9 @@ public void testScriptCaching() throws Exception { ); // Ensure that non-scripted requests are cached as normal - r = prepareSearch("cache_test_idx").setSize(0).addAggregation(percentileRanks("foo", new double[] { 50.0 }).field("d")).get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0).addAggregation(percentileRanks("foo", new double[] { 50.0 }).field("d")) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -515,5 +556,4 @@ public void testScriptCaching() throws Exception { equalTo(2L) ); } - } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java index 58b2b13853848..6d2c11a5868a6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java 
@@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.metrics; import org.apache.logging.log4j.LogManager; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.plugins.Plugin; @@ -42,6 +41,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -102,108 +102,122 @@ private void assertConsistent(double[] pcts, Percentiles percentiles, long minVa @Override public void testEmptyAggregation() throws Exception { - SearchResponse searchResponse = prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field("value") - .interval(1L) - .minDocCount(0) - .subAggregation(randomCompression(percentiles("percentiles").field("value")).percentiles(10, 15)) - ) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - Histogram histo = searchResponse.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - Histogram.Bucket bucket = histo.getBuckets().get(1); - assertThat(bucket, notNullValue()); - - Percentiles percentiles = bucket.getAggregations().get("percentiles"); - assertThat(percentiles, notNullValue()); - assertThat(percentiles.getName(), equalTo("percentiles")); - assertThat(percentiles.percentile(10), equalTo(Double.NaN)); - assertThat(percentiles.percentile(15), equalTo(Double.NaN)); + assertResponse( + prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(1L) + .minDocCount(0) + .subAggregation(randomCompression(percentiles("percentiles").field("value")).percentiles(10, 15)) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + Histogram.Bucket bucket = histo.getBuckets().get(1); + assertThat(bucket, notNullValue()); + + Percentiles percentiles = bucket.getAggregations().get("percentiles"); + assertThat(percentiles, notNullValue()); + assertThat(percentiles.getName(), equalTo("percentiles")); + assertThat(percentiles.percentile(10), equalTo(Double.NaN)); + assertThat(percentiles.percentile(15), equalTo(Double.NaN)); + } + ); } @Override public void testUnmapped() throws Exception { - SearchResponse searchResponse = prepareSearch("idx_unmapped").setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentiles("percentiles")).field("value").percentiles(0, 10, 15, 100)) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); - - Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertThat(percentiles, notNullValue()); - assertThat(percentiles.getName(), equalTo("percentiles")); - assertThat(percentiles.percentile(0), equalTo(Double.NaN)); - assertThat(percentiles.percentile(10), equalTo(Double.NaN)); - assertThat(percentiles.percentile(15), equalTo(Double.NaN)); - assertThat(percentiles.percentile(100), equalTo(Double.NaN)); + assertResponse( + 
prepareSearch("idx_unmapped").setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentiles("percentiles")).field("value").percentiles(0, 10, 15, 100)), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(0L)); + + Percentiles percentiles = response.getAggregations().get("percentiles"); + assertThat(percentiles, notNullValue()); + assertThat(percentiles.getName(), equalTo("percentiles")); + assertThat(percentiles.percentile(0), equalTo(Double.NaN)); + assertThat(percentiles.percentile(10), equalTo(Double.NaN)); + assertThat(percentiles.percentile(15), equalTo(Double.NaN)); + assertThat(percentiles.percentile(100), equalTo(Double.NaN)); + } + ); } @Override public void testSingleValuedField() throws Exception { final double[] pcts = randomPercentiles(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentiles("percentiles")).field("value").percentiles(pcts)) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValue, maxValue); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentiles("percentiles")).field("value").percentiles(pcts)), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValue, maxValue); + } + ); } @Override public void testSingleValuedFieldGetProperty() throws Exception { final double[] pcts = randomPercentiles(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(global("global").subAggregation(randomCompression(percentiles("percentiles")).field("value").percentiles(pcts))) - .get(); - - assertHitCount(searchResponse, 10); - - Global global = searchResponse.getAggregations().get("global"); - assertThat(global, notNullValue()); - assertThat(global.getName(), equalTo("global")); - assertThat(global.getDocCount(), equalTo(10L)); - assertThat(global.getAggregations(), notNullValue()); - assertThat(global.getAggregations().asMap().size(), equalTo(1)); - - Percentiles percentiles = global.getAggregations().get("percentiles"); - assertThat(percentiles, notNullValue()); - assertThat(percentiles.getName(), equalTo("percentiles")); - assertThat(((InternalAggregation) global).getProperty("percentiles"), sameInstance(percentiles)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + global("global").subAggregation(randomCompression(percentiles("percentiles")).field("value").percentiles(pcts)) + ), + response -> { + assertHitCount(response, 10); + + Global global = response.getAggregations().get("global"); + assertThat(global, notNullValue()); + assertThat(global.getName(), equalTo("global")); + assertThat(global.getDocCount(), equalTo(10L)); + assertThat(global.getAggregations(), notNullValue()); + assertThat(global.getAggregations().asMap().size(), equalTo(1)); + + Percentiles percentiles = global.getAggregations().get("percentiles"); + assertThat(percentiles, notNullValue()); + assertThat(percentiles.getName(), equalTo("percentiles")); + assertThat(((InternalAggregation) global).getProperty("percentiles"), sameInstance(percentiles)); + } + ); } @Override public void testSingleValuedFieldPartiallyUnmapped() throws Exception { final double[] pcts = randomPercentiles(); - SearchResponse 
searchResponse = prepareSearch("idx", "idx_unmapped").setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentiles("percentiles")).field("value").percentiles(pcts)) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValue, maxValue); + assertResponse( + prepareSearch("idx", "idx_unmapped").setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentiles("percentiles")).field("value").percentiles(pcts)), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValue, maxValue); + } + ); } @Override public void testSingleValuedFieldWithValueScript() throws Exception { final double[] pcts = randomPercentiles(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomCompression(percentiles("percentiles")).field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) - .percentiles(pcts) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValue - 1, maxValue - 1); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomCompression(percentiles("percentiles")).field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + .percentiles(pcts) + ), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValue - 1, maxValue - 1); + } + ); } @Override @@ -211,64 +225,72 @@ public void testSingleValuedFieldWithValueScriptWithParams() throws Exception { Map<String, Object> params = new HashMap<>(); params.put("dec", 1); final double[] pcts = randomPercentiles(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomCompression(percentiles("percentiles")).field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) - .percentiles(pcts) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValue - 1, maxValue - 1); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomCompression(percentiles("percentiles")).field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) + .percentiles(pcts) + ), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValue - 1, maxValue - 1); + } + ); } @Override public void testMultiValuedField() throws Exception { final double[] pcts = randomPercentiles(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentiles("percentiles")).field("values").percentiles(pcts)) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValues, maxValues); + assertResponse( 
+ prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentiles("percentiles")).field("values").percentiles(pcts)), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValues, maxValues); + } + ); } @Override public void testMultiValuedFieldWithValueScript() throws Exception { final double[] pcts = randomPercentiles(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomCompression(percentiles("percentiles")).field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) - .percentiles(pcts) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValues - 1, maxValues - 1); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomCompression(percentiles("percentiles")).field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + .percentiles(pcts) + ), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValues - 1, maxValues - 1); + } + ); } public void testMultiValuedFieldWithValueScriptReverse() throws Exception { final double[] pcts = randomPercentiles(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomCompression(percentiles("percentiles")).field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value * -1", emptyMap())) - .percentiles(pcts) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, -maxValues, -minValues); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomCompression(percentiles("percentiles")).field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value * -1", emptyMap())) + .percentiles(pcts) + ), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, -maxValues, -minValues); + } + ); } @Override @@ -276,32 +298,36 @@ public void testMultiValuedFieldWithValueScriptWithParams() throws Exception { Map<String, Object> params = new HashMap<>(); params.put("dec", 1); final double[] pcts = randomPercentiles(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomCompression(percentiles("percentiles")).field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) - .percentiles(pcts) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValues - 1, maxValues - 1); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomCompression(percentiles("percentiles")).field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) + .percentiles(pcts) + ), + response -> { 
+ assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValues - 1, maxValues - 1); + } + ); } @Override public void testScriptSingleValued() throws Exception { Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", emptyMap()); final double[] pcts = randomPercentiles(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentiles("percentiles")).script(script).percentiles(pcts)) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValue, maxValue); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentiles("percentiles")).script(script).percentiles(pcts)), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValue, maxValue); + } + ); } @Override @@ -312,14 +338,16 @@ public void testScriptSingleValuedWithParams() throws Exception { Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value - dec", params); final double[] pcts = randomPercentiles(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentiles("percentiles")).script(script).percentiles(pcts)) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValue - 1, maxValue - 1); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentiles("percentiles")).script(script).percentiles(pcts)), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValue - 1, maxValue - 1); + } + ); } @Override @@ -327,14 +355,16 @@ public void testScriptMultiValued() throws Exception { final double[] pcts = randomPercentiles(); Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['values']", emptyMap()); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentiles("percentiles")).script(script).percentiles(pcts)) - .get(); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentiles("percentiles")).script(script).percentiles(pcts)), + response -> { + assertHitCount(response, 10); - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValues, maxValues); + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValues, maxValues); + } + ); } @Override @@ -342,78 +372,83 @@ public void testScriptMultiValuedWithParams() throws Exception { Script script = AggregationTestScriptsPlugin.DECREMENT_ALL_VALUES; final double[] pcts = randomPercentiles(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentiles("percentiles")).script(script).percentiles(pcts)) 
- .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValues - 1, maxValues - 1); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentiles("percentiles")).script(script).percentiles(pcts)), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValues - 1, maxValues - 1); + } + ); } public void testOrderBySubAggregation() { boolean asc = randomBoolean(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field("value") - .interval(2L) - .subAggregation(randomCompression(percentiles("percentiles").field("value").percentiles(99))) - .order(BucketOrder.aggregation("percentiles", "99", asc)) - ) - .get(); - - assertHitCount(searchResponse, 10); - - Histogram histo = searchResponse.getAggregations().get("histo"); - double previous = asc ? Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY; - for (Histogram.Bucket bucket : histo.getBuckets()) { - Percentiles percentiles = bucket.getAggregations().get("percentiles"); - double p99 = percentiles.percentile(99); - if (asc) { - assertThat(p99, greaterThanOrEqualTo(previous)); - } else { - assertThat(p99, lessThanOrEqualTo(previous)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(2L) + .subAggregation(randomCompression(percentiles("percentiles").field("value").percentiles(99))) + .order(BucketOrder.aggregation("percentiles", "99", asc)) + ), + response -> { + assertHitCount(response, 10); + + Histogram histo = response.getAggregations().get("histo"); + double previous = asc ? 
Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY; + for (Histogram.Bucket bucket : histo.getBuckets()) { + Percentiles percentiles = bucket.getAggregations().get("percentiles"); + double p99 = percentiles.percentile(99); + if (asc) { + assertThat(p99, greaterThanOrEqualTo(previous)); + } else { + assertThat(p99, lessThanOrEqualTo(previous)); + } + previous = p99; + } } - previous = p99; - } + ); } @Override public void testOrderByEmptyAggregation() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - terms("terms").field("value") - .order(BucketOrder.compound(BucketOrder.aggregation("filter>percentiles.99", true))) - .subAggregation( - filter("filter", termQuery("value", 100)).subAggregation( - percentiles("percentiles").method(PercentilesMethod.TDIGEST).field("value") + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + terms("terms").field("value") + .order(BucketOrder.compound(BucketOrder.aggregation("filter>percentiles.99", true))) + .subAggregation( + filter("filter", termQuery("value", 100)).subAggregation( + percentiles("percentiles").method(PercentilesMethod.TDIGEST).field("value") + ) ) - ) - ) - .get(); - - assertHitCount(searchResponse, 10); - - Terms terms = searchResponse.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - List<? extends Terms.Bucket> buckets = terms.getBuckets(); - assertThat(buckets, notNullValue()); - assertThat(buckets.size(), equalTo(10)); - - for (int i = 0; i < 10; i++) { - Terms.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); - assertThat(bucket.getDocCount(), equalTo(1L)); - Filter filter = bucket.getAggregations().get("filter"); - assertThat(filter, notNullValue()); - assertThat(filter.getDocCount(), equalTo(0L)); - Percentiles percentiles = filter.getAggregations().get("percentiles"); - assertThat(percentiles, notNullValue()); - assertThat(percentiles.percentile(99), equalTo(Double.NaN)); - - } + ), + response -> { + assertHitCount(response, 10); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); + assertThat(buckets, notNullValue()); + assertThat(buckets.size(), equalTo(10)); + + for (int i = 0; i < 10; i++) { + Terms.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); + assertThat(bucket.getDocCount(), equalTo(1L)); + Filter filter = bucket.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(0L)); + Percentiles percentiles = filter.getAggregations().get("percentiles"); + assertThat(percentiles, notNullValue()); + assertThat(percentiles.percentile(99), equalTo(Double.NaN)); + } + } + ); } /** @@ -442,14 +477,14 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - percentiles("foo").field("d") - .percentiles(50.0) - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + percentiles("foo").field("d") + .percentiles(50.0) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", 
emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -461,14 +496,14 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a deterministic script gets cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - percentiles("foo").field("d") - .percentiles(50.0) - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + percentiles("foo").field("d") + .percentiles(50.0) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -480,8 +515,7 @@ public void testScriptCaching() throws Exception { ); // Ensure that non-scripted requests are cached as normal - r = prepareSearch("cache_test_idx").setSize(0).addAggregation(percentiles("foo").field("d").percentiles(50.0)).get(); - assertNoFailures(r); + assertNoFailures(prepareSearch("cache_test_idx").setSize(0).addAggregation(percentiles("foo").field("d").percentiles(50.0))); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java index ab9ab37894f70..432da3a05f860 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java @@ -12,7 +12,6 @@ import org.apache.lucene.util.ArrayUtil; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.settings.Settings; @@ -65,6 +64,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xcontent.XContentFactory.smileBuilder; import static org.elasticsearch.xcontent.XContentFactory.yamlBuilder; @@ -311,358 +311,365 @@ private String key(Terms.Bucket bucket) { } public void testBasics() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(TERMS_AGGS_FIELD) - .subAggregation(topHits("hits").sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC))) - ).get(); - - assertNoFailures(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - long higestSortValue = 0; - for (int i = 0; i 
< 5; i++) { - Terms.Bucket bucket = terms.getBucketByKey("val" + i); - assertThat(bucket, notNullValue()); - assertThat(key(bucket), equalTo("val" + i)); - assertThat(bucket.getDocCount(), equalTo(10L)); - TopHits topHits = bucket.getAggregations().get("hits"); - SearchHits hits = topHits.getHits(); - assertThat(hits.getTotalHits().value, equalTo(10L)); - assertThat(hits.getHits().length, equalTo(3)); - higestSortValue += 10; - assertThat((Long) hits.getAt(0).getSortValues()[0], equalTo(higestSortValue)); - assertThat((Long) hits.getAt(1).getSortValues()[0], equalTo(higestSortValue - 1)); - assertThat((Long) hits.getAt(2).getSortValues()[0], equalTo(higestSortValue - 2)); - - assertThat(hits.getAt(0).getSourceAsMap().size(), equalTo(5)); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(TERMS_AGGS_FIELD) + .subAggregation(topHits("hits").sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC))) + ), + response -> { + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + long higestSortValue = 0; + for (int i = 0; i < 5; i++) { + Terms.Bucket bucket = terms.getBucketByKey("val" + i); + assertThat(bucket, notNullValue()); + assertThat(key(bucket), equalTo("val" + i)); + assertThat(bucket.getDocCount(), equalTo(10L)); + TopHits topHits = bucket.getAggregations().get("hits"); + SearchHits hits = topHits.getHits(); + assertThat(hits.getTotalHits().value, equalTo(10L)); + assertThat(hits.getHits().length, equalTo(3)); + higestSortValue += 10; + assertThat((Long) hits.getAt(0).getSortValues()[0], equalTo(higestSortValue)); + assertThat((Long) hits.getAt(1).getSortValues()[0], equalTo(higestSortValue - 1)); + assertThat((Long) hits.getAt(2).getSortValues()[0], equalTo(higestSortValue - 2)); + + assertThat(hits.getAt(0).getSourceAsMap().size(), equalTo(5)); + } + } + ); } public void testIssue11119() throws Exception { // Test that top_hits aggregation is fed scores if query results size=0 - SearchResponse response = prepareSearch("field-collapsing").setSize(0) - .setQuery(matchQuery("text", "x y z")) - .addAggregation(terms("terms").executionHint(randomExecutionHint()).field("group").subAggregation(topHits("hits"))) - .get(); - - assertNoFailures(response); - - assertThat(response.getHits().getTotalHits().value, equalTo(8L)); - assertThat(response.getHits().getHits().length, equalTo(0)); - assertThat(response.getHits().getMaxScore(), equalTo(Float.NaN)); - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(3)); - - for (Terms.Bucket bucket : terms.getBuckets()) { - assertThat(bucket, notNullValue()); - TopHits topHits = bucket.getAggregations().get("hits"); - SearchHits hits = topHits.getHits(); - float bestScore = Float.MAX_VALUE; - for (int h = 0; h < hits.getHits().length; h++) { - float score = hits.getAt(h).getScore(); - assertThat(score, lessThanOrEqualTo(bestScore)); - assertThat(score, greaterThan(0f)); - bestScore = hits.getAt(h).getScore(); + assertNoFailuresAndResponse( + prepareSearch("field-collapsing").setSize(0) + .setQuery(matchQuery("text", "x y z")) + .addAggregation(terms("terms").executionHint(randomExecutionHint()).field("group").subAggregation(topHits("hits"))), + response -> { + 
assertThat(response.getHits().getTotalHits().value, equalTo(8L)); + assertThat(response.getHits().getHits().length, equalTo(0)); + assertThat(response.getHits().getMaxScore(), equalTo(Float.NaN)); + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(3)); + + for (Terms.Bucket bucket : terms.getBuckets()) { + assertThat(bucket, notNullValue()); + TopHits topHits = bucket.getAggregations().get("hits"); + SearchHits hits = topHits.getHits(); + float bestScore = Float.MAX_VALUE; + for (int h = 0; h < hits.getHits().length; h++) { + float score = hits.getAt(h).getScore(); + assertThat(score, lessThanOrEqualTo(bestScore)); + assertThat(score, greaterThan(0f)); + bestScore = hits.getAt(h).getScore(); + } + } } - } + ); // Also check that min_score setting works when size=0 // (technically not a test of top_hits but implementation details are // tied up with the need to feed scores into the agg tree even when // users don't want ranked set of query results.) - response = prepareSearch("field-collapsing").setSize(0) - .setMinScore(0.0001f) - .setQuery(matchQuery("text", "x y z")) - .addAggregation(terms("terms").executionHint(randomExecutionHint()).field("group")) - .get(); - - assertNoFailures(response); - - assertThat(response.getHits().getTotalHits().value, equalTo(8L)); - assertThat(response.getHits().getHits().length, equalTo(0)); - assertThat(response.getHits().getMaxScore(), equalTo(Float.NaN)); - terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(3)); + assertNoFailuresAndResponse( + prepareSearch("field-collapsing").setSize(0) + .setMinScore(0.0001f) + .setQuery(matchQuery("text", "x y z")) + .addAggregation(terms("terms").executionHint(randomExecutionHint()).field("group")), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(8L)); + assertThat(response.getHits().getHits().length, equalTo(0)); + assertThat(response.getHits().getMaxScore(), equalTo(Float.NaN)); + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(3)); + } + ); } public void testBreadthFirstWithScoreNeeded() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .collectMode(SubAggCollectionMode.BREADTH_FIRST) - .field(TERMS_AGGS_FIELD) - .subAggregation(topHits("hits").size(3)) - ).get(); - - assertNoFailures(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - for (int i = 0; i < 5; i++) { - Terms.Bucket bucket = terms.getBucketByKey("val" + i); - assertThat(bucket, notNullValue()); - assertThat(key(bucket), equalTo("val" + i)); - assertThat(bucket.getDocCount(), equalTo(10L)); - TopHits topHits = bucket.getAggregations().get("hits"); - SearchHits hits = topHits.getHits(); - assertThat(hits.getTotalHits().value, equalTo(10L)); - assertThat(hits.getHits().length, equalTo(3)); - - assertThat(hits.getAt(0).getSourceAsMap().size(), equalTo(5)); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + 
terms("terms").executionHint(randomExecutionHint()) + .collectMode(SubAggCollectionMode.BREADTH_FIRST) + .field(TERMS_AGGS_FIELD) + .subAggregation(topHits("hits").size(3)) + ), + response -> { + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + for (int i = 0; i < 5; i++) { + Terms.Bucket bucket = terms.getBucketByKey("val" + i); + assertThat(bucket, notNullValue()); + assertThat(key(bucket), equalTo("val" + i)); + assertThat(bucket.getDocCount(), equalTo(10L)); + TopHits topHits = bucket.getAggregations().get("hits"); + SearchHits hits = topHits.getHits(); + assertThat(hits.getTotalHits().value, equalTo(10L)); + assertThat(hits.getHits().length, equalTo(3)); + + assertThat(hits.getAt(0).getSourceAsMap().size(), equalTo(5)); + } + } + ); } public void testBreadthFirstWithAggOrderAndScoreNeeded() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .collectMode(SubAggCollectionMode.BREADTH_FIRST) - .field(TERMS_AGGS_FIELD) - .order(BucketOrder.aggregation("max", false)) - .subAggregation(max("max").field(SORT_FIELD)) - .subAggregation(topHits("hits").size(3)) - ).get(); - - assertNoFailures(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - int id = 4; - for (Terms.Bucket bucket : terms.getBuckets()) { - assertThat(bucket, notNullValue()); - assertThat(key(bucket), equalTo("val" + id)); - assertThat(bucket.getDocCount(), equalTo(10L)); - TopHits topHits = bucket.getAggregations().get("hits"); - SearchHits hits = topHits.getHits(); - assertThat(hits.getTotalHits().value, equalTo(10L)); - assertThat(hits.getHits().length, equalTo(3)); - - assertThat(hits.getAt(0).getSourceAsMap().size(), equalTo(5)); - id--; - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .collectMode(SubAggCollectionMode.BREADTH_FIRST) + .field(TERMS_AGGS_FIELD) + .order(BucketOrder.aggregation("max", false)) + .subAggregation(max("max").field(SORT_FIELD)) + .subAggregation(topHits("hits").size(3)) + ), + response -> { + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + int id = 4; + for (Terms.Bucket bucket : terms.getBuckets()) { + assertThat(bucket, notNullValue()); + assertThat(key(bucket), equalTo("val" + id)); + assertThat(bucket.getDocCount(), equalTo(10L)); + TopHits topHits = bucket.getAggregations().get("hits"); + SearchHits hits = topHits.getHits(); + assertThat(hits.getTotalHits().value, equalTo(10L)); + assertThat(hits.getHits().length, equalTo(3)); + + assertThat(hits.getAt(0).getSourceAsMap().size(), equalTo(5)); + id--; + } + } + ); } public void testBasicsGetProperty() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(global("global").subAggregation(topHits("hits"))) - .get(); - - assertNoFailures(searchResponse); - - Global global = searchResponse.getAggregations().get("global"); - assertThat(global, notNullValue()); - assertThat(global.getName(), equalTo("global")); - assertThat(global.getAggregations(), notNullValue()); - 
assertThat(global.getAggregations().asMap().size(), equalTo(1)); - - TopHits topHits = global.getAggregations().get("hits"); - assertThat(topHits, notNullValue()); - assertThat(topHits.getName(), equalTo("hits")); - assertThat((TopHits) ((InternalAggregation) global).getProperty("hits"), sameInstance(topHits)); - + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(global("global").subAggregation(topHits("hits"))), + response -> { + Global global = response.getAggregations().get("global"); + assertThat(global, notNullValue()); + assertThat(global.getName(), equalTo("global")); + assertThat(global.getAggregations(), notNullValue()); + assertThat(global.getAggregations().asMap().size(), equalTo(1)); + + TopHits topHits = global.getAggregations().get("hits"); + assertThat(topHits, notNullValue()); + assertThat(topHits.getName(), equalTo("hits")); + assertThat((TopHits) ((InternalAggregation) global).getProperty("hits"), sameInstance(topHits)); + } + ); } public void testPagination() throws Exception { int size = randomIntBetween(1, 10); int from = randomIntBetween(0, 10); - SearchResponse response = prepareSearch("idx").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(TERMS_AGGS_FIELD) - .subAggregation(topHits("hits").sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC)).from(from).size(size)) - ).get(); - assertNoFailures(response); - - SearchResponse control = prepareSearch("idx").setFrom(from) - .setSize(size) - .setPostFilter(QueryBuilders.termQuery(TERMS_AGGS_FIELD, "val0")) - .addSort(SORT_FIELD, SortOrder.DESC) - .get(); - assertNoFailures(control); - SearchHits controlHits = control.getHits(); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - Terms.Bucket bucket = terms.getBucketByKey("val0"); - assertThat(bucket, notNullValue()); - assertThat(bucket.getDocCount(), equalTo(10L)); - TopHits topHits = bucket.getAggregations().get("hits"); - SearchHits hits = topHits.getHits(); - assertThat(hits.getTotalHits().value, equalTo(controlHits.getTotalHits().value)); - assertThat(hits.getHits().length, equalTo(controlHits.getHits().length)); - for (int i = 0; i < hits.getHits().length; i++) { - logger.info( - "{}: top_hits: [{}][{}] control: [{}][{}]", - i, - hits.getAt(i).getId(), - hits.getAt(i).getSortValues()[0], - controlHits.getAt(i).getId(), - controlHits.getAt(i).getSortValues()[0] - ); - assertThat(hits.getAt(i).getId(), equalTo(controlHits.getAt(i).getId())); - assertThat(hits.getAt(i).getSortValues()[0], equalTo(controlHits.getAt(i).getSortValues()[0])); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(TERMS_AGGS_FIELD) + .subAggregation(topHits("hits").sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC)).from(from).size(size)) + ), + response -> { + assertNoFailuresAndResponse( + prepareSearch("idx").setFrom(from) + .setSize(size) + .setPostFilter(QueryBuilders.termQuery(TERMS_AGGS_FIELD, "val0")) + .addSort(SORT_FIELD, SortOrder.DESC), + control -> { + assertNoFailures(control); + SearchHits controlHits = control.getHits(); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + Terms.Bucket bucket = 
terms.getBucketByKey("val0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getDocCount(), equalTo(10L));
+ TopHits topHits = bucket.getAggregations().get("hits");
+ SearchHits hits = topHits.getHits();
+ assertThat(hits.getTotalHits().value, equalTo(controlHits.getTotalHits().value));
+ assertThat(hits.getHits().length, equalTo(controlHits.getHits().length));
+ for (int i = 0; i < hits.getHits().length; i++) {
+ logger.info(
+ "{}: top_hits: [{}][{}] control: [{}][{}]",
+ i,
+ hits.getAt(i).getId(),
+ hits.getAt(i).getSortValues()[0],
+ controlHits.getAt(i).getId(),
+ controlHits.getAt(i).getSortValues()[0]
+ );
+ assertThat(hits.getAt(i).getId(), equalTo(controlHits.getAt(i).getId()));
+ assertThat(hits.getAt(i).getSortValues()[0], equalTo(controlHits.getAt(i).getSortValues()[0]));
+ }
+ }
+ );
+ }
+ );
}

public void testSortByBucket() throws Exception {
- SearchResponse response = prepareSearch("idx").addAggregation(
- terms("terms").executionHint(randomExecutionHint())
- .field(TERMS_AGGS_FIELD)
- .order(BucketOrder.aggregation("max_sort", false))
- .subAggregation(topHits("hits").sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC)).trackScores(true))
- .subAggregation(max("max_sort").field(SORT_FIELD))
- ).get();
- assertNoFailures(response);
-
- Terms terms = response.getAggregations().get("terms");
- assertThat(terms, notNullValue());
- assertThat(terms.getName(), equalTo("terms"));
- assertThat(terms.getBuckets().size(), equalTo(5));
-
- long higestSortValue = 50;
- int currentBucket = 4;
- for (Terms.Bucket bucket : terms.getBuckets()) {
- assertThat(key(bucket), equalTo("val" + currentBucket--));
- assertThat(bucket.getDocCount(), equalTo(10L));
- TopHits topHits = bucket.getAggregations().get("hits");
- SearchHits hits = topHits.getHits();
- assertThat(hits.getTotalHits().value, equalTo(10L));
- assertThat(hits.getHits().length, equalTo(3));
- assertThat(hits.getAt(0).getSortValues()[0], equalTo(higestSortValue));
- assertThat(hits.getAt(1).getSortValues()[0], equalTo(higestSortValue - 1));
- assertThat(hits.getAt(2).getSortValues()[0], equalTo(higestSortValue - 2));
- Max max = bucket.getAggregations().get("max_sort");
- assertThat(max.value(), equalTo(((Long) higestSortValue).doubleValue()));
- higestSortValue -= 10;
- }
+ assertNoFailuresAndResponse(
+ prepareSearch("idx").addAggregation(
+ terms("terms").executionHint(randomExecutionHint())
+ .field(TERMS_AGGS_FIELD)
+ .order(BucketOrder.aggregation("max_sort", false))
+ .subAggregation(topHits("hits").sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC)).trackScores(true))
+ .subAggregation(max("max_sort").field(SORT_FIELD))
+ ),
+ response -> {
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ long highestSortValue = 50;
+ int currentBucket = 4;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(key(bucket), equalTo("val" + currentBucket--));
+ assertThat(bucket.getDocCount(), equalTo(10L));
+ TopHits topHits = bucket.getAggregations().get("hits");
+ SearchHits hits = topHits.getHits();
+ assertThat(hits.getTotalHits().value, equalTo(10L));
+ assertThat(hits.getHits().length, equalTo(3));
+ assertThat(hits.getAt(0).getSortValues()[0], equalTo(highestSortValue));
+ assertThat(hits.getAt(1).getSortValues()[0], equalTo(highestSortValue - 1));
+ assertThat(hits.getAt(2).getSortValues()[0], equalTo(highestSortValue - 2));
+ Max max = 
bucket.getAggregations().get("max_sort");
+ assertThat(max.value(), equalTo(((Long) highestSortValue).doubleValue()));
+ highestSortValue -= 10;
+ }
+ }
+ );
}

public void testFieldCollapsing() throws Exception {
- SearchResponse response = prepareSearch("field-collapsing").setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
- .setQuery(matchQuery("text", "term rare"))
- .addAggregation(
- terms("terms").executionHint(randomExecutionHint())
- .field("group")
- .order(BucketOrder.aggregation("max_score", false))
- .subAggregation(topHits("hits").size(1))
- .subAggregation(max("max_score").field("value"))
- )
- .get();
- assertNoFailures(response);
-
- Terms terms = response.getAggregations().get("terms");
- assertThat(terms, notNullValue());
- assertThat(terms.getName(), equalTo("terms"));
- assertThat(terms.getBuckets().size(), equalTo(3));
-
- Iterator bucketIterator = terms.getBuckets().iterator();
- Terms.Bucket bucket = bucketIterator.next();
- assertThat(key(bucket), equalTo("b"));
- TopHits topHits = bucket.getAggregations().get("hits");
- SearchHits hits = topHits.getHits();
- assertThat(hits.getTotalHits().value, equalTo(4L));
- assertThat(hits.getHits().length, equalTo(1));
- assertThat(hits.getAt(0).getId(), equalTo("6"));
-
- bucket = bucketIterator.next();
- assertThat(key(bucket), equalTo("c"));
- topHits = bucket.getAggregations().get("hits");
- hits = topHits.getHits();
- assertThat(hits.getTotalHits().value, equalTo(3L));
- assertThat(hits.getHits().length, equalTo(1));
- assertThat(hits.getAt(0).getId(), equalTo("9"));
-
- bucket = bucketIterator.next();
- assertThat(key(bucket), equalTo("a"));
- topHits = bucket.getAggregations().get("hits");
- hits = topHits.getHits();
- assertThat(hits.getTotalHits().value, equalTo(2L));
- assertThat(hits.getHits().length, equalTo(1));
- assertThat(hits.getAt(0).getId(), equalTo("2"));
+ assertNoFailuresAndResponse(
+ prepareSearch("field-collapsing").setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .setQuery(matchQuery("text", "term rare"))
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint())
+ .field("group")
+ .order(BucketOrder.aggregation("max_score", false))
+ .subAggregation(topHits("hits").size(1))
+ .subAggregation(max("max_score").field("value"))
+ ),
+ response -> {
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(3));
+
+ Iterator bucketIterator = terms.getBuckets().iterator();
+ Terms.Bucket bucket = bucketIterator.next();
+ assertThat(key(bucket), equalTo("b"));
+ TopHits topHits = bucket.getAggregations().get("hits");
+ SearchHits hits = topHits.getHits();
+ assertThat(hits.getTotalHits().value, equalTo(4L));
+ assertThat(hits.getHits().length, equalTo(1));
+ assertThat(hits.getAt(0).getId(), equalTo("6"));
+
+ bucket = bucketIterator.next();
+ assertThat(key(bucket), equalTo("c"));
+ topHits = bucket.getAggregations().get("hits");
+ hits = topHits.getHits();
+ assertThat(hits.getTotalHits().value, equalTo(3L));
+ assertThat(hits.getHits().length, equalTo(1));
+ assertThat(hits.getAt(0).getId(), equalTo("9"));
+
+ bucket = bucketIterator.next();
+ assertThat(key(bucket), equalTo("a"));
+ topHits = bucket.getAggregations().get("hits");
+ hits = topHits.getHits();
+ assertThat(hits.getTotalHits().value, equalTo(2L));
+ assertThat(hits.getHits().length, equalTo(1));
+ assertThat(hits.getAt(0).getId(), equalTo("2"));
+ }
+ );
}

public void testFetchFeatures() {
final 
boolean seqNoAndTerm = randomBoolean(); - SearchResponse response = prepareSearch("idx").setQuery(matchQuery("text", "text").queryName("test")) - .addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(TERMS_AGGS_FIELD) - .subAggregation( - topHits("hits").size(1) - .highlighter(new HighlightBuilder().field("text")) - .explain(true) - .storedField("text") - .docValueField("field1") - .fetchField("field2") - .scriptField("script", new Script(ScriptType.INLINE, MockScriptEngine.NAME, "5", Collections.emptyMap())) - .fetchSource("text", null) - .version(true) - .seqNoAndPrimaryTerm(seqNoAndTerm) - ) - ) - .get(); - assertNoFailures(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - for (Terms.Bucket bucket : terms.getBuckets()) { - TopHits topHits = bucket.getAggregations().get("hits"); - SearchHits hits = topHits.getHits(); - assertThat(hits.getTotalHits().value, equalTo(10L)); - assertThat(hits.getHits().length, equalTo(1)); - - SearchHit hit = hits.getAt(0); - HighlightField highlightField = hit.getHighlightFields().get("text"); - assertThat(highlightField.getFragments().length, equalTo(1)); - assertThat(highlightField.getFragments()[0].string(), equalTo("some text to entertain")); - - Explanation explanation = hit.getExplanation(); - assertThat(explanation.toString(), containsString("text:text")); - - long version = hit.getVersion(); - assertThat(version, equalTo(1L)); - - if (seqNoAndTerm) { - assertThat(hit.getSeqNo(), greaterThanOrEqualTo(0L)); - assertThat(hit.getPrimaryTerm(), greaterThanOrEqualTo(1L)); - } else { - assertThat(hit.getSeqNo(), equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO)); - assertThat(hit.getPrimaryTerm(), equalTo(SequenceNumbers.UNASSIGNED_PRIMARY_TERM)); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchQuery("text", "text").queryName("test")) + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(TERMS_AGGS_FIELD) + .subAggregation( + topHits("hits").size(1) + .highlighter(new HighlightBuilder().field("text")) + .explain(true) + .storedField("text") + .docValueField("field1") + .fetchField("field2") + .scriptField("script", new Script(ScriptType.INLINE, MockScriptEngine.NAME, "5", Collections.emptyMap())) + .fetchSource("text", null) + .version(true) + .seqNoAndPrimaryTerm(seqNoAndTerm) + ) + ), + response -> { + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + for (Terms.Bucket bucket : terms.getBuckets()) { + TopHits topHits = bucket.getAggregations().get("hits"); + SearchHits hits = topHits.getHits(); + assertThat(hits.getTotalHits().value, equalTo(10L)); + assertThat(hits.getHits().length, equalTo(1)); + + SearchHit hit = hits.getAt(0); + HighlightField highlightField = hit.getHighlightFields().get("text"); + assertThat(highlightField.fragments().length, equalTo(1)); + assertThat(highlightField.fragments()[0].string(), equalTo("some text to entertain")); + + long version = hit.getVersion(); + assertThat(version, equalTo(1L)); + + if (seqNoAndTerm) { + assertThat(hit.getSeqNo(), greaterThanOrEqualTo(0L)); + assertThat(hit.getPrimaryTerm(), greaterThanOrEqualTo(1L)); + } else { + assertThat(hit.getSeqNo(), equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO)); + assertThat(hit.getPrimaryTerm(), 
equalTo(SequenceNumbers.UNASSIGNED_PRIMARY_TERM)); + } + + assertThat(hit.getMatchedQueries()[0], equalTo("test")); + + DocumentField field1 = hit.field("field1"); + assertThat(field1.getValue(), equalTo(5L)); + + DocumentField field2 = hit.field("field2"); + assertThat(field2.getValue(), equalTo(2.71f)); + + assertThat(hit.getSourceAsMap().get("text").toString(), equalTo("some text to entertain")); + + field2 = hit.field("script"); + assertThat(field2.getValue().toString(), equalTo("5")); + + assertThat(hit.getSourceAsMap().size(), equalTo(1)); + assertThat(hit.getSourceAsMap().get("text").toString(), equalTo("some text to entertain")); + } } - - assertThat(hit.getMatchedQueries()[0], equalTo("test")); - - DocumentField field1 = hit.field("field1"); - assertThat(field1.getValue(), equalTo(5L)); - - DocumentField field2 = hit.field("field2"); - assertThat(field2.getValue(), equalTo(2.71f)); - - assertThat(hit.getSourceAsMap().get("text").toString(), equalTo("some text to entertain")); - - field2 = hit.field("script"); - assertThat(field2.getValue().toString(), equalTo("5")); - - assertThat(hit.getSourceAsMap().size(), equalTo(1)); - assertThat(hit.getSourceAsMap().get("text").toString(), equalTo("some text to entertain")); - } + ); } public void testInvalidSortField() throws Exception { @@ -679,194 +686,197 @@ public void testInvalidSortField() throws Exception { } public void testEmptyIndex() throws Exception { - SearchResponse response = prepareSearch("empty").addAggregation(topHits("hits")).get(); - assertNoFailures(response); - - TopHits hits = response.getAggregations().get("hits"); - assertThat(hits, notNullValue()); - assertThat(hits.getName(), equalTo("hits")); - assertThat(hits.getHits().getTotalHits().value, equalTo(0L)); + assertNoFailuresAndResponse(prepareSearch("empty").addAggregation(topHits("hits")), response -> { + TopHits hits = response.getAggregations().get("hits"); + assertThat(hits, notNullValue()); + assertThat(hits.getName(), equalTo("hits")); + assertThat(hits.getHits().getTotalHits().value, equalTo(0L)); + }); } public void testTrackScores() throws Exception { boolean[] trackScores = new boolean[] { true, false }; for (boolean trackScore : trackScores) { logger.info("Track score={}", trackScore); - SearchResponse response = prepareSearch("field-collapsing").setQuery(matchQuery("text", "term rare")) - .addAggregation( - terms("terms").field("group") - .subAggregation(topHits("hits").trackScores(trackScore).size(1).sort("_index", SortOrder.DESC)) - ) - .get(); - assertNoFailures(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(3)); - - Terms.Bucket bucket = terms.getBucketByKey("a"); - assertThat(key(bucket), equalTo("a")); - TopHits topHits = bucket.getAggregations().get("hits"); - SearchHits hits = topHits.getHits(); - assertThat(hits.getMaxScore(), trackScore ? not(equalTo(Float.NaN)) : equalTo(Float.NaN)); - assertThat(hits.getAt(0).getScore(), trackScore ? not(equalTo(Float.NaN)) : equalTo(Float.NaN)); - - bucket = terms.getBucketByKey("b"); - assertThat(key(bucket), equalTo("b")); - topHits = bucket.getAggregations().get("hits"); - hits = topHits.getHits(); - assertThat(hits.getMaxScore(), trackScore ? not(equalTo(Float.NaN)) : equalTo(Float.NaN)); - assertThat(hits.getAt(0).getScore(), trackScore ? 
not(equalTo(Float.NaN)) : equalTo(Float.NaN)); - - bucket = terms.getBucketByKey("c"); - assertThat(key(bucket), equalTo("c")); - topHits = bucket.getAggregations().get("hits"); - hits = topHits.getHits(); - assertThat(hits.getMaxScore(), trackScore ? not(equalTo(Float.NaN)) : equalTo(Float.NaN)); - assertThat(hits.getAt(0).getScore(), trackScore ? not(equalTo(Float.NaN)) : equalTo(Float.NaN)); + assertNoFailuresAndResponse( + prepareSearch("field-collapsing").setQuery(matchQuery("text", "term rare")) + .addAggregation( + terms("terms").field("group") + .subAggregation(topHits("hits").trackScores(trackScore).size(1).sort("_index", SortOrder.DESC)) + ), + response -> { + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(3)); + + Terms.Bucket bucket = terms.getBucketByKey("a"); + assertThat(key(bucket), equalTo("a")); + TopHits topHits = bucket.getAggregations().get("hits"); + SearchHits hits = topHits.getHits(); + assertThat(hits.getMaxScore(), trackScore ? not(equalTo(Float.NaN)) : equalTo(Float.NaN)); + assertThat(hits.getAt(0).getScore(), trackScore ? not(equalTo(Float.NaN)) : equalTo(Float.NaN)); + + bucket = terms.getBucketByKey("b"); + assertThat(key(bucket), equalTo("b")); + topHits = bucket.getAggregations().get("hits"); + hits = topHits.getHits(); + assertThat(hits.getMaxScore(), trackScore ? not(equalTo(Float.NaN)) : equalTo(Float.NaN)); + assertThat(hits.getAt(0).getScore(), trackScore ? not(equalTo(Float.NaN)) : equalTo(Float.NaN)); + + bucket = terms.getBucketByKey("c"); + assertThat(key(bucket), equalTo("c")); + topHits = bucket.getAggregations().get("hits"); + hits = topHits.getHits(); + assertThat(hits.getMaxScore(), trackScore ? not(equalTo(Float.NaN)) : equalTo(Float.NaN)); + assertThat(hits.getAt(0).getScore(), trackScore ? 
not(equalTo(Float.NaN)) : equalTo(Float.NaN)); + } + ); } } public void testTopHitsInNestedSimple() throws Exception { - SearchResponse searchResponse = prepareSearch("articles").setQuery(matchQuery("title", "title")) - .addAggregation( - nested("to-comments", "comments").subAggregation( - terms("users").field("comments.user").subAggregation(topHits("top-comments").sort("comments.date", SortOrder.ASC)) - ) - ) - .get(); - - Nested nested = searchResponse.getAggregations().get("to-comments"); - assertThat(nested.getDocCount(), equalTo(4L)); - - Terms terms = nested.getAggregations().get("users"); - Terms.Bucket bucket = terms.getBucketByKey("a"); - assertThat(bucket.getDocCount(), equalTo(1L)); - TopHits topHits = bucket.getAggregations().get("top-comments"); - SearchHits searchHits = topHits.getHits(); - assertThat(searchHits.getTotalHits().value, equalTo(1L)); - assertThat(searchHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(searchHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(extractValue("date", searchHits.getAt(0).getSourceAsMap()), equalTo(1)); - - bucket = terms.getBucketByKey("b"); - assertThat(bucket.getDocCount(), equalTo(2L)); - topHits = bucket.getAggregations().get("top-comments"); - searchHits = topHits.getHits(); - assertThat(searchHits.getTotalHits().value, equalTo(2L)); - assertThat(searchHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(searchHits.getAt(0).getNestedIdentity().getOffset(), equalTo(1)); - assertThat(extractValue("date", searchHits.getAt(0).getSourceAsMap()), equalTo(2)); - assertThat(searchHits.getAt(1).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(searchHits.getAt(1).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(extractValue("date", searchHits.getAt(1).getSourceAsMap()), equalTo(3)); - - bucket = terms.getBucketByKey("c"); - assertThat(bucket.getDocCount(), equalTo(1L)); - topHits = bucket.getAggregations().get("top-comments"); - searchHits = topHits.getHits(); - assertThat(searchHits.getTotalHits().value, equalTo(1L)); - assertThat(searchHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(searchHits.getAt(0).getNestedIdentity().getOffset(), equalTo(1)); - assertThat(extractValue("date", searchHits.getAt(0).getSourceAsMap()), equalTo(4)); + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery(matchQuery("title", "title")) + .addAggregation( + nested("to-comments", "comments").subAggregation( + terms("users").field("comments.user").subAggregation(topHits("top-comments").sort("comments.date", SortOrder.ASC)) + ) + ), + response -> { + Nested nested = response.getAggregations().get("to-comments"); + assertThat(nested.getDocCount(), equalTo(4L)); + + Terms terms = nested.getAggregations().get("users"); + Terms.Bucket bucket = terms.getBucketByKey("a"); + assertThat(bucket.getDocCount(), equalTo(1L)); + TopHits topHits = bucket.getAggregations().get("top-comments"); + SearchHits searchHits = topHits.getHits(); + assertThat(searchHits.getTotalHits().value, equalTo(1L)); + assertThat(searchHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(searchHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(extractValue("date", searchHits.getAt(0).getSourceAsMap()), equalTo(1)); + + bucket = terms.getBucketByKey("b"); + assertThat(bucket.getDocCount(), equalTo(2L)); + topHits = 
bucket.getAggregations().get("top-comments"); + searchHits = topHits.getHits(); + assertThat(searchHits.getTotalHits().value, equalTo(2L)); + assertThat(searchHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(searchHits.getAt(0).getNestedIdentity().getOffset(), equalTo(1)); + assertThat(extractValue("date", searchHits.getAt(0).getSourceAsMap()), equalTo(2)); + assertThat(searchHits.getAt(1).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(searchHits.getAt(1).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(extractValue("date", searchHits.getAt(1).getSourceAsMap()), equalTo(3)); + + bucket = terms.getBucketByKey("c"); + assertThat(bucket.getDocCount(), equalTo(1L)); + topHits = bucket.getAggregations().get("top-comments"); + searchHits = topHits.getHits(); + assertThat(searchHits.getTotalHits().value, equalTo(1L)); + assertThat(searchHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(searchHits.getAt(0).getNestedIdentity().getOffset(), equalTo(1)); + assertThat(extractValue("date", searchHits.getAt(0).getSourceAsMap()), equalTo(4)); + } + ); } public void testTopHitsInSecondLayerNested() throws Exception { - SearchResponse searchResponse = prepareSearch("articles").setQuery(matchQuery("title", "title")) - .addAggregation( - nested("to-comments", "comments").subAggregation( - nested("to-reviewers", "comments.reviewers").subAggregation( - // Also need to sort on _doc because there are two reviewers with the same name - topHits("top-reviewers").sort("comments.reviewers.name", SortOrder.ASC).sort("_doc", SortOrder.DESC).size(7) - ) - ).subAggregation(topHits("top-comments").sort("comments.date", SortOrder.DESC).size(4)) - ) - .get(); - assertNoFailures(searchResponse); - - Nested toComments = searchResponse.getAggregations().get("to-comments"); - assertThat(toComments.getDocCount(), equalTo(4L)); - - TopHits topComments = toComments.getAggregations().get("top-comments"); - assertThat(topComments.getHits().getTotalHits().value, equalTo(4L)); - assertThat(topComments.getHits().getHits().length, equalTo(4)); - - assertThat(topComments.getHits().getAt(0).getId(), equalTo("2")); - assertThat(topComments.getHits().getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(topComments.getHits().getAt(0).getNestedIdentity().getOffset(), equalTo(1)); - assertThat(topComments.getHits().getAt(0).getNestedIdentity().getChild(), nullValue()); - - assertThat(topComments.getHits().getAt(1).getId(), equalTo("2")); - assertThat(topComments.getHits().getAt(1).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(topComments.getHits().getAt(1).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(topComments.getHits().getAt(1).getNestedIdentity().getChild(), nullValue()); - - assertThat(topComments.getHits().getAt(2).getId(), equalTo("1")); - assertThat(topComments.getHits().getAt(2).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(topComments.getHits().getAt(2).getNestedIdentity().getOffset(), equalTo(1)); - assertThat(topComments.getHits().getAt(2).getNestedIdentity().getChild(), nullValue()); - - assertThat(topComments.getHits().getAt(3).getId(), equalTo("1")); - assertThat(topComments.getHits().getAt(3).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(topComments.getHits().getAt(3).getNestedIdentity().getOffset(), equalTo(0)); - 
assertThat(topComments.getHits().getAt(3).getNestedIdentity().getChild(), nullValue()); - - Nested toReviewers = toComments.getAggregations().get("to-reviewers"); - assertThat(toReviewers.getDocCount(), equalTo(7L)); - - TopHits topReviewers = toReviewers.getAggregations().get("top-reviewers"); - assertThat(topReviewers.getHits().getTotalHits().value, equalTo(7L)); - assertThat(topReviewers.getHits().getHits().length, equalTo(7)); - - assertThat(topReviewers.getHits().getAt(0).getId(), equalTo("1")); - assertThat(extractValue("name", topReviewers.getHits().getAt(0).getSourceAsMap()), equalTo("user a")); - assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("reviewers")); - assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); - - assertThat(topReviewers.getHits().getAt(1).getId(), equalTo("1")); - assertThat(extractValue("name", topReviewers.getHits().getAt(1).getSourceAsMap()), equalTo("user b")); - assertThat(topReviewers.getHits().getAt(1).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(topReviewers.getHits().getAt(1).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(topReviewers.getHits().getAt(1).getNestedIdentity().getChild().getField().string(), equalTo("reviewers")); - assertThat(topReviewers.getHits().getAt(1).getNestedIdentity().getChild().getOffset(), equalTo(1)); - - assertThat(topReviewers.getHits().getAt(2).getId(), equalTo("1")); - assertThat(extractValue("name", topReviewers.getHits().getAt(2).getSourceAsMap()), equalTo("user c")); - assertThat(topReviewers.getHits().getAt(2).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(topReviewers.getHits().getAt(2).getNestedIdentity().getOffset(), equalTo(1)); - assertThat(topReviewers.getHits().getAt(2).getNestedIdentity().getChild().getField().string(), equalTo("reviewers")); - assertThat(topReviewers.getHits().getAt(2).getNestedIdentity().getChild().getOffset(), equalTo(0)); - - assertThat(topReviewers.getHits().getAt(3).getId(), equalTo("1")); - assertThat(extractValue("name", topReviewers.getHits().getAt(3).getSourceAsMap()), equalTo("user c")); - assertThat(topReviewers.getHits().getAt(3).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(topReviewers.getHits().getAt(3).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(topReviewers.getHits().getAt(3).getNestedIdentity().getChild().getField().string(), equalTo("reviewers")); - assertThat(topReviewers.getHits().getAt(3).getNestedIdentity().getChild().getOffset(), equalTo(2)); - - assertThat(topReviewers.getHits().getAt(4).getId(), equalTo("1")); - assertThat(extractValue("name", topReviewers.getHits().getAt(4).getSourceAsMap()), equalTo("user d")); - assertThat(topReviewers.getHits().getAt(4).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(topReviewers.getHits().getAt(4).getNestedIdentity().getOffset(), equalTo(1)); - assertThat(topReviewers.getHits().getAt(4).getNestedIdentity().getChild().getField().string(), equalTo("reviewers")); - assertThat(topReviewers.getHits().getAt(4).getNestedIdentity().getChild().getOffset(), equalTo(1)); - - assertThat(topReviewers.getHits().getAt(5).getId(), equalTo("1")); - assertThat(extractValue("name", 
topReviewers.getHits().getAt(5).getSourceAsMap()), equalTo("user e")); - assertThat(topReviewers.getHits().getAt(5).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(topReviewers.getHits().getAt(5).getNestedIdentity().getOffset(), equalTo(1)); - assertThat(topReviewers.getHits().getAt(5).getNestedIdentity().getChild().getField().string(), equalTo("reviewers")); - assertThat(topReviewers.getHits().getAt(5).getNestedIdentity().getChild().getOffset(), equalTo(2)); - - assertThat(topReviewers.getHits().getAt(6).getId(), equalTo("2")); - assertThat(extractValue("name", topReviewers.getHits().getAt(6).getSourceAsMap()), equalTo("user f")); - assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("reviewers")); - assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery(matchQuery("title", "title")) + .addAggregation( + nested("to-comments", "comments").subAggregation( + nested("to-reviewers", "comments.reviewers").subAggregation( + // Also need to sort on _doc because there are two reviewers with the same name + topHits("top-reviewers").sort("comments.reviewers.name", SortOrder.ASC).sort("_doc", SortOrder.DESC).size(7) + ) + ).subAggregation(topHits("top-comments").sort("comments.date", SortOrder.DESC).size(4)) + ), + response -> { + Nested toComments = response.getAggregations().get("to-comments"); + assertThat(toComments.getDocCount(), equalTo(4L)); + + TopHits topComments = toComments.getAggregations().get("top-comments"); + assertThat(topComments.getHits().getTotalHits().value, equalTo(4L)); + assertThat(topComments.getHits().getHits().length, equalTo(4)); + + assertThat(topComments.getHits().getAt(0).getId(), equalTo("2")); + assertThat(topComments.getHits().getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(topComments.getHits().getAt(0).getNestedIdentity().getOffset(), equalTo(1)); + assertThat(topComments.getHits().getAt(0).getNestedIdentity().getChild(), nullValue()); + + assertThat(topComments.getHits().getAt(1).getId(), equalTo("2")); + assertThat(topComments.getHits().getAt(1).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(topComments.getHits().getAt(1).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(topComments.getHits().getAt(1).getNestedIdentity().getChild(), nullValue()); + + assertThat(topComments.getHits().getAt(2).getId(), equalTo("1")); + assertThat(topComments.getHits().getAt(2).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(topComments.getHits().getAt(2).getNestedIdentity().getOffset(), equalTo(1)); + assertThat(topComments.getHits().getAt(2).getNestedIdentity().getChild(), nullValue()); + + assertThat(topComments.getHits().getAt(3).getId(), equalTo("1")); + assertThat(topComments.getHits().getAt(3).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(topComments.getHits().getAt(3).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(topComments.getHits().getAt(3).getNestedIdentity().getChild(), nullValue()); + + Nested toReviewers = toComments.getAggregations().get("to-reviewers"); + assertThat(toReviewers.getDocCount(), equalTo(7L)); + + TopHits topReviewers = 
toReviewers.getAggregations().get("top-reviewers"); + assertThat(topReviewers.getHits().getTotalHits().value, equalTo(7L)); + assertThat(topReviewers.getHits().getHits().length, equalTo(7)); + + assertThat(topReviewers.getHits().getAt(0).getId(), equalTo("1")); + assertThat(extractValue("name", topReviewers.getHits().getAt(0).getSourceAsMap()), equalTo("user a")); + assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("reviewers")); + assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); + + assertThat(topReviewers.getHits().getAt(1).getId(), equalTo("1")); + assertThat(extractValue("name", topReviewers.getHits().getAt(1).getSourceAsMap()), equalTo("user b")); + assertThat(topReviewers.getHits().getAt(1).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(topReviewers.getHits().getAt(1).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(topReviewers.getHits().getAt(1).getNestedIdentity().getChild().getField().string(), equalTo("reviewers")); + assertThat(topReviewers.getHits().getAt(1).getNestedIdentity().getChild().getOffset(), equalTo(1)); + + assertThat(topReviewers.getHits().getAt(2).getId(), equalTo("1")); + assertThat(extractValue("name", topReviewers.getHits().getAt(2).getSourceAsMap()), equalTo("user c")); + assertThat(topReviewers.getHits().getAt(2).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(topReviewers.getHits().getAt(2).getNestedIdentity().getOffset(), equalTo(1)); + assertThat(topReviewers.getHits().getAt(2).getNestedIdentity().getChild().getField().string(), equalTo("reviewers")); + assertThat(topReviewers.getHits().getAt(2).getNestedIdentity().getChild().getOffset(), equalTo(0)); + + assertThat(topReviewers.getHits().getAt(3).getId(), equalTo("1")); + assertThat(extractValue("name", topReviewers.getHits().getAt(3).getSourceAsMap()), equalTo("user c")); + assertThat(topReviewers.getHits().getAt(3).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(topReviewers.getHits().getAt(3).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(topReviewers.getHits().getAt(3).getNestedIdentity().getChild().getField().string(), equalTo("reviewers")); + assertThat(topReviewers.getHits().getAt(3).getNestedIdentity().getChild().getOffset(), equalTo(2)); + + assertThat(topReviewers.getHits().getAt(4).getId(), equalTo("1")); + assertThat(extractValue("name", topReviewers.getHits().getAt(4).getSourceAsMap()), equalTo("user d")); + assertThat(topReviewers.getHits().getAt(4).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(topReviewers.getHits().getAt(4).getNestedIdentity().getOffset(), equalTo(1)); + assertThat(topReviewers.getHits().getAt(4).getNestedIdentity().getChild().getField().string(), equalTo("reviewers")); + assertThat(topReviewers.getHits().getAt(4).getNestedIdentity().getChild().getOffset(), equalTo(1)); + + assertThat(topReviewers.getHits().getAt(5).getId(), equalTo("1")); + assertThat(extractValue("name", topReviewers.getHits().getAt(5).getSourceAsMap()), equalTo("user e")); + assertThat(topReviewers.getHits().getAt(5).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(topReviewers.getHits().getAt(5).getNestedIdentity().getOffset(), equalTo(1)); + 
assertThat(topReviewers.getHits().getAt(5).getNestedIdentity().getChild().getField().string(), equalTo("reviewers")); + assertThat(topReviewers.getHits().getAt(5).getNestedIdentity().getChild().getOffset(), equalTo(2)); + + assertThat(topReviewers.getHits().getAt(6).getId(), equalTo("2")); + assertThat(extractValue("name", topReviewers.getHits().getAt(6).getSourceAsMap()), equalTo("user f")); + assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("reviewers")); + assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); + } + ); } public void testNestedFetchFeatures() { @@ -875,96 +885,102 @@ public void testNestedFetchFeatures() { matchQuery("comments.message", "comment") ).highlighterType(hlType); - SearchResponse searchResponse = prepareSearch("articles").setQuery( - nestedQuery("comments", matchQuery("comments.message", "comment").queryName("test"), ScoreMode.Avg) - ) - .addAggregation( - nested("to-comments", "comments").subAggregation( - topHits("top-comments").size(1) - .highlighter(new HighlightBuilder().field(hlField)) - .explain(true) - .docValueField("comments.user") - .scriptField("script", new Script(ScriptType.INLINE, MockScriptEngine.NAME, "5", Collections.emptyMap())) - .fetchSource("comments.message", null) - .version(true) - .sort("comments.date", SortOrder.ASC) - ) + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery("comments", matchQuery("comments.message", "comment").queryName("test"), ScoreMode.Avg) ) - .get(); - assertHitCount(searchResponse, 2); - Nested nested = searchResponse.getAggregations().get("to-comments"); - assertThat(nested.getDocCount(), equalTo(4L)); - - SearchHits hits = ((TopHits) nested.getAggregations().get("top-comments")).getHits(); - assertThat(hits.getTotalHits().value, equalTo(4L)); - SearchHit searchHit = hits.getAt(0); - assertThat(searchHit.getId(), equalTo("1")); - assertThat(searchHit.getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(searchHit.getNestedIdentity().getOffset(), equalTo(0)); - - HighlightField highlightField = searchHit.getHighlightFields().get("comments.message"); - assertThat(highlightField.getFragments().length, equalTo(1)); - assertThat(highlightField.getFragments()[0].string(), equalTo("some comment")); - - // Can't explain nested hit with the main query, since both are in a different scopes, also the nested doc may not - // even have matched with the main query. - // If top_hits would have a query option then we can explain that query - Explanation explanation = searchHit.getExplanation(); - assertFalse(explanation.isMatch()); - - // Returns the version of the root document. 
Nested docs don't have a separate version
- long version = searchHit.getVersion();
- assertThat(version, equalTo(1L));
-
- assertThat(searchHit.getMatchedQueries(), arrayContaining("test"));
-
- DocumentField field = searchHit.field("comments.user");
- assertThat(field.getValue().toString(), equalTo("a"));
-
- field = searchHit.field("script");
- assertThat(field.getValue().toString(), equalTo("5"));
-
- assertThat(searchHit.getSourceAsMap().size(), equalTo(1));
- assertThat(extractValue("message", searchHit.getSourceAsMap()), equalTo("some comment"));
+ .addAggregation(
+ nested("to-comments", "comments").subAggregation(
+ topHits("top-comments").size(1)
+ .highlighter(new HighlightBuilder().field(hlField))
+ .explain(true)
+ .docValueField("comments.user")
+ .scriptField("script", new Script(ScriptType.INLINE, MockScriptEngine.NAME, "5", Collections.emptyMap()))
+ .fetchSource("comments.message", null)
+ .version(true)
+ .sort("comments.date", SortOrder.ASC)
+ )
+ ),
+ response -> {
+ assertHitCount(response, 2);
+ Nested nested = response.getAggregations().get("to-comments");
+ assertThat(nested.getDocCount(), equalTo(4L));
+
+ SearchHits hits = ((TopHits) nested.getAggregations().get("top-comments")).getHits();
+ assertThat(hits.getTotalHits().value, equalTo(4L));
+ SearchHit searchHit = hits.getAt(0);
+ assertThat(searchHit.getId(), equalTo("1"));
+ assertThat(searchHit.getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(searchHit.getNestedIdentity().getOffset(), equalTo(0));
+
+ HighlightField highlightField = searchHit.getHighlightFields().get("comments.message");
+ assertThat(highlightField.fragments().length, equalTo(1));
+ assertThat(highlightField.fragments()[0].string(), equalTo("some comment"));
+
+ // Can't explain nested hit with the main query, since both are in different scopes, and the nested doc may not
+ // even have matched with the main query.
+ // If top_hits had a query option then we could explain that query
+ Explanation explanation = searchHit.getExplanation();
+ assertFalse(explanation.isMatch());
+
+ // Returns the version of the root document. 
Nested docs don't have a separate version + long version = searchHit.getVersion(); + assertThat(version, equalTo(1L)); + + assertThat(searchHit.getMatchedQueries(), arrayContaining("test")); + + DocumentField field = searchHit.field("comments.user"); + assertThat(field.getValue().toString(), equalTo("a")); + + field = searchHit.field("script"); + assertThat(field.getValue().toString(), equalTo("5")); + + assertThat(searchHit.getSourceAsMap().size(), equalTo(1)); + assertThat(extractValue("message", searchHit.getSourceAsMap()), equalTo("some comment")); + } + ); } public void testTopHitsInNested() throws Exception { - SearchResponse searchResponse = prepareSearch("articles").addAggregation( - histogram("dates").field("date") - .interval(5) - .subAggregation( - nested("to-comments", "comments").subAggregation( - topHits("comments").highlighter( - new HighlightBuilder().field( - new HighlightBuilder.Field("comments.message").highlightQuery(matchQuery("comments.message", "text")) - ) - ).sort("comments.id", SortOrder.ASC) + assertNoFailuresAndResponse( + prepareSearch("articles").addAggregation( + histogram("dates").field("date") + .interval(5) + .subAggregation( + nested("to-comments", "comments").subAggregation( + topHits("comments").highlighter( + new HighlightBuilder().field( + new HighlightBuilder.Field("comments.message").highlightQuery(matchQuery("comments.message", "text")) + ) + ).sort("comments.id", SortOrder.ASC) + ) ) - ) - ).get(); - - Histogram histogram = searchResponse.getAggregations().get("dates"); - for (int i = 0; i < numArticles; i += 5) { - Histogram.Bucket bucket = histogram.getBuckets().get(i / 5); - assertThat(bucket.getDocCount(), equalTo(5L)); - - long numNestedDocs = 10 + (5 * i); - Nested nested = bucket.getAggregations().get("to-comments"); - assertThat(nested.getDocCount(), equalTo(numNestedDocs)); - - TopHits hits = nested.getAggregations().get("comments"); - SearchHits searchHits = hits.getHits(); - assertThat(searchHits.getTotalHits().value, equalTo(numNestedDocs)); - for (int j = 0; j < 3; j++) { - assertThat(searchHits.getAt(j).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(searchHits.getAt(j).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(extractValue("id", searchHits.getAt(j).getSourceAsMap()), equalTo(0)); - - HighlightField highlightField = searchHits.getAt(j).getHighlightFields().get("comments.message"); - assertThat(highlightField.getFragments().length, equalTo(1)); - assertThat(highlightField.getFragments()[0].string(), equalTo("some text")); + ), + response -> { + Histogram histogram = response.getAggregations().get("dates"); + for (int i = 0; i < numArticles; i += 5) { + Histogram.Bucket bucket = histogram.getBuckets().get(i / 5); + assertThat(bucket.getDocCount(), equalTo(5L)); + + long numNestedDocs = 10 + (5 * i); + Nested nested = bucket.getAggregations().get("to-comments"); + assertThat(nested.getDocCount(), equalTo(numNestedDocs)); + + TopHits hits = nested.getAggregations().get("comments"); + SearchHits searchHits = hits.getHits(); + assertThat(searchHits.getTotalHits().value, equalTo(numNestedDocs)); + for (int j = 0; j < 3; j++) { + assertThat(searchHits.getAt(j).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(searchHits.getAt(j).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(extractValue("id", searchHits.getAt(j).getSourceAsMap()), equalTo(0)); + + HighlightField highlightField = searchHits.getAt(j).getHighlightFields().get("comments.message"); + 
assertThat(highlightField.fragments().length, equalTo(1));
+                        assertThat(highlightField.fragments()[0].string(), equalTo("some text"));
+                    }
+                }
             }
-        }
+        );
     }
 
     public void testUseMaxDocInsteadOfSize() throws Exception {
@@ -1037,33 +1053,34 @@ public void testTooHighResultWindow() throws Exception {
     }
 
     public void testNoStoredFields() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            terms("terms").executionHint(randomExecutionHint())
-                .field(TERMS_AGGS_FIELD)
-                .subAggregation(topHits("hits").storedField("_none_"))
-        ).get();
-
-        assertNoFailures(response);
-
-        Terms terms = response.getAggregations().get("terms");
-        assertThat(terms, notNullValue());
-        assertThat(terms.getName(), equalTo("terms"));
-        assertThat(terms.getBuckets().size(), equalTo(5));
-
-        for (int i = 0; i < 5; i++) {
-            Terms.Bucket bucket = terms.getBucketByKey("val" + i);
-            assertThat(bucket, notNullValue());
-            assertThat(key(bucket), equalTo("val" + i));
-            assertThat(bucket.getDocCount(), equalTo(10L));
-            TopHits topHits = bucket.getAggregations().get("hits");
-            SearchHits hits = topHits.getHits();
-            assertThat(hits.getTotalHits().value, equalTo(10L));
-            assertThat(hits.getHits().length, equalTo(3));
-            for (SearchHit hit : hits) {
-                assertThat(hit.getSourceAsMap(), nullValue());
-                assertThat(hit.getId(), nullValue());
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                terms("terms").executionHint(randomExecutionHint())
+                    .field(TERMS_AGGS_FIELD)
+                    .subAggregation(topHits("hits").storedField("_none_"))
+            ),
+            response -> {
+                Terms terms = response.getAggregations().get("terms");
+                assertThat(terms, notNullValue());
+                assertThat(terms.getName(), equalTo("terms"));
+                assertThat(terms.getBuckets().size(), equalTo(5));
+
+                for (int i = 0; i < 5; i++) {
+                    Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+                    assertThat(bucket, notNullValue());
+                    assertThat(key(bucket), equalTo("val" + i));
+                    assertThat(bucket.getDocCount(), equalTo(10L));
+                    TopHits topHits = bucket.getAggregations().get("hits");
+                    SearchHits hits = topHits.getHits();
+                    assertThat(hits.getTotalHits().value, equalTo(10L));
+                    assertThat(hits.getHits().length, equalTo(3));
+                    for (SearchHit hit : hits) {
+                        assertThat(hit.getSourceAsMap(), nullValue());
+                        assertThat(hit.getId(), nullValue());
+                    }
+                }
             }
-        }
+        );
     }
 
     /**
@@ -1095,15 +1112,15 @@ public void testScriptCaching() throws Exception {
         );
 
         // Test that a request using a nondeterministic script field does not get cached
-        SearchResponse r = prepareSearch("cache_test_idx").setSize(0)
-            .addAggregation(
-                topHits("foo").scriptField(
-                    "bar",
-                    new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap())
+        assertNoFailures(
+            prepareSearch("cache_test_idx").setSize(0)
+                .addAggregation(
+                    topHits("foo").scriptField(
+                        "bar",
+                        new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap())
+                    )
                 )
-            )
-            .get();
-        assertNoFailures(r);
+        );
 
         assertThat(
             indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
@@ -1115,17 +1132,17 @@ public void testScriptCaching() throws Exception {
         );
 
         // Test that a request using a nondeterministic script sort does not get cached
-        r = prepareSearch("cache_test_idx").setSize(0)
-            .addAggregation(
-                topHits("foo").sort(
-                    SortBuilders.scriptSort(
-                        new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap()),
-                        ScriptSortType.STRING
+        assertNoFailures(
+            prepareSearch("cache_test_idx").setSize(0)
+                .addAggregation(
+                    topHits("foo").sort(
+                        SortBuilders.scriptSort(
+                            new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap()),
+                            ScriptSortType.STRING
+                        )
                     )
                 )
-            )
-            .get();
-        assertNoFailures(r);
+        );
 
         assertThat(
             indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
@@ -1137,12 +1154,15 @@ public void testScriptCaching() throws Exception {
         );
 
         // Test that a request using a deterministic script field does not get cached
-        r = prepareSearch("cache_test_idx").setSize(0)
-            .addAggregation(
-                topHits("foo").scriptField("bar", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "5", Collections.emptyMap()))
-            )
-            .get();
-        assertNoFailures(r);
+        assertNoFailures(
+            prepareSearch("cache_test_idx").setSize(0)
+                .addAggregation(
+                    topHits("foo").scriptField(
+                        "bar",
+                        new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "5", Collections.emptyMap())
+                    )
+                )
+        );
 
         assertThat(
             indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
@@ -1154,17 +1174,17 @@ public void testScriptCaching() throws Exception {
         );
 
         // Test that a request using a deterministic script sort does not get cached
-        r = prepareSearch("cache_test_idx").setSize(0)
-            .addAggregation(
-                topHits("foo").sort(
-                    SortBuilders.scriptSort(
-                        new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "5", Collections.emptyMap()),
-                        ScriptSortType.STRING
+        assertNoFailures(
+            prepareSearch("cache_test_idx").setSize(0)
+                .addAggregation(
+                    topHits("foo").sort(
+                        SortBuilders.scriptSort(
+                            new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "5", Collections.emptyMap()),
+                            ScriptSortType.STRING
+                        )
                     )
                 )
-            )
-            .get();
-        assertNoFailures(r);
+        );
 
         assertThat(
             indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
@@ -1176,8 +1196,7 @@ public void testScriptCaching() throws Exception {
         );
 
         // Ensure that non-scripted requests are cached as normal
-        r = prepareSearch("cache_test_idx").setSize(0).addAggregation(topHits("foo")).get();
-        assertNoFailures(r);
+        assertNoFailures(prepareSearch("cache_test_idx").setSize(0).addAggregation(topHits("foo")));
 
         assertThat(
             indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
@@ -1194,62 +1213,66 @@ public void testScriptCaching() throws Exception {
 
     public void testWithRescore() {
         // Rescore with default sort on relevancy (score)
-        {
-            SearchResponse response = prepareSearch("idx").addRescorer(new QueryRescorerBuilder(new MatchAllQueryBuilder().boost(3.0f)))
-                .addAggregation(terms("terms").field(TERMS_AGGS_FIELD).subAggregation(topHits("hits")))
-                .get();
-            Terms terms = response.getAggregations().get("terms");
-            for (Terms.Bucket bucket : terms.getBuckets()) {
-                TopHits topHits = bucket.getAggregations().get("hits");
-                for (SearchHit hit : topHits.getHits().getHits()) {
-                    assertThat(hit.getScore(), equalTo(4.0f));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addRescorer(new QueryRescorerBuilder(new MatchAllQueryBuilder().boost(3.0f)))
+                .addAggregation(terms("terms").field(TERMS_AGGS_FIELD).subAggregation(topHits("hits"))),
+            response -> {
+                Terms terms = response.getAggregations().get("terms");
+                for (Terms.Bucket bucket : terms.getBuckets()) {
+                    TopHits topHits = bucket.getAggregations().get("hits");
+                    for (SearchHit hit : topHits.getHits().getHits()) {
+                        assertThat(hit.getScore(), equalTo(4.0f));
+                    }
                 }
             }
-        }
+        );
 
-        {
-            SearchResponse response = prepareSearch("idx").addRescorer(new QueryRescorerBuilder(new MatchAllQueryBuilder().boost(3.0f)))
-                .addAggregation(terms("terms").field(TERMS_AGGS_FIELD).subAggregation(topHits("hits").sort(SortBuilders.scoreSort())))
-                .get();
-            Terms terms = response.getAggregations().get("terms");
-            for (Terms.Bucket bucket : terms.getBuckets()) {
-                TopHits topHits = bucket.getAggregations().get("hits");
-                for (SearchHit hit : topHits.getHits().getHits()) {
-                    assertThat(hit.getScore(), equalTo(4.0f));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addRescorer(new QueryRescorerBuilder(new MatchAllQueryBuilder().boost(3.0f)))
+                .addAggregation(terms("terms").field(TERMS_AGGS_FIELD).subAggregation(topHits("hits").sort(SortBuilders.scoreSort()))),
+            response -> {
+                Terms terms = response.getAggregations().get("terms");
+                for (Terms.Bucket bucket : terms.getBuckets()) {
+                    TopHits topHits = bucket.getAggregations().get("hits");
+                    for (SearchHit hit : topHits.getHits().getHits()) {
+                        assertThat(hit.getScore(), equalTo(4.0f));
+                    }
                 }
             }
-        }
+        );
 
         // Rescore should not be applied if the sort order is not relevancy
-        {
-            SearchResponse response = prepareSearch("idx").addRescorer(new QueryRescorerBuilder(new MatchAllQueryBuilder().boost(3.0f)))
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addRescorer(new QueryRescorerBuilder(new MatchAllQueryBuilder().boost(3.0f)))
                 .addAggregation(
                     terms("terms").field(TERMS_AGGS_FIELD).subAggregation(topHits("hits").sort(SortBuilders.fieldSort("_index")))
-                )
-                .get();
-            Terms terms = response.getAggregations().get("terms");
-            for (Terms.Bucket bucket : terms.getBuckets()) {
-                TopHits topHits = bucket.getAggregations().get("hits");
-                for (SearchHit hit : topHits.getHits().getHits()) {
-                    assertThat(hit.getScore(), equalTo(Float.NaN));
+                ),
+            response -> {
+                Terms terms = response.getAggregations().get("terms");
+                for (Terms.Bucket bucket : terms.getBuckets()) {
+                    TopHits topHits = bucket.getAggregations().get("hits");
+                    for (SearchHit hit : topHits.getHits().getHits()) {
+                        assertThat(hit.getScore(), equalTo(Float.NaN));
+                    }
                 }
             }
-        }
+        );
 
-        {
-            SearchResponse response = prepareSearch("idx").addRescorer(new QueryRescorerBuilder(new MatchAllQueryBuilder().boost(3.0f)))
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addRescorer(new QueryRescorerBuilder(new MatchAllQueryBuilder().boost(3.0f)))
                 .addAggregation(
                     terms("terms").field(TERMS_AGGS_FIELD)
                         .subAggregation(topHits("hits").sort(SortBuilders.scoreSort()).sort(SortBuilders.fieldSort("_index")))
-                )
-                .get();
-            Terms terms = response.getAggregations().get("terms");
-            for (Terms.Bucket bucket : terms.getBuckets()) {
-                TopHits topHits = bucket.getAggregations().get("hits");
-                for (SearchHit hit : topHits.getHits().getHits()) {
-                    assertThat(hit.getScore(), equalTo(Float.NaN));
+                ),
+            response -> {
+                Terms terms = response.getAggregations().get("terms");
+                for (Terms.Bucket bucket : terms.getBuckets()) {
+                    TopHits topHits = bucket.getAggregations().get("hits");
+                    for (SearchHit hit : topHits.getHits().getHits()) {
+                        assertThat(hit.getScore(), equalTo(Float.NaN));
+                    }
                 }
             }
-        }
+        );
     }
 }
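Every hunk in this patch applies the same mechanical transformation: instead of materializing a SearchResponse with .get()/.actionGet() and asserting on a local variable, the request is handed to an ElasticsearchAssertions helper that runs the assertion block and then releases the response. As a rough sketch of the contract these helpers provide (an illustration, not the verbatim ElasticsearchAssertions source; overloads for ActionFuture and multiple requests are elided):

    import java.util.function.Consumer;

    // Execute the request, hand the response to the assertion block, and
    // guarantee the response's reference count is released even when an
    // assertion inside the block throws.
    public static void assertResponse(SearchRequestBuilder request, Consumer<SearchResponse> consumer) {
        SearchResponse response = request.get();
        try {
            consumer.accept(response);
        } finally {
            response.decRef(); // releases the pooled buffers backing the response
        }
    }

    // Same shape, but checks for shard failures before running the block.
    public static void assertNoFailuresAndResponse(SearchRequestBuilder request, Consumer<SearchResponse> consumer) {
        assertResponse(request, response -> {
            assertNoFailures(response);
            consumer.accept(response);
        });
    }

This is why the refactored tests no longer declare a local SearchResponse: the consumer's scope is the response's lifetime, so nothing can leak past the helper.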
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java
index d122ee10d90a5..2b60456e2b2ba 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java
@@ -7,7 +7,6 @@
  */
 package org.elasticsearch.search.aggregations.metrics;
 
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.script.Script;
@@ -38,6 +37,7 @@
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.notNullValue;
@@ -67,132 +67,147 @@ protected Collection<Class<? extends Plugin>> nodePlugins() {
     }
 
     public void testUnmapped() throws Exception {
-        SearchResponse searchResponse = prepareSearch("idx_unmapped").setQuery(matchAllQuery())
-            .addAggregation(count("count").field("value"))
-            .get();
-
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L));
-
-        ValueCount valueCount = searchResponse.getAggregations().get("count");
-        assertThat(valueCount, notNullValue());
-        assertThat(valueCount.getName(), equalTo("count"));
-        assertThat(valueCount.getValue(), equalTo(0L));
+        assertResponse(prepareSearch("idx_unmapped").setQuery(matchAllQuery()).addAggregation(count("count").field("value")), response -> {
+            assertThat(response.getHits().getTotalHits().value, equalTo(0L));
+
+            ValueCount valueCount = response.getAggregations().get("count");
+            assertThat(valueCount, notNullValue());
+            assertThat(valueCount.getName(), equalTo("count"));
+            assertThat(valueCount.getValue(), equalTo(0L));
+        });
     }
 
     public void testSingleValuedField() throws Exception {
-        SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(count("count").field("value")).get();
-
-        assertHitCount(searchResponse, 10);
-
-        ValueCount valueCount = searchResponse.getAggregations().get("count");
-        assertThat(valueCount, notNullValue());
-        assertThat(valueCount.getName(), equalTo("count"));
-        assertThat(valueCount.getValue(), equalTo(10L));
+        assertResponse(prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(count("count").field("value")), response -> {
+            assertHitCount(response, 10);
+
+            ValueCount valueCount = response.getAggregations().get("count");
+            assertThat(valueCount, notNullValue());
+            assertThat(valueCount.getName(), equalTo("count"));
+            assertThat(valueCount.getValue(), equalTo(10L));
+        });
     }
 
     public void testSingleValuedFieldGetProperty() throws Exception {
-        SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery())
-            .addAggregation(global("global").subAggregation(count("count").field("value")))
-            .get();
-
-        assertHitCount(searchResponse, 10);
-
-        Global global = searchResponse.getAggregations().get("global");
-        assertThat(global, notNullValue());
-        assertThat(global.getName(), equalTo("global"));
-        assertThat(global.getDocCount(), equalTo(10L));
-        assertThat(global.getAggregations(), notNullValue());
-        assertThat(global.getAggregations().asMap().size(), equalTo(1));
-
-        ValueCount valueCount = global.getAggregations().get("count");
-        assertThat(valueCount, notNullValue());
-        assertThat(valueCount.getName(), equalTo("count"));
-        assertThat(valueCount.getValue(), equalTo(10L));
-        assertThat((ValueCount) ((InternalAggregation) global).getProperty("count"), equalTo(valueCount));
-        assertThat((double) ((InternalAggregation) global).getProperty("count.value"), equalTo(10d));
-        assertThat((double) ((InternalAggregation) valueCount).getProperty("value"), equalTo(10d));
+        assertResponse(
+            prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(global("global").subAggregation(count("count").field("value"))),
+            response -> {
+
+                assertHitCount(response, 10);
+
+                Global global = response.getAggregations().get("global");
+                assertThat(global, notNullValue());
+                assertThat(global.getName(), equalTo("global"));
+                assertThat(global.getDocCount(), equalTo(10L));
+                assertThat(global.getAggregations(), notNullValue());
+                assertThat(global.getAggregations().asMap().size(), equalTo(1));
+
+                ValueCount valueCount = global.getAggregations().get("count");
+                assertThat(valueCount, notNullValue());
+                assertThat(valueCount.getName(), equalTo("count"));
+                assertThat(valueCount.getValue(), equalTo(10L));
+                assertThat((ValueCount) ((InternalAggregation) global).getProperty("count"), equalTo(valueCount));
+                assertThat((double) ((InternalAggregation) global).getProperty("count.value"), equalTo(10d));
+                assertThat((double) ((InternalAggregation) valueCount).getProperty("value"), equalTo(10d));
            }
+        );
     }
 
     public void testSingleValuedFieldPartiallyUnmapped() throws Exception {
-        SearchResponse searchResponse = prepareSearch("idx", "idx_unmapped").setQuery(matchAllQuery())
-            .addAggregation(count("count").field("value"))
-            .get();
-
-        assertHitCount(searchResponse, 10);
-
-        ValueCount valueCount = searchResponse.getAggregations().get("count");
-        assertThat(valueCount, notNullValue());
-        assertThat(valueCount.getName(), equalTo("count"));
-        assertThat(valueCount.getValue(), equalTo(10L));
+        assertResponse(
+            prepareSearch("idx", "idx_unmapped").setQuery(matchAllQuery()).addAggregation(count("count").field("value")),
+            response -> {
+                assertHitCount(response, 10);
+
+                ValueCount valueCount = response.getAggregations().get("count");
+                assertThat(valueCount, notNullValue());
+                assertThat(valueCount.getName(), equalTo("count"));
+                assertThat(valueCount.getValue(), equalTo(10L));
+            }
+        );
     }
 
     public void testMultiValuedField() throws Exception {
-        SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(count("count").field("values")).get();
-
-        assertHitCount(searchResponse, 10);
-
-        ValueCount valueCount = searchResponse.getAggregations().get("count");
-        assertThat(valueCount, notNullValue());
-        assertThat(valueCount.getName(), equalTo("count"));
-        assertThat(valueCount.getValue(), equalTo(20L));
+        assertResponse(prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(count("count").field("values")), response -> {
+            assertHitCount(response, 10);
+
+            ValueCount valueCount = response.getAggregations().get("count");
+            assertThat(valueCount, notNullValue());
+            assertThat(valueCount.getName(), equalTo("count"));
+            assertThat(valueCount.getValue(), equalTo(20L));
+        });
     }
 
     public void testSingleValuedScript() throws Exception {
-        SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery())
-            .addAggregation(
-                count("count").script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, VALUE_FIELD_SCRIPT, Collections.emptyMap()))
-            )
-            .get();
-
-        assertHitCount(searchResponse, 10);
-
-        ValueCount valueCount = searchResponse.getAggregations().get("count");
-        assertThat(valueCount, notNullValue());
-        assertThat(valueCount.getName(), equalTo("count"));
-        assertThat(valueCount.getValue(), equalTo(10L));
+        assertResponse(
+            prepareSearch("idx").setQuery(matchAllQuery())
                .addAggregation(
+                    count("count").script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, VALUE_FIELD_SCRIPT, Collections.emptyMap()))
+                ),
+            response -> {
+                assertHitCount(response, 10);
+
+                ValueCount valueCount = response.getAggregations().get("count");
+                assertThat(valueCount, notNullValue());
+                assertThat(valueCount.getName(), equalTo("count"));
+                assertThat(valueCount.getValue(), equalTo(10L));
+            }
+        );
     }
 
     public void testMultiValuedScript() throws Exception {
-        SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery())
-            .addAggregation(
-                count("count").script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, SUM_VALUES_FIELD_SCRIPT, Collections.emptyMap()))
-            )
-            .get();
-
-        assertHitCount(searchResponse, 10);
-
-        ValueCount valueCount = searchResponse.getAggregations().get("count");
-        assertThat(valueCount, notNullValue());
-        assertThat(valueCount.getName(), equalTo("count"));
-        assertThat(valueCount.getValue(), equalTo(20L));
+        assertResponse(
+            prepareSearch("idx").setQuery(matchAllQuery())
+                .addAggregation(
+                    count("count").script(
+                        new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, SUM_VALUES_FIELD_SCRIPT, Collections.emptyMap())
+                    )
+                ),
+            response -> {
+                assertHitCount(response, 10);
+
+                ValueCount valueCount = response.getAggregations().get("count");
+                assertThat(valueCount, notNullValue());
+                assertThat(valueCount.getName(), equalTo("count"));
+                assertThat(valueCount.getValue(), equalTo(20L));
+            }
+        );
     }
 
     public void testSingleValuedScriptWithParams() throws Exception {
        Map<String, Object> params = Collections.singletonMap("field", "value");
-        SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery())
-            .addAggregation(count("count").script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, SUM_FIELD_PARAMS_SCRIPT, params)))
-            .get();
-
-        assertHitCount(searchResponse, 10);
-
-        ValueCount valueCount = searchResponse.getAggregations().get("count");
-        assertThat(valueCount, notNullValue());
-        assertThat(valueCount.getName(), equalTo("count"));
-        assertThat(valueCount.getValue(), equalTo(10L));
+        assertResponse(
+            prepareSearch("idx").setQuery(matchAllQuery())
+                .addAggregation(
+                    count("count").script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, SUM_FIELD_PARAMS_SCRIPT, params))
+                ),
+            response -> {
+                assertHitCount(response, 10);
+
+                ValueCount valueCount = response.getAggregations().get("count");
+                assertThat(valueCount, notNullValue());
+                assertThat(valueCount.getName(), equalTo("count"));
+                assertThat(valueCount.getValue(), equalTo(10L));
+            }
+        );
     }
 
     public void testMultiValuedScriptWithParams() throws Exception {
        Map<String, Object> params = Collections.singletonMap("field", "values");
-        SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery())
-            .addAggregation(count("count").script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, SUM_FIELD_PARAMS_SCRIPT, params)))
-            .get();
-
-        assertHitCount(searchResponse, 10);
-
-        ValueCount valueCount = searchResponse.getAggregations().get("count");
-        assertThat(valueCount, notNullValue());
-        assertThat(valueCount.getName(), equalTo("count"));
-        assertThat(valueCount.getValue(), equalTo(20L));
+        assertResponse(
+            prepareSearch("idx").setQuery(matchAllQuery())
+                .addAggregation(
+                    count("count").script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, SUM_FIELD_PARAMS_SCRIPT, params))
+                ),
+            response -> {
+                assertHitCount(response, 10);
+
+                ValueCount valueCount = response.getAggregations().get("count");
+                assertThat(valueCount, notNullValue());
+                assertThat(valueCount.getName(), equalTo("count"));
+                assertThat(valueCount.getValue(), equalTo(20L));
+            }
+        );
     }
 
     /**
@@ -221,12 +236,13 @@ public void testScriptCaching() throws Exception {
         );
 
         // Test that a request using a nondeterministic script does not get cached
-        SearchResponse r = prepareSearch("cache_test_idx").setSize(0)
-            .addAggregation(
-                count("foo").field("d").script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, RANDOM_SCRIPT, Collections.emptyMap()))
-            )
-            .get();
-        assertNoFailures(r);
+        assertNoFailures(
+            prepareSearch("cache_test_idx").setSize(0)
+                .addAggregation(
+                    count("foo").field("d")
+                        .script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, RANDOM_SCRIPT, Collections.emptyMap()))
+                )
+        );
 
         assertThat(
             indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
@@ -238,13 +254,13 @@ public void testScriptCaching() throws Exception {
         );
 
         // Test that a request using a deterministic script gets cached
-        r = prepareSearch("cache_test_idx").setSize(0)
-            .addAggregation(
-                count("foo").field("d")
-                    .script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, VALUE_FIELD_SCRIPT, Collections.emptyMap()))
-            )
-            .get();
-        assertNoFailures(r);
+        assertNoFailures(
+            prepareSearch("cache_test_idx").setSize(0)
+                .addAggregation(
+                    count("foo").field("d")
+                        .script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, VALUE_FIELD_SCRIPT, Collections.emptyMap()))
+                )
+        );
 
         assertThat(
             indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
@@ -256,8 +272,7 @@ public void testScriptCaching() throws Exception {
         );
 
         // Ensure that non-scripted requests are cached as normal
-        r = prepareSearch("cache_test_idx").setSize(0).addAggregation(count("foo").field("d")).get();
-        assertNoFailures(r);
+        assertNoFailures(prepareSearch("cache_test_idx").setSize(0).addAggregation(count("foo").field("d")));
 
         assertThat(
             indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
@@ -270,34 +285,35 @@ public void testScriptCaching() throws Exception {
     }
 
     public void testOrderByEmptyAggregation() throws Exception {
-        SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery())
-            .addAggregation(
-                terms("terms").field("value")
-                    .order(BucketOrder.compound(BucketOrder.aggregation("filter>count", true)))
-                    .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(count("count").field("value")))
-            )
-            .get();
-
-        assertHitCount(searchResponse, 10);
-
-        Terms terms = searchResponse.getAggregations().get("terms");
-        assertThat(terms, notNullValue());
-        List<? extends Terms.Bucket> buckets = terms.getBuckets();
-        assertThat(buckets, notNullValue());
-        assertThat(buckets.size(), equalTo(10));
-
-        for (int i = 0; i < 10; i++) {
-            Terms.Bucket bucket = buckets.get(i);
-            assertThat(bucket, notNullValue());
-            assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1));
-            assertThat(bucket.getDocCount(), equalTo(1L));
-            Filter filter = bucket.getAggregations().get("filter");
-            assertThat(filter, notNullValue());
-            assertThat(filter.getDocCount(), equalTo(0L));
-            ValueCount count = filter.getAggregations().get("count");
-            assertThat(count, notNullValue());
-            assertThat(count.value(), equalTo(0.0));
-
-        }
+        assertResponse(
+            prepareSearch("idx").setQuery(matchAllQuery())
+                .addAggregation(
+                    terms("terms").field("value")
+                        .order(BucketOrder.compound(BucketOrder.aggregation("filter>count", true)))
+                        .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(count("count").field("value")))
+                ),
+            response -> {
+                assertHitCount(response, 10);
+
+                Terms terms = response.getAggregations().get("terms");
+                assertThat(terms, notNullValue());
+                List<? extends Terms.Bucket> buckets = terms.getBuckets();
+                assertThat(buckets, notNullValue());
+                assertThat(buckets.size(), equalTo(10));
+
+                for (int i = 0; i < 10; i++) {
+                    Terms.Bucket bucket = buckets.get(i);
+                    assertThat(bucket, notNullValue());
+                    assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1));
+                    assertThat(bucket.getDocCount(), equalTo(1L));
+                    Filter filter = bucket.getAggregations().get("filter");
+                    assertThat(filter, notNullValue());
+                    assertThat(filter.getDocCount(), equalTo(0L));
+                    ValueCount count = filter.getAggregations().get("count");
+                    assertThat(count, notNullValue());
+                    assertThat(count.value(), equalTo(0.0));
+                }
+            }
+        );
     }
 }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchRedStateIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchRedStateIndexIT.java
index 74acaf95bd24a..e897196cb3c6c 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchRedStateIndexIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchRedStateIndexIT.java
@@ -11,7 +11,6 @@
 import org.elasticsearch.action.NoShardAvailableActionException;
 import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
 import org.elasticsearch.action.search.SearchPhaseExecutionException;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.search.ShardSearchFailure;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
@@ -27,6 +26,7 @@
 import java.util.List;
 
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.hamcrest.Matchers.allOf;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.emptyArray;
@@ -43,15 +43,16 @@ public void testAllowPartialsWithRedState() throws Exception {
         final int numShards = cluster().numDataNodes() + 2;
         buildRedIndex(numShards);
 
-        SearchResponse searchResponse = prepareSearch().setSize(0).setAllowPartialSearchResults(true).get();
-        assertThat(RestStatus.OK, equalTo(searchResponse.status()));
-        assertThat("Expect some shards failed", searchResponse.getFailedShards(), allOf(greaterThan(0), lessThanOrEqualTo(numShards)));
-        assertThat("Expect no shards skipped", searchResponse.getSkippedShards(), equalTo(0));
-        assertThat("Expect subset of shards successful", searchResponse.getSuccessfulShards(), lessThan(numShards));
-        assertThat("Expected total shards", searchResponse.getTotalShards(), equalTo(numShards));
-        for (ShardSearchFailure failure : searchResponse.getShardFailures()) {
-            assertThat(failure.getCause(), instanceOf(NoShardAvailableActionException.class));
-        }
+        assertResponse(prepareSearch().setSize(0).setAllowPartialSearchResults(true), response -> {
+            assertThat(RestStatus.OK, equalTo(response.status()));
+            assertThat("Expect some shards failed", response.getFailedShards(), allOf(greaterThan(0), lessThanOrEqualTo(numShards)));
+            assertThat("Expect no shards skipped", response.getSkippedShards(), equalTo(0));
+            assertThat("Expect subset of shards successful", response.getSuccessfulShards(), lessThan(numShards));
+            assertThat("Expected total shards", response.getTotalShards(), equalTo(numShards));
+            for (ShardSearchFailure failure : response.getShardFailures()) {
+                assertThat(failure.getCause(), instanceOf(NoShardAvailableActionException.class));
+            }
+        });
     }
 
     public void testClusterAllowPartialsWithRedState() throws Exception {
@@ -60,18 +61,19 @@ public void testClusterAllowPartialsWithRedState() throws Exception {
 
         setClusterDefaultAllowPartialResults(true);
 
-        SearchResponse searchResponse = prepareSearch().setSize(0).get();
-        assertThat(RestStatus.OK, equalTo(searchResponse.status()));
-        assertThat("Expect some shards failed", searchResponse.getFailedShards(), allOf(greaterThan(0), lessThanOrEqualTo(numShards)));
-        assertThat("Expect no shards skipped", searchResponse.getSkippedShards(), equalTo(0));
-        assertThat("Expect subset of shards successful", searchResponse.getSuccessfulShards(), lessThan(numShards));
-        assertThat("Expected total shards", searchResponse.getTotalShards(), equalTo(numShards));
-        for (ShardSearchFailure failure : searchResponse.getShardFailures()) {
-            assertThat(failure.getCause(), instanceOf(NoShardAvailableActionException.class));
-            assertThat(failure.getCause().getStackTrace(), emptyArray());
-            // We don't write out the entire, repetitive stacktrace in the reason
-            assertThat(failure.reason(), equalTo("org.elasticsearch.action.NoShardAvailableActionException" + System.lineSeparator()));
-        }
+        assertResponse(prepareSearch().setSize(0), response -> {
+            assertThat(RestStatus.OK, equalTo(response.status()));
+            assertThat("Expect some shards failed", response.getFailedShards(), allOf(greaterThan(0), lessThanOrEqualTo(numShards)));
+            assertThat("Expect no shards skipped", response.getSkippedShards(), equalTo(0));
+            assertThat("Expect subset of shards successful", response.getSuccessfulShards(), lessThan(numShards));
+            assertThat("Expected total shards", response.getTotalShards(), equalTo(numShards));
+            for (ShardSearchFailure failure : response.getShardFailures()) {
+                assertThat(failure.getCause(), instanceOf(NoShardAvailableActionException.class));
+                assertThat(failure.getCause().getStackTrace(), emptyArray());
+                // We don't write out the entire, repetitive stacktrace in the reason
+                assertThat(failure.reason(), equalTo("org.elasticsearch.action.NoShardAvailableActionException" + System.lineSeparator()));
+            }
+        });
     }
 
     public void testDisallowPartialsWithRedState() throws Exception {
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java
index eb6dd2f0767f1..19466a949a628 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java
@@ -9,13 +9,13 @@
 package org.elasticsearch.search.basic;
 
 import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.test.ESIntegTestCase;
 
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 
 /**
@@ -66,32 +66,37 @@ private void searchWhileCreatingIndex(boolean createIndex, int numberOfReplicas)
             // first, verify that search normal search works
             assertHitCount(prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "test")), 1);
             Client client = client();
-            SearchResponse searchResponse = client.prepareSearch("test")
-                .setPreference(preference + Integer.toString(counter++))
-                .setQuery(QueryBuilders.termQuery("field", "test"))
-                .get();
-            if (searchResponse.getHits().getTotalHits().value != 1) {
-                refresh();
-                SearchResponse searchResponseAfterRefresh = client.prepareSearch("test")
-                    .setPreference(preference)
-                    .setQuery(QueryBuilders.termQuery("field", "test"))
-                    .get();
-                logger.info(
-                    "hits count mismatch on any shard search failed, post explicit refresh hits are {}",
-                    searchResponseAfterRefresh.getHits().getTotalHits().value
-                );
-                ensureGreen();
-                SearchResponse searchResponseAfterGreen = client.prepareSearch("test")
-                    .setPreference(preference)
-                    .setQuery(QueryBuilders.termQuery("field", "test"))
-                    .get();
-                logger.info(
-                    "hits count mismatch on any shard search failed, post explicit wait for green hits are {}",
-                    searchResponseAfterGreen.getHits().getTotalHits().value
-                );
-                assertHitCount(searchResponse, 1);
-            }
-            assertHitCount(searchResponse, 1);
+            assertResponse(
+                client.prepareSearch("test")
+                    .setPreference(preference + Integer.toString(counter++))
+                    .setQuery(QueryBuilders.termQuery("field", "test")),
+                searchResponse -> {
+                    if (searchResponse.getHits().getTotalHits().value != 1) {
+                        refresh();
+                        assertResponse(
+                            client.prepareSearch("test").setPreference(preference).setQuery(QueryBuilders.termQuery("field", "test")),
+                            searchResponseAfterRefresh -> {
+                                logger.info(
+                                    "hits count mismatch on any shard search failed, post explicit refresh hits are {}",
+                                    searchResponseAfterRefresh.getHits().getTotalHits().value
+                                );
+                                ensureGreen();
+                                assertResponse(
+                                    client.prepareSearch("test")
+                                        .setPreference(preference)
+                                        .setQuery(QueryBuilders.termQuery("field", "test")),
+                                    searchResponseAfterGreen -> logger.info(
+                                        "hits count mismatch on any shard search failed, post explicit wait for green hits are {}",
+                                        searchResponseAfterGreen.getHits().getTotalHits().value
+                                    )
+                                );
+                            }
+                        );
+                        assertHitCount(searchResponse, 1);
+                    }
+                    assertHitCount(searchResponse, 1);
+                }
+            );
             status = clusterAdmin().prepareHealth("test").get().getStatus();
             internalCluster().ensureAtLeastNumDataNodes(numberOfReplicas + 1);
         }
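The SearchWhileCreatingIndexIT change just above shows that these helpers nest: the follow-up diagnostic searches after a refresh and after waiting for green each run inside the outer consumer. A minimal sketch of why that is safe, assuming each helper releases only the response it obtained (the names query, client, and logger stand in for the surrounding test fixtures):

    assertResponse(client.prepareSearch("test").setQuery(query), outer -> {
        if (outer.getHits().getTotalHits().value != 1) {
            refresh();
            // The inner response is acquired and released independently;
            // `outer` stays valid for the whole enclosing lambda.
            assertResponse(
                client.prepareSearch("test").setQuery(query),
                inner -> logger.info("post-refresh hits: {}", inner.getHits().getTotalHits().value)
            );
        }
        assertHitCount(outer, 1);
    });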
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java
index 24df07217a5a2..f568630168494 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java
@@ -12,7 +12,6 @@
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.search.SearchPhaseExecutionException;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.search.SearchHits;
 import org.elasticsearch.test.ESIntegTestCase;
@@ -25,6 +24,7 @@
 
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.formatShardStatus;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.equalTo;
@@ -74,33 +74,34 @@ private void testSearchAndRelocateConcurrently(final int numberOfReplicas) throw
             public void run() {
                 try {
                     while (stop.get() == false) {
-                        SearchResponse sr = prepareSearch().setSize(numDocs).get();
-                        if (sr.getHits().getTotalHits().value != numDocs) {
-                            // if we did not search all shards but had no serious failures that is potentially fine
-                            // if only the hit-count is wrong. this can happen if the cluster-state is behind when the
-                            // request comes in. It's a small window but a known limitation.
-                            if (sr.getTotalShards() != sr.getSuccessfulShards()
-                                && Stream.of(sr.getShardFailures())
-                                    .allMatch(ssf -> ssf.getCause() instanceof NoShardAvailableActionException)) {
-                                nonCriticalExceptions.add(
-                                    "Count is "
-                                        + sr.getHits().getTotalHits().value
-                                        + " but "
-                                        + numDocs
-                                        + " was expected. "
-                                        + formatShardStatus(sr)
-                                );
-                            } else {
-                                assertHitCount(sr, numDocs);
+                        assertResponse(prepareSearch().setSize(numDocs), response -> {
+                            if (response.getHits().getTotalHits().value != numDocs) {
+                                // if we did not search all shards but had no serious failures that is potentially fine
+                                // if only the hit-count is wrong. this can happen if the cluster-state is behind when the
+                                // request comes in. It's a small window but a known limitation.
+                                if (response.getTotalShards() != response.getSuccessfulShards()
+                                    && Stream.of(response.getShardFailures())
+                                        .allMatch(ssf -> ssf.getCause() instanceof NoShardAvailableActionException)) {
+                                    nonCriticalExceptions.add(
+                                        "Count is "
+                                            + response.getHits().getTotalHits().value
+                                            + " but "
+                                            + numDocs
+                                            + " was expected. "
+                                            + formatShardStatus(response)
+                                    );
+                                } else {
+                                    assertHitCount(response, numDocs);
+                                }
                             }
-                        }
-                        final SearchHits sh = sr.getHits();
-                        assertThat(
-                            "Expected hits to be the same size the actual hits array",
-                            sh.getTotalHits().value,
-                            equalTo((long) (sh.getHits().length))
-                        );
+                            final SearchHits sh = response.getHits();
+                            assertThat(
+                                "Expected hits to be the same size the actual hits array",
+                                sh.getTotalHits().value,
+                                equalTo((long) (sh.getHits().length))
+                            );
+                        });
                         // this is the more critical bug: that the actual hit array has a different size than the
                         // actual number of hits.
                     }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java
index 6f701e956788b..ed88a66272f8f 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java
@@ -16,7 +16,6 @@
 import org.elasticsearch.action.DocWriteResponse;
 import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
 import org.elasticsearch.action.search.SearchPhaseExecutionException;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
@@ -41,6 +40,7 @@
 import java.util.concurrent.ExecutionException;
 
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 
 public class SearchWithRandomExceptionsIT extends ESIntegTestCase {
 
@@ -125,28 +125,36 @@ public void testRandomExceptions() throws IOException, InterruptedException, Exe
         NumShards test = getNumShards("test");
 
         final int numSearches = scaledRandomIntBetween(100, 200);
+        final int finalNumCreated = numCreated;
         // we don't check anything here really just making sure we don't leave any open files or a broken index behind.
         for (int i = 0; i < numSearches; i++) {
             try {
                 int docToQuery = between(0, numDocs - 1);
                 int expectedResults = added[docToQuery] ? 1 : 0;
                 logger.info("Searching for [test:{}]", English.intToEnglish(docToQuery));
-                SearchResponse searchResponse = prepareSearch().setQuery(QueryBuilders.matchQuery("test", English.intToEnglish(docToQuery)))
-                    .setSize(expectedResults)
-                    .get();
-                logger.info("Successful shards: [{}] numShards: [{}]", searchResponse.getSuccessfulShards(), test.numPrimaries);
-                if (searchResponse.getSuccessfulShards() == test.numPrimaries && refreshFailed == false) {
-                    assertResultsAndLogOnFailure(expectedResults, searchResponse);
-                }
+                assertResponse(
+                    prepareSearch().setQuery(QueryBuilders.matchQuery("test", English.intToEnglish(docToQuery))).setSize(expectedResults),
+                    response -> {
+                        logger.info("Successful shards: [{}] numShards: [{}]", response.getSuccessfulShards(), test.numPrimaries);
+                        if (response.getSuccessfulShards() == test.numPrimaries && refreshFailed == false) {
+                            assertResultsAndLogOnFailure(expectedResults, response);
+                        }
+                    }
+                );
                 // check match all
-                searchResponse = prepareSearch().setQuery(QueryBuilders.matchAllQuery())
-                    .setSize(numCreated)
-                    .addSort("_id", SortOrder.ASC)
-                    .get();
-                logger.info("Match all Successful shards: [{}] numShards: [{}]", searchResponse.getSuccessfulShards(), test.numPrimaries);
-                if (searchResponse.getSuccessfulShards() == test.numPrimaries && refreshFailed == false) {
-                    assertResultsAndLogOnFailure(numCreated, searchResponse);
-                }
+                assertResponse(
+                    prepareSearch().setQuery(QueryBuilders.matchAllQuery()).setSize(numCreated).addSort("_id", SortOrder.ASC),
+                    response -> {
+                        logger.info(
+                            "Match all Successful shards: [{}] numShards: [{}]",
+                            response.getSuccessfulShards(),
+                            test.numPrimaries
+                        );
+                        if (response.getSuccessfulShards() == test.numPrimaries && refreshFailed == false) {
+                            assertResultsAndLogOnFailure(finalNumCreated, response);
+                        }
+                    }
+                );
             } catch (SearchPhaseExecutionException ex) {
                 logger.info("expected SearchPhaseException: [{}]", ex.getMessage());
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java
index 54ad0cd7e0cff..a541187fb599b 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java
@@ -15,7 +15,6 @@
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
 import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
 import org.elasticsearch.action.search.SearchPhaseExecutionException;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.core.TimeValue;
@@ -33,6 +32,7 @@
 import java.util.concurrent.ExecutionException;
 
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 
 public class SearchWithRandomIOExceptionsIT extends ESIntegTestCase {
 
@@ -41,7 +41,6 @@ protected Collection<Class<? extends Plugin>> nodePlugins() {
         return Arrays.asList(MockFSIndexStore.TestPlugin.class);
     }
 
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99174")
     public void testRandomDirectoryIOExceptions() throws IOException, InterruptedException, ExecutionException {
         String mapping = Strings.toString(
             XContentFactory.jsonBuilder()
@@ -148,32 +147,39 @@ public void testRandomDirectoryIOExceptions() throws IOException, InterruptedExc
             refreshResponse.getTotalShards()
         );
         final int numSearches = scaledRandomIntBetween(10, 20);
+        final int finalNumCreated = numCreated;
+        final int finalNumInitialDocs = numInitialDocs;
         // we don't check anything here really just making sure we don't leave any open files or a broken index behind.
         for (int i = 0; i < numSearches; i++) {
             try {
                 int docToQuery = between(0, numDocs - 1);
                 int expectedResults = added[docToQuery] ? 1 : 0;
                 logger.info("Searching for [test:{}]", English.intToEnglish(docToQuery));
-                SearchResponse searchResponse = prepareSearch().setQuery(QueryBuilders.matchQuery("test", English.intToEnglish(docToQuery)))
-                    .setSize(expectedResults)
-                    .get();
-                logger.info("Successful shards: [{}] numShards: [{}]", searchResponse.getSuccessfulShards(), numShards.numPrimaries);
-                if (searchResponse.getSuccessfulShards() == numShards.numPrimaries && refreshFailed == false) {
-                    assertResultsAndLogOnFailure(expectedResults, searchResponse);
-                }
+                assertResponse(
+                    prepareSearch().setQuery(QueryBuilders.matchQuery("test", English.intToEnglish(docToQuery))).setSize(expectedResults),
+                    response -> {
+                        logger.info("Successful shards: [{}] numShards: [{}]", response.getSuccessfulShards(), numShards.numPrimaries);
+                        if (response.getSuccessfulShards() == numShards.numPrimaries && refreshFailed == false) {
+                            assertResultsAndLogOnFailure(expectedResults, response);
+                        }
+                    }
+                );
                 // check match all
-                searchResponse = prepareSearch().setQuery(QueryBuilders.matchAllQuery())
-                    .setSize(numCreated + numInitialDocs)
-                    .addSort("_uid", SortOrder.ASC)
-                    .get();
-                logger.info(
-                    "Match all Successful shards: [{}] numShards: [{}]",
-                    searchResponse.getSuccessfulShards(),
-                    numShards.numPrimaries
+                assertResponse(
+                    prepareSearch().setQuery(QueryBuilders.matchAllQuery())
+                        .setSize(numCreated + numInitialDocs)
+                        .addSort("_uid", SortOrder.ASC),
+                    response -> {
+                        logger.info(
+                            "Match all Successful shards: [{}] numShards: [{}]",
+                            response.getSuccessfulShards(),
+                            numShards.numPrimaries
+                        );
+                        if (response.getSuccessfulShards() == numShards.numPrimaries && refreshFailed == false) {
+                            assertResultsAndLogOnFailure(finalNumCreated + finalNumInitialDocs, response);
+                        }
+                    }
                 );
-                if (searchResponse.getSuccessfulShards() == numShards.numPrimaries && refreshFailed == false) {
-                    assertResultsAndLogOnFailure(numCreated + numInitialDocs, searchResponse);
-                }
             } catch (SearchPhaseExecutionException ex) {
                 logger.info("SearchPhaseException: [{}]", ex.getMessage());
                 // if a scheduled refresh or flush fails all shards we see all shards failed here
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java
index 54abecb5a1905..c4b0346170949 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java
@@ -16,7 +16,6 @@
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.search.SearchPhaseExecutionException;
 import org.elasticsearch.action.search.SearchRequest;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.common.Priority;
@@ -26,8 +25,10 @@
 import org.elasticsearch.xcontent.XContentBuilder;
 
 import java.io.IOException;
+import java.util.concurrent.ExecutionException;
 
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.anyOf;
 import static org.hamcrest.Matchers.equalTo;
@@ -55,15 +56,18 @@ public void testFailedSearchWithWrongQuery() throws Exception {
         assertThat(refreshResponse.getFailedShards(), equalTo(0));
         for (int i = 0; i < 5; i++) {
             try {
-                SearchResponse searchResponse = client().search(
-                    new SearchRequest("test").source(new SearchSourceBuilder().query(new MatchQueryBuilder("foo", "biz")))
-                ).actionGet();
-                assertThat(searchResponse.getTotalShards(), equalTo(test.numPrimaries));
-                assertThat(searchResponse.getSuccessfulShards(), equalTo(0));
-                assertThat(searchResponse.getFailedShards(), equalTo(test.numPrimaries));
-                fail("search should fail");
-            } catch (ElasticsearchException e) {
-                assertThat(e.unwrapCause(), instanceOf(SearchPhaseExecutionException.class));
+                assertResponse(
+                    client().search(new SearchRequest("test").source(new SearchSourceBuilder().query(new MatchQueryBuilder("foo", "biz")))),
+                    response -> {
+                        assertThat(response.getTotalShards(), equalTo(test.numPrimaries));
+                        assertThat(response.getSuccessfulShards(), equalTo(0));
+                        assertThat(response.getFailedShards(), equalTo(test.numPrimaries));
+                        fail("search should fail");
+                    }
+                );
+            } catch (ExecutionException e) {
+                assertThat(e.getCause(), instanceOf(ElasticsearchException.class));
+                assertThat(((ElasticsearchException) e.getCause()).unwrapCause(), instanceOf(SearchPhaseExecutionException.class));
                 // all is well
             }
         }
@@ -93,15 +97,18 @@ public void testFailedSearchWithWrongQuery() throws Exception {
 
         for (int i = 0; i < 5; i++) {
             try {
-                SearchResponse searchResponse = client().search(
-                    new SearchRequest("test").source(new SearchSourceBuilder().query(new MatchQueryBuilder("foo", "biz")))
-                ).actionGet();
-                assertThat(searchResponse.getTotalShards(), equalTo(test.numPrimaries));
-                assertThat(searchResponse.getSuccessfulShards(), equalTo(0));
-                assertThat(searchResponse.getFailedShards(), equalTo(test.numPrimaries));
-                fail("search should fail");
-            } catch (ElasticsearchException e) {
-                assertThat(e.unwrapCause(), instanceOf(SearchPhaseExecutionException.class));
+                assertResponse(
+                    client().search(new SearchRequest("test").source(new SearchSourceBuilder().query(new MatchQueryBuilder("foo", "biz")))),
+                    response -> {
+                        assertThat(response.getTotalShards(), equalTo(test.numPrimaries));
+                        assertThat(response.getSuccessfulShards(), equalTo(0));
+                        assertThat(response.getFailedShards(), equalTo(test.numPrimaries));
+                        fail("search should fail");
+                    }
+                );
+            } catch (ExecutionException e) {
+                assertThat(e.getCause(), instanceOf(ElasticsearchException.class));
+                assertThat(((ElasticsearchException) e.getCause()).unwrapCause(), instanceOf(SearchPhaseExecutionException.class));
                 // all is well
             }
         }
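Note the failure-path change in TransportSearchFailuresIT: the helper overload that takes the ActionFuture returned by client().search(request) surfaces a failed search as an ExecutionException from the future, rather than the already-unwrapped ElasticsearchException the old .actionGet() call threw, so the tests now unwrap twice. In sketch form (assuming the enclosing test declares throws Exception, as these integration tests typically do):

    try {
        assertResponse(client().search(request), response -> fail("search should fail"));
    } catch (ExecutionException e) {
        // first layer: the future's wrapper; second layer: the search failure
        ElasticsearchException wrapped = (ElasticsearchException) e.getCause();
        assertThat(wrapped.unwrapCause(), instanceOf(SearchPhaseExecutionException.class));
    }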
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportTwoNodesSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportTwoNodesSearchIT.java
index 54cff6efe3d17..b15c6d76b7e55 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportTwoNodesSearchIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportTwoNodesSearchIT.java
@@ -43,6 +43,7 @@
 import static org.elasticsearch.index.query.QueryBuilders.termQuery;
 import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.instanceOf;
@@ -287,15 +288,15 @@ public void testSimpleFacets() throws Exception {
             .aggregation(AggregationBuilders.global("global").subAggregation(AggregationBuilders.filter("all", termQuery("multi", "test"))))
             .aggregation(AggregationBuilders.filter("test1", termQuery("name", "test1")));
 
-        SearchResponse searchResponse = client().search(new SearchRequest("test").source(sourceBuilder)).actionGet();
-        assertNoFailures(searchResponse);
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(100L));
+        assertNoFailuresAndResponse(client().search(new SearchRequest("test").source(sourceBuilder)), response -> {
+            assertThat(response.getHits().getTotalHits().value, equalTo(100L));
 
-        Global global = searchResponse.getAggregations().get("global");
-        Filter all = global.getAggregations().get("all");
-        Filter test1 = searchResponse.getAggregations().get("test1");
-        assertThat(test1.getDocCount(), equalTo(1L));
-        assertThat(all.getDocCount(), equalTo(100L));
+            Global global = response.getAggregations().get("global");
+            Filter all = global.getAggregations().get("all");
+            Filter test1 = response.getAggregations().get("test1");
+            assertThat(test1.getDocCount(), equalTo(1L));
+            assertThat(all.getDocCount(), equalTo(100L));
+        });
     }
 
     public void testFailedSearchWithWrongQuery() throws Exception {
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSCanMatchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSCanMatchIT.java
index 2ddbbec5bc1c8..582df3a5bb396 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSCanMatchIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSCanMatchIT.java
@@ -13,7 +13,6 @@
 import org.apache.lucene.index.PointValues;
 import org.elasticsearch.action.search.CanMatchNodeRequest;
 import org.elasticsearch.action.search.SearchRequest;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.search.SearchTransportService;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
@@ -34,7 +33,6 @@
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
 import org.elasticsearch.test.AbstractMultiClustersTestCase;
-import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
 import org.elasticsearch.test.transport.MockTransportService;
 import org.elasticsearch.transport.TransportService;
 import org.hamcrest.Matchers;
@@ -46,6 +44,9 @@
 import java.util.List;
 import java.util.Optional;
 
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.in;
 
@@ -103,7 +104,7 @@ protected Collection<Class<? extends Plugin>> nodePlugins(String clusterAlias) {
 
     int createIndexAndIndexDocs(String cluster, String index, int numberOfShards, long timestamp, boolean exposeTimestamp) throws Exception {
         Client client = client(cluster);
-        ElasticsearchAssertions.assertAcked(
+        assertAcked(
             client.admin()
                 .indices()
                 .prepareCreate(index)
@@ -175,11 +176,12 @@ public void testCanMatchOnTimeRange() throws Exception {
                 SearchSourceBuilder source = new SearchSourceBuilder().query(new RangeQueryBuilder("@timestamp").from(timestamp));
                 SearchRequest request = new SearchRequest("local_*", "*:remote_*");
                 request.source(source).setCcsMinimizeRoundtrips(minimizeRoundTrips);
-                SearchResponse searchResp = client().search(request).actionGet();
-                ElasticsearchAssertions.assertHitCount(searchResp, localDocs + remoteDocs);
-                int totalShards = oldLocalNumShards + newLocalNumShards + oldRemoteNumShards + newRemoteNumShards;
-                assertThat(searchResp.getTotalShards(), equalTo(totalShards));
-                assertThat(searchResp.getSkippedShards(), equalTo(oldLocalNumShards + oldRemoteNumShards));
+                assertResponse(client().search(request), response -> {
+                    assertHitCount(response, localDocs + remoteDocs);
+                    int totalShards = oldLocalNumShards + newLocalNumShards + oldRemoteNumShards + newRemoteNumShards;
+                    assertThat(response.getTotalShards(), equalTo(totalShards));
+                    assertThat(response.getSkippedShards(), equalTo(oldLocalNumShards + oldRemoteNumShards));
+                });
             }
         } finally {
             for (String cluster : List.of(LOCAL_CLUSTER, REMOTE_CLUSTER)) {
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java
index 1596a9a7e28a8..cc40003c1001a 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java
@@ -67,6 +67,7 @@
 
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.hamcrest.Matchers.contains;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
@@ -130,13 +131,14 @@ public void testRemoteClusterClientRole() throws Exception {
                 .toList()
         );
 
-        final SearchResponse resp = localCluster.client(nodeWithRemoteClusterClientRole)
-            .prepareSearch("demo", "cluster_a:prod")
-            .setQuery(new MatchAllQueryBuilder())
-            .setAllowPartialSearchResults(false)
-            .setSize(1000)
-            .get();
-        assertHitCount(resp, demoDocs + prodDocs);
+        assertHitCount(
+            localCluster.client(nodeWithRemoteClusterClientRole)
+                .prepareSearch("demo", "cluster_a:prod")
+                .setQuery(new MatchAllQueryBuilder())
+                .setAllowPartialSearchResults(false)
+                .setSize(1000),
+            demoDocs + prodDocs
+        );
     }
 
     public void testProxyConnectionDisconnect() throws Exception {
@@ -398,17 +400,21 @@ public void testLookupFields() throws Exception {
                 .fetchField("to");
             SearchRequest request = new SearchRequest("cluster_a:remote_calls").source(searchSourceBuilder);
             request.setCcsMinimizeRoundtrips(randomBoolean());
-            SearchResponse searchResponse = client().search(request).actionGet();
-            ElasticsearchAssertions.assertHitCount(searchResponse, 2);
-            SearchHit hit0 = searchResponse.getHits().getHits()[0];
-            assertThat(hit0.getIndex(), equalTo("remote_calls"));
-            assertThat(hit0.field("from"), nullValue());
-            assertThat(hit0.field("to").getValues(), contains(Map.of("name", List.of("Remote C"))));
-
-            SearchHit hit1 = searchResponse.getHits().getHits()[1];
-            assertThat(hit1.getIndex(), equalTo("remote_calls"));
-            assertThat(hit1.field("from").getValues(), contains(Map.of("name", List.of("Remote A")), Map.of("name", List.of("Remote B"))));
-            assertThat(hit1.field("to").getValues(), contains(Map.of("name", List.of("Remote C"))));
+            assertResponse(client().search(request), response -> {
+                ElasticsearchAssertions.assertHitCount(response, 2);
+                SearchHit hit0 = response.getHits().getHits()[0];
+                assertThat(hit0.getIndex(), equalTo("remote_calls"));
+                assertThat(hit0.field("from"), nullValue());
+                assertThat(hit0.field("to").getValues(), contains(Map.of("name", List.of("Remote C"))));
+
+                SearchHit hit1 = response.getHits().getHits()[1];
+                assertThat(hit1.getIndex(), equalTo("remote_calls"));
+                assertThat(
+                    hit1.field("from").getValues(),
+                    contains(Map.of("name", List.of("Remote A")), Map.of("name", List.of("Remote B")))
+                );
+                assertThat(hit1.field("to").getValues(), contains(Map.of("name", List.of("Remote C"))));
+            });
         }
         // Search on both clusters
         {
@@ -419,22 +425,26 @@ public void testLookupFields() throws Exception {
                 .fetchField("to");
             SearchRequest request = new SearchRequest("local_calls", "cluster_a:remote_calls").source(searchSourceBuilder);
             request.setCcsMinimizeRoundtrips(randomBoolean());
-            SearchResponse searchResponse = client().search(request).actionGet();
-            ElasticsearchAssertions.assertHitCount(searchResponse, 3);
-            SearchHit hit0 = searchResponse.getHits().getHits()[0];
-            assertThat(hit0.getIndex(), equalTo("remote_calls"));
-            assertThat(hit0.field("from"), nullValue());
-            assertThat(hit0.field("to").getValues(), contains(Map.of("name", List.of("Remote C"))));
-
-            SearchHit hit1 = searchResponse.getHits().getHits()[1];
-            assertThat(hit1.getIndex(), equalTo("remote_calls"));
-            assertThat(hit1.field("from").getValues(), contains(Map.of("name", List.of("Remote A")), Map.of("name", List.of("Remote B"))));
-            assertThat(hit1.field("to").getValues(), contains(Map.of("name", List.of("Remote C"))));
-
-            SearchHit hit2 = searchResponse.getHits().getHits()[2];
-            assertThat(hit2.getIndex(), equalTo("local_calls"));
-            assertThat(hit2.field("from").getValues(), contains(Map.of("name", List.of("Local A"))));
-            assertThat(hit2.field("to").getValues(), contains(Map.of("name", List.of("Local B")), Map.of("name", List.of("Local C"))));
+            assertResponse(client().search(request), response -> {
+                assertHitCount(response, 3);
+                SearchHit hit0 = response.getHits().getHits()[0];
+                assertThat(hit0.getIndex(), equalTo("remote_calls"));
+                assertThat(hit0.field("from"), nullValue());
+                assertThat(hit0.field("to").getValues(), contains(Map.of("name", List.of("Remote C"))));
+
+                SearchHit hit1 = response.getHits().getHits()[1];
+                assertThat(hit1.getIndex(), equalTo("remote_calls"));
+                assertThat(
+                    hit1.field("from").getValues(),
+                    contains(Map.of("name", List.of("Remote A")), Map.of("name", List.of("Remote B")))
+                );
+                assertThat(hit1.field("to").getValues(), contains(Map.of("name", List.of("Remote C"))));
+
+                SearchHit hit2 = response.getHits().getHits()[2];
+                assertThat(hit2.getIndex(), equalTo("local_calls"));
+                assertThat(hit2.field("from").getValues(), contains(Map.of("name", List.of("Local A"))));
+                assertThat(hit2.field("to").getValues(), contains(Map.of("name", List.of("Local B")), Map.of("name", List.of("Local C"))));
+            });
         }
     }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java
index 0be427a5fd09d..379cdfc990207 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java
@@ -11,6 +11,8 @@
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchResponse.Cluster;
+import org.elasticsearch.action.search.SearchResponse.Clusters;
 import org.elasticsearch.action.search.SearchType;
 import org.elasticsearch.action.search.ShardSearchFailure;
 import org.elasticsearch.action.search.TransportSearchAction;
@@ -41,6 +43,7 @@
 import java.util.concurrent.TimeUnit;
 
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
@@ -119,39 +122,40 @@ public void testClusterDetailsAfterSuccessfulCCS() throws Exception {
         }
         searchRequest.source(new SearchSourceBuilder().query(new MatchAllQueryBuilder()).size(10));
 
-        SearchResponse searchResponse = client(LOCAL_CLUSTER).search(searchRequest).get();
-        assertNotNull(searchResponse);
-
-        SearchResponse.Clusters clusters = searchResponse.getClusters();
-        assertFalse("search cluster results should NOT be marked as partial", clusters.hasPartialResults());
-        assertThat(clusters.getTotal(), equalTo(2));
-        assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(2));
-        assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0));
-        assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), equalTo(0));
-        assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(0));
-        assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(0));
-
-        SearchResponse.Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
-        assertNotNull(localClusterSearchInfo);
-        assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL));
-        assertThat(localClusterSearchInfo.getIndexExpression(), equalTo(localIndex));
-        assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards));
-        assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards));
-        assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0));
-        assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0));
-        assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0));
-        assertThat(localClusterSearchInfo.getTook().millis(), greaterThan(0L));
-
-        SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER);
-        assertNotNull(remoteClusterSearchInfo);
-        assertThat(remoteClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL));
-        assertThat(remoteClusterSearchInfo.getIndexExpression(), equalTo(remoteIndex));
-        assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards));
-        assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards));
-        assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0));
-        assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0));
-        assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0));
-        assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThan(0L));
+        assertResponse(client(LOCAL_CLUSTER).search(searchRequest), response -> {
+            assertNotNull(response);
+
+            Clusters clusters = response.getClusters();
+            assertFalse("search cluster results should NOT be marked as partial", clusters.hasPartialResults());
+            assertThat(clusters.getTotal(), equalTo(2));
+            assertThat(clusters.getClusterStateCount(Cluster.Status.SUCCESSFUL), equalTo(2));
+            assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(0));
+            assertThat(clusters.getClusterStateCount(Cluster.Status.RUNNING), equalTo(0));
+            assertThat(clusters.getClusterStateCount(Cluster.Status.PARTIAL), equalTo(0));
+            assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(0));
+
+            Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
+            assertNotNull(localClusterSearchInfo);
+            assertThat(localClusterSearchInfo.getStatus(), equalTo(Cluster.Status.SUCCESSFUL));
+            assertThat(localClusterSearchInfo.getIndexExpression(), equalTo(localIndex));
+            assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards));
+            assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards));
+            assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0));
+            assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0));
+            assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0));
+            assertThat(localClusterSearchInfo.getTook().millis(), greaterThan(0L));
+
+            Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER);
+            assertNotNull(remoteClusterSearchInfo);
+            assertThat(remoteClusterSearchInfo.getStatus(), equalTo(Cluster.Status.SUCCESSFUL));
+            assertThat(remoteClusterSearchInfo.getIndexExpression(), equalTo(remoteIndex));
+            assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards));
+            assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards));
+            assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0));
+            assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0));
+            assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0));
+            assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThan(0L));
+        });
     }
 
     // CCS with a search where the timestamp of the query cannot match so should be SUCCESSFUL with all shards skipped
@@ -183,47 +187,49 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except
         searchRequest.source(new SearchSourceBuilder().query(rangeQueryBuilder).size(10));
 
-        SearchResponse searchResponse = client(LOCAL_CLUSTER).search(searchRequest).get();
-        assertNotNull(searchResponse);
-
-        SearchResponse.Clusters clusters = searchResponse.getClusters();
-        assertFalse("search cluster results should NOT be marked as partial", clusters.hasPartialResults());
-        assertThat(clusters.getTotal(), equalTo(2));
-        assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(2));
-        assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0));
-        assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), equalTo(0));
-        assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(0));
-        assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(0));
-
-        SearchResponse.Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
-        assertNotNull(localClusterSearchInfo);
-        SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER);
-        assertNotNull(remoteClusterSearchInfo);
-
-        assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL));
-        assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards));
-        assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards));
-        if (dfs) {
-            // with DFS_QUERY_THEN_FETCH, the local shards are never skipped
-            assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0));
-        } else {
-            assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards - 1));
-        }
-        assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0));
-        assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0));
-        assertThat(localClusterSearchInfo.getTook().millis(), greaterThanOrEqualTo(0L));
+        assertResponse(client(LOCAL_CLUSTER).search(searchRequest), response -> {
+            assertNotNull(response);
+
+            Clusters clusters = response.getClusters();
+            assertFalse("search cluster results should NOT be marked as partial", clusters.hasPartialResults());
+            assertThat(clusters.getTotal(), equalTo(2));
+            assertThat(clusters.getClusterStateCount(Cluster.Status.SUCCESSFUL), equalTo(2));
+            assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(0));
+            assertThat(clusters.getClusterStateCount(Cluster.Status.RUNNING), equalTo(0));
+            assertThat(clusters.getClusterStateCount(Cluster.Status.PARTIAL), equalTo(0));
+            assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(0));
+
+            Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
+            assertNotNull(localClusterSearchInfo);
+            Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER);
+            assertNotNull(remoteClusterSearchInfo);
+
+            assertThat(localClusterSearchInfo.getStatus(), equalTo(Cluster.Status.SUCCESSFUL));
+            assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards));
+            assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards));
+            if (dfs) {
+                // with DFS_QUERY_THEN_FETCH, the local shards are never skipped
+                assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0));
+            } else {
+                assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards - 1));
+            }
+            assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0));
+            assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0));
+            assertThat(localClusterSearchInfo.getTook().millis(), greaterThanOrEqualTo(0L));
+
+            assertThat(remoteClusterSearchInfo.getStatus(), equalTo(Cluster.Status.SUCCESSFUL));
+            assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards));
+            assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards));
+            if (clusters.isCcsMinimizeRoundtrips()) {
+                assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards - 1));
+            } else {
+                assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards));
+            }
+            assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0));
+            assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0));
+            assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThanOrEqualTo(0L));
+        });
 
-        assertThat(remoteClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL));
-        assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards));
-        assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards));
-        if (clusters.isCcsMinimizeRoundtrips()) {
-            assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards - 1));
-        } else {
-            assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards));
-        }
-        assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0));
-        assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0));
-
assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThanOrEqualTo(0L)); } public void testClusterDetailsAfterCCSWithFailuresOnOneShardOnly() throws Exception { @@ -251,24 +257,25 @@ public void testClusterDetailsAfterCCSWithFailuresOnOneShardOnly() throws Except ThrowingQueryBuilder queryBuilder = new ThrowingQueryBuilder(randomLong(), new IllegalStateException("index corrupted"), 0); searchRequest.source(new SearchSourceBuilder().query(queryBuilder).size(10)); - SearchResponse searchResponse = client(LOCAL_CLUSTER).search(searchRequest).get(); - assertNotNull(searchResponse); + assertResponse(client(LOCAL_CLUSTER).search(searchRequest), response -> { + assertNotNull(response); - SearchResponse.Clusters clusters = searchResponse.getClusters(); - assertThat(clusters.getTotal(), equalTo(2)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(2)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(0)); + Clusters clusters = response.getClusters(); + assertThat(clusters.getTotal(), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SUCCESSFUL), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.RUNNING), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.PARTIAL), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(0)); - SearchResponse.Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); - assertNotNull(localClusterSearchInfo); - assertOneFailedShard(localClusterSearchInfo, localNumShards); + Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertNotNull(localClusterSearchInfo); + assertOneFailedShard(localClusterSearchInfo, localNumShards); - SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); - assertNotNull(remoteClusterSearchInfo); - assertOneFailedShard(remoteClusterSearchInfo, remoteNumShards); + Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); + assertNotNull(remoteClusterSearchInfo); + assertOneFailedShard(remoteClusterSearchInfo, remoteNumShards); + }); } // tests bug fix https://github.com/elastic/elasticsearch/issues/100350 @@ -296,39 +303,40 @@ public void testClusterDetailsAfterCCSWhereRemoteClusterHasNoShardsToSearch() th } searchRequest.source(new SearchSourceBuilder().query(new MatchAllQueryBuilder()).size(10)); - SearchResponse searchResponse = client(LOCAL_CLUSTER).search(searchRequest).get(); - assertNotNull(searchResponse); - - SearchResponse.Clusters clusters = searchResponse.getClusters(); - assertFalse("search cluster results should NOT be marked as partial", clusters.hasPartialResults()); - assertThat(clusters.getTotal(), equalTo(2)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(2)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), equalTo(0)); - 
assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(0)); - - SearchResponse.Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); - assertNotNull(localClusterSearchInfo); - assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertThat(localClusterSearchInfo.getIndexExpression(), equalTo(localIndex)); - assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); - assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards)); - assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); - assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); - assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); - assertThat(localClusterSearchInfo.getTook().millis(), greaterThan(0L)); - - SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); - assertNotNull(remoteClusterSearchInfo); - assertThat(remoteClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertThat(remoteClusterSearchInfo.getIndexExpression(), equalTo("no_such_index*")); - assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(0)); // no shards since index does not exist - assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); - assertNotNull(remoteClusterSearchInfo.getTook()); + assertResponse(client(LOCAL_CLUSTER).search(searchRequest), response -> { + assertNotNull(response); + + Clusters clusters = response.getClusters(); + assertFalse("search cluster results should NOT be marked as partial", clusters.hasPartialResults()); + assertThat(clusters.getTotal(), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SUCCESSFUL), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.RUNNING), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.PARTIAL), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(0)); + + Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertNotNull(localClusterSearchInfo); + assertThat(localClusterSearchInfo.getStatus(), equalTo(Cluster.Status.SUCCESSFUL)); + assertThat(localClusterSearchInfo.getIndexExpression(), equalTo(localIndex)); + assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(localClusterSearchInfo.getTook().millis(), greaterThan(0L)); + + Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); + assertNotNull(remoteClusterSearchInfo); + assertThat(remoteClusterSearchInfo.getStatus(), equalTo(Cluster.Status.SUCCESSFUL)); + assertThat(remoteClusterSearchInfo.getIndexExpression(), equalTo("no_such_index*")); + assertThat(remoteClusterSearchInfo.getTotalShards(), 
equalTo(0)); // no shards since index does not exist + assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); + assertNotNull(remoteClusterSearchInfo.getTook()); + }); } public void testClusterDetailsAfterCCSWithFailuresOnRemoteClusterOnly() throws Exception { @@ -375,59 +383,58 @@ public void testClusterDetailsAfterCCSWithFailuresOnRemoteClusterOnly() throws E Throwable rootCause = ExceptionsHelper.unwrap(ee.getCause(), IllegalStateException.class); assertThat(rootCause.getMessage(), containsString("index corrupted")); } else { - SearchResponse searchResponse = queryFuture.get(); - assertNotNull(searchResponse); - - SearchResponse.Clusters clusters = searchResponse.getClusters(); - if (dfs == false) { - assertThat(clusters.isCcsMinimizeRoundtrips(), equalTo(minimizeRoundtrips)); - } - assertThat(clusters.getTotal(), equalTo(2)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(1)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(0)); - if (skipUnavailable) { - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(1)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(0)); - } else { - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(1)); - } + assertResponse(queryFuture, response -> { + assertNotNull(response); - SearchResponse.Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); - assertNotNull(localClusterSearchInfo); - assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); - assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards)); - assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); - assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); - assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); - assertThat(localClusterSearchInfo.getTook().millis(), greaterThan(0L)); - - SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); + Clusters clusters = response.getClusters(); + if (dfs == false) { + assertThat(clusters.isCcsMinimizeRoundtrips(), equalTo(minimizeRoundtrips)); + } + assertThat(clusters.getTotal(), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SUCCESSFUL), equalTo(1)); + assertThat(clusters.getClusterStateCount(Cluster.Status.RUNNING), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.PARTIAL), equalTo(0)); + if (skipUnavailable) { + assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(1)); + assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(0)); + } else { + assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(1)); + } - assertNotNull(remoteClusterSearchInfo); - SearchResponse.Cluster.Status expectedStatus = 
skipUnavailable - ? SearchResponse.Cluster.Status.SKIPPED - : SearchResponse.Cluster.Status.FAILED; - assertThat(remoteClusterSearchInfo.getStatus(), equalTo(expectedStatus)); - if (clusters.isCcsMinimizeRoundtrips()) { - assertNull(remoteClusterSearchInfo.getTotalShards()); - assertNull(remoteClusterSearchInfo.getSuccessfulShards()); - assertNull(remoteClusterSearchInfo.getSkippedShards()); - assertNull(remoteClusterSearchInfo.getFailedShards()); - assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(1)); - } else { - assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); - assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(remoteNumShards)); - assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(remoteNumShards)); - } - assertNull(remoteClusterSearchInfo.getTook()); - assertFalse(remoteClusterSearchInfo.isTimedOut()); - ShardSearchFailure remoteShardSearchFailure = remoteClusterSearchInfo.getFailures().get(0); - assertTrue("should have 'index corrupted' in reason", remoteShardSearchFailure.reason().contains("index corrupted")); + Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertNotNull(localClusterSearchInfo); + assertThat(localClusterSearchInfo.getStatus(), equalTo(Cluster.Status.SUCCESSFUL)); + assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(localClusterSearchInfo.getTook().millis(), greaterThan(0L)); + + Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); + + assertNotNull(remoteClusterSearchInfo); + Cluster.Status expectedStatus = skipUnavailable ? 
Cluster.Status.SKIPPED : Cluster.Status.FAILED; + assertThat(remoteClusterSearchInfo.getStatus(), equalTo(expectedStatus)); + if (clusters.isCcsMinimizeRoundtrips()) { + assertNull(remoteClusterSearchInfo.getTotalShards()); + assertNull(remoteClusterSearchInfo.getSuccessfulShards()); + assertNull(remoteClusterSearchInfo.getSkippedShards()); + assertNull(remoteClusterSearchInfo.getFailedShards()); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(1)); + } else { + assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(remoteNumShards)); + } + assertNull(remoteClusterSearchInfo.getTook()); + assertFalse(remoteClusterSearchInfo.isTimedOut()); + ShardSearchFailure remoteShardSearchFailure = remoteClusterSearchInfo.getFailures().get(0); + assertTrue("should have 'index corrupted' in reason", remoteShardSearchFailure.reason().contains("index corrupted")); + }); } } @@ -458,40 +465,41 @@ public void testCCSWithSearchTimeoutOnRemoteCluster() throws Exception { SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(slowRunningQueryBuilder).timeout(searchTimeout); searchRequest.source(sourceBuilder); - SearchResponse searchResponse = client(LOCAL_CLUSTER).search(searchRequest).get(); - assertNotNull(searchResponse); - - SearchResponse.Clusters clusters = searchResponse.getClusters(); - assertThat(clusters.getTotal(), equalTo(2)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(2)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(0)); - - SearchResponse.Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); - assertNotNull(localClusterSearchInfo); - assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.PARTIAL)); - assertTrue(localClusterSearchInfo.isTimedOut()); - assertThat(localClusterSearchInfo.getIndexExpression(), equalTo(localIndex)); - assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); - assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards)); - assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); - assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); - assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); - assertThat(localClusterSearchInfo.getTook().millis(), greaterThanOrEqualTo(0L)); - - SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); - assertNotNull(remoteClusterSearchInfo); - assertThat(remoteClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.PARTIAL)); - assertTrue(remoteClusterSearchInfo.isTimedOut()); - assertThat(remoteClusterSearchInfo.getIndexExpression(), equalTo(remoteIndex)); - assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); - assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); - 
assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThanOrEqualTo(0L)); + assertResponse(client(LOCAL_CLUSTER).search(searchRequest), response -> { + assertNotNull(response); + + Clusters clusters = response.getClusters(); + assertThat(clusters.getTotal(), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SUCCESSFUL), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.RUNNING), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.PARTIAL), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(0)); + + Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertNotNull(localClusterSearchInfo); + assertThat(localClusterSearchInfo.getStatus(), equalTo(Cluster.Status.PARTIAL)); + assertTrue(localClusterSearchInfo.isTimedOut()); + assertThat(localClusterSearchInfo.getIndexExpression(), equalTo(localIndex)); + assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(localClusterSearchInfo.getTook().millis(), greaterThanOrEqualTo(0L)); + + Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); + assertNotNull(remoteClusterSearchInfo); + assertThat(remoteClusterSearchInfo.getStatus(), equalTo(Cluster.Status.PARTIAL)); + assertTrue(remoteClusterSearchInfo.isTimedOut()); + assertThat(remoteClusterSearchInfo.getIndexExpression(), equalTo(remoteIndex)); + assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThanOrEqualTo(0L)); + }); } public void testRemoteClusterOnlyCCSSuccessfulResult() throws Exception { @@ -513,29 +521,30 @@ public void testRemoteClusterOnlyCCSSuccessfulResult() throws Exception { } searchRequest.source(new SearchSourceBuilder().query(new MatchAllQueryBuilder()).size(10)); - SearchResponse searchResponse = client(LOCAL_CLUSTER).search(searchRequest).get(); - assertNotNull(searchResponse); - - SearchResponse.Clusters clusters = searchResponse.getClusters(); - assertFalse("search cluster results should NOT be marked as partial", clusters.hasPartialResults()); - assertThat(clusters.getTotal(), equalTo(1)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(1)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(0)); - 
assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(0)); - - assertNull(clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)); - - SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); - assertNotNull(remoteClusterSearchInfo); - assertThat(remoteClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); - assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); - assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThan(0L)); + assertResponse(client(LOCAL_CLUSTER).search(searchRequest), response -> { + assertNotNull(response); + + Clusters clusters = response.getClusters(); + assertFalse("search cluster results should NOT be marked as partial", clusters.hasPartialResults()); + assertThat(clusters.getTotal(), equalTo(1)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SUCCESSFUL), equalTo(1)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.RUNNING), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.PARTIAL), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(0)); + + assertNull(clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)); + + Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); + assertNotNull(remoteClusterSearchInfo); + assertThat(remoteClusterSearchInfo.getStatus(), equalTo(Cluster.Status.SUCCESSFUL)); + assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThan(0L)); + }); } public void testRemoteClusterOnlyCCSWithFailuresOnOneShardOnly() throws Exception { @@ -560,22 +569,22 @@ public void testRemoteClusterOnlyCCSWithFailuresOnOneShardOnly() throws Exceptio ThrowingQueryBuilder queryBuilder = new ThrowingQueryBuilder(randomLong(), new IllegalStateException("index corrupted"), 0); searchRequest.source(new SearchSourceBuilder().query(queryBuilder).size(10)); - SearchResponse searchResponse = client(LOCAL_CLUSTER).search(searchRequest).get(); - assertNotNull(searchResponse); + assertResponse(client(LOCAL_CLUSTER).search(searchRequest), response -> { - SearchResponse.Clusters clusters = searchResponse.getClusters(); - assertThat(clusters.getTotal(), equalTo(1)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(1)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(0)); + Clusters clusters = response.getClusters(); + 
assertThat(clusters.getTotal(), equalTo(1)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SUCCESSFUL), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.RUNNING), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.PARTIAL), equalTo(1)); + assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(0)); - assertNull(clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)); + assertNull(clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)); - SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); - assertNotNull(remoteClusterSearchInfo); - assertOneFailedShard(remoteClusterSearchInfo, remoteNumShards); + Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); + assertNotNull(remoteClusterSearchInfo); + assertOneFailedShard(remoteClusterSearchInfo, remoteNumShards); + }); } public void testRemoteClusterOnlyCCSWithFailuresOnAllShards() throws Exception { @@ -612,44 +621,43 @@ public void testRemoteClusterOnlyCCSWithFailuresOnAllShards() throws Exception { Throwable rootCause = ExceptionsHelper.unwrap(ee, IllegalStateException.class); assertThat(rootCause.getMessage(), containsString("index corrupted")); } else { - SearchResponse searchResponse = queryFuture.get(); - assertNotNull(searchResponse); - SearchResponse.Clusters clusters = searchResponse.getClusters(); - assertThat(clusters.getTotal(), equalTo(1)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(0)); - if (skipUnavailable) { - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(1)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(0)); - } else { - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(1)); - } + assertResponse(queryFuture, response -> { + assertNotNull(response); + Clusters clusters = response.getClusters(); + assertThat(clusters.getTotal(), equalTo(1)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SUCCESSFUL), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.RUNNING), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.PARTIAL), equalTo(0)); + if (skipUnavailable) { + assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(1)); + assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(0)); + } else { + assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(1)); + } - assertNull(clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)); + assertNull(clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)); - SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); - assertNotNull(remoteClusterSearchInfo); - SearchResponse.Cluster.Status expectedStatus = skipUnavailable - ? 
SearchResponse.Cluster.Status.SKIPPED - : SearchResponse.Cluster.Status.FAILED; - assertThat(remoteClusterSearchInfo.getStatus(), equalTo(expectedStatus)); - assertNull(remoteClusterSearchInfo.getTotalShards()); - assertNull(remoteClusterSearchInfo.getSuccessfulShards()); - assertNull(remoteClusterSearchInfo.getSkippedShards()); - assertNull(remoteClusterSearchInfo.getFailedShards()); - assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(1)); - assertNull(remoteClusterSearchInfo.getTook()); - assertFalse(remoteClusterSearchInfo.isTimedOut()); - ShardSearchFailure remoteShardSearchFailure = remoteClusterSearchInfo.getFailures().get(0); - assertTrue("should have 'index corrupted' in reason", remoteShardSearchFailure.reason().contains("index corrupted")); + Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); + assertNotNull(remoteClusterSearchInfo); + Cluster.Status expectedStatus = skipUnavailable ? Cluster.Status.SKIPPED : Cluster.Status.FAILED; + assertThat(remoteClusterSearchInfo.getStatus(), equalTo(expectedStatus)); + assertNull(remoteClusterSearchInfo.getTotalShards()); + assertNull(remoteClusterSearchInfo.getSuccessfulShards()); + assertNull(remoteClusterSearchInfo.getSkippedShards()); + assertNull(remoteClusterSearchInfo.getFailedShards()); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(1)); + assertNull(remoteClusterSearchInfo.getTook()); + assertFalse(remoteClusterSearchInfo.isTimedOut()); + ShardSearchFailure remoteShardSearchFailure = remoteClusterSearchInfo.getFailures().get(0); + assertTrue("should have 'index corrupted' in reason", remoteShardSearchFailure.reason().contains("index corrupted")); + }); } } - private static void assertOneFailedShard(SearchResponse.Cluster cluster, int totalShards) { + private static void assertOneFailedShard(Cluster cluster, int totalShards) { assertNotNull(cluster); - assertThat(cluster.getStatus(), equalTo(SearchResponse.Cluster.Status.PARTIAL)); + assertThat(cluster.getStatus(), equalTo(Cluster.Status.PARTIAL)); assertThat(cluster.getTotalShards(), equalTo(totalShards)); assertThat(cluster.getSuccessfulShards(), equalTo(totalShards - 1)); assertThat(cluster.getSkippedShards(), equalTo(0)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchLeakIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchLeakIT.java index fa84353b7c9cb..8b6f4112cfc17 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchLeakIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchLeakIT.java @@ -31,6 +31,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; public class CrossClusterSearchLeakIT extends AbstractMultiClustersTestCase { @@ -136,18 +137,23 @@ public void testSearch() throws Exception { } for (ActionFuture future : futures) { - SearchResponse searchResponse = future.get(); - if (searchResponse.getScrollId() != null) { - ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); - clearScrollRequest.scrollIds(List.of(searchResponse.getScrollId())); - client(LOCAL_CLUSTER).clearScroll(clearScrollRequest).get(); - } + assertResponse(future, response -> { + if (response.getScrollId() != null) { + 
ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); + clearScrollRequest.scrollIds(List.of(response.getScrollId())); + try { + client(LOCAL_CLUSTER).clearScroll(clearScrollRequest).get(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } - Terms terms = searchResponse.getAggregations().get("f"); - assertThat(terms.getBuckets().size(), equalTo(docs)); - for (Terms.Bucket bucket : terms.getBuckets()) { - assertThat(bucket.getDocCount(), equalTo(1L)); - } + Terms terms = response.getAggregations().get("f"); + assertThat(terms.getBuckets().size(), equalTo(docs)); + for (Terms.Bucket bucket : terms.getBuckets()) { + assertThat(bucket.getDocCount(), equalTo(1L)); + } + }); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java index b600098d82b33..15afd6897a40e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java @@ -14,7 +14,6 @@ import org.apache.lucene.index.TermsEnum; import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.io.stream.StreamInput; @@ -39,7 +38,7 @@ import java.util.Objects; import static java.util.Collections.singletonList; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.CoreMatchers.equalTo; @@ -72,21 +71,22 @@ public void testPlugin() throws Exception { indicesAdmin().prepareRefresh().get(); - SearchResponse response = prepareSearch().setSource( - new SearchSourceBuilder().ext(Collections.singletonList(new TermVectorsFetchBuilder("test"))) - ).get(); - assertNoFailures(response); - assertThat( - ((Map) response.getHits().getAt(0).field("term_vectors_fetch").getValues().get(0)).get("i"), - equalTo(2) - ); - assertThat( - ((Map) response.getHits().getAt(0).field("term_vectors_fetch").getValues().get(0)).get("am"), - equalTo(2) - ); - assertThat( - ((Map) response.getHits().getAt(0).field("term_vectors_fetch").getValues().get(0)).get("sam"), - equalTo(1) + assertNoFailuresAndResponse( + prepareSearch().setSource(new SearchSourceBuilder().ext(Collections.singletonList(new TermVectorsFetchBuilder("test")))), + response -> { + assertThat( + ((Map) response.getHits().getAt(0).field("term_vectors_fetch").getValues().get(0)).get("i"), + equalTo(2) + ); + assertThat( + ((Map) response.getHits().getAt(0).field("term_vectors_fetch").getValues().get(0)).get("am"), + equalTo(2) + ); + assertThat( + ((Map) response.getHits().getAt(0).field("term_vectors_fetch").getValues().get(0)).get("sam"), + equalTo(1) + ); + } ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java index 00c5342577231..ef3b382da7089 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java @@ -12,7 +12,6 @@ import org.apache.lucene.util.ArrayUtil; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettings; @@ -27,6 +26,7 @@ import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; +import org.elasticsearch.search.fetch.subphase.highlight.HighlightField; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESIntegTestCase; @@ -52,6 +52,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHitsWithoutFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; @@ -142,77 +143,84 @@ public void testSimpleNested() throws Exception { ); indexRandom(true, requests); - SearchResponse response = prepareSearch("articles").setQuery( - nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.Avg).innerHit(new InnerHitBuilder("comment")) - ).get(); - assertNoFailures(response); - assertHitCount(response, 1); - assertSearchHit(response, 1, hasId("1")); - assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); - SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comment"); - assertThat(innerHits.getTotalHits().value, equalTo(2L)); - assertThat(innerHits.getHits().length, equalTo(2)); - assertThat(innerHits.getAt(0).getId(), equalTo("1")); - assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(innerHits.getAt(1).getId(), equalTo("1")); - assertThat(innerHits.getAt(1).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(innerHits.getAt(1).getNestedIdentity().getOffset(), equalTo(1)); - - response = prepareSearch("articles").setQuery( - nestedQuery("comments", matchQuery("comments.message", "elephant"), ScoreMode.Avg).innerHit(new InnerHitBuilder("comment")) - ).get(); - assertNoFailures(response); - assertHitCount(response, 1); - assertSearchHit(response, 1, hasId("2")); - assertThat(response.getHits().getAt(0).getShard(), notNullValue()); - assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); - innerHits = response.getHits().getAt(0).getInnerHits().get("comment"); - assertThat(innerHits.getTotalHits().value, equalTo(3L)); - assertThat(innerHits.getHits().length, equalTo(3)); - assertThat(innerHits.getAt(0).getId(), equalTo("2")); - assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - 
assertThat(innerHits.getAt(1).getId(), equalTo("2")); - assertThat(innerHits.getAt(1).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(innerHits.getAt(1).getNestedIdentity().getOffset(), equalTo(1)); - assertThat(innerHits.getAt(2).getId(), equalTo("2")); - assertThat(innerHits.getAt(2).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(innerHits.getAt(2).getNestedIdentity().getOffset(), equalTo(2)); - - response = prepareSearch("articles").setQuery( - nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.Avg).innerHit( - new InnerHitBuilder().setHighlightBuilder(new HighlightBuilder().field("comments.message")) - .setExplain(true) - .addFetchField("comments.mes*") - .addScriptField("script", new Script(ScriptType.INLINE, MockScriptEngine.NAME, "5", Collections.emptyMap())) - .setSize(1) - ) - ).get(); - assertNoFailures(response); - innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); - assertThat(innerHits.getTotalHits().value, equalTo(2L)); - assertThat(innerHits.getHits().length, equalTo(1)); - assertThat( - innerHits.getAt(0).getHighlightFields().get("comments.message").getFragments()[0].string(), - equalTo("fox eat quick") + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.Avg).innerHit(new InnerHitBuilder("comment")) + ), + response -> { + assertHitCount(response, 1); + assertSearchHit(response, 1, hasId("1")); + assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); + SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comment"); + assertThat(innerHits.getTotalHits().value, equalTo(2L)); + assertThat(innerHits.getHits().length, equalTo(2)); + assertThat(innerHits.getAt(0).getId(), equalTo("1")); + assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(innerHits.getAt(1).getId(), equalTo("1")); + assertThat(innerHits.getAt(1).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(1).getNestedIdentity().getOffset(), equalTo(1)); + } ); - assertThat(innerHits.getAt(0).getExplanation().toString(), containsString("weight(comments.message:fox in")); - assertThat( - innerHits.getAt(0).getFields().get("comments").getValue(), - equalTo(Collections.singletonMap("message", Collections.singletonList("fox eat quick"))) + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery("comments", matchQuery("comments.message", "elephant"), ScoreMode.Avg).innerHit(new InnerHitBuilder("comment")) + ), + response -> { + assertHitCount(response, 1); + assertSearchHit(response, 1, hasId("2")); + assertThat(response.getHits().getAt(0).getShard(), notNullValue()); + assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); + SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comment"); + assertThat(innerHits.getTotalHits().value, equalTo(3L)); + assertThat(innerHits.getHits().length, equalTo(3)); + assertThat(innerHits.getAt(0).getId(), equalTo("2")); + assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(innerHits.getAt(1).getId(), equalTo("2")); + assertThat(innerHits.getAt(1).getNestedIdentity().getField().string(), equalTo("comments")); 
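+                // Each inner hit's NestedIdentity records the nested path ("comments") and the
+                // array offset within it, so the offsets asserted here walk comments[0..2] of doc "2".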
+ assertThat(innerHits.getAt(1).getNestedIdentity().getOffset(), equalTo(1)); + assertThat(innerHits.getAt(2).getId(), equalTo("2")); + assertThat(innerHits.getAt(2).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(2).getNestedIdentity().getOffset(), equalTo(2)); + } + ); + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.Avg).innerHit( + new InnerHitBuilder().setHighlightBuilder(new HighlightBuilder().field("comments.message")) + .setExplain(true) + .addFetchField("comments.mes*") + .addScriptField("script", new Script(ScriptType.INLINE, MockScriptEngine.NAME, "5", Collections.emptyMap())) + .setSize(1) + ) + ), + response -> { + SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); + assertThat(innerHits.getTotalHits().value, equalTo(2L)); + assertThat(innerHits.getHits().length, equalTo(1)); + HighlightField highlightField = innerHits.getAt(0).getHighlightFields().get("comments.message"); + assertThat(highlightField.fragments()[0].string(), equalTo("fox eat quick")); + assertThat(innerHits.getAt(0).getExplanation().toString(), containsString("weight(comments.message:fox in")); + assertThat( + innerHits.getAt(0).getFields().get("comments").getValue(), + equalTo(Collections.singletonMap("message", Collections.singletonList("fox eat quick"))) + ); + assertThat(innerHits.getAt(0).getFields().get("script").getValue().toString(), equalTo("5")); + } + ); + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.Avg).innerHit( + new InnerHitBuilder().addDocValueField("comments.mes*").setSize(1) + ) + ), + response -> { + SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); + assertThat(innerHits.getHits().length, equalTo(1)); + assertThat(innerHits.getAt(0).getFields().get("comments.message").getValue().toString(), equalTo("eat")); + } ); - assertThat(innerHits.getAt(0).getFields().get("script").getValue().toString(), equalTo("5")); - - response = prepareSearch("articles").setQuery( - nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.Avg).innerHit( - new InnerHitBuilder().addDocValueField("comments.mes*").setSize(1) - ) - ).get(); - assertNoFailures(response); - innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); - assertThat(innerHits.getHits().length, equalTo(1)); - assertThat(innerHits.getAt(0).getFields().get("comments.message").getValue().toString(), equalTo("eat")); } public void testRandomNested() throws Exception { @@ -250,32 +258,31 @@ public void testRandomNested() throws Exception { new InnerHitBuilder("b").addSort(new FieldSortBuilder("_doc").order(SortOrder.ASC)).setSize(size) ) ); - SearchResponse searchResponse = prepareSearch("idx").setQuery(boolQuery).setSize(numDocs).addSort("foo", SortOrder.ASC).get(); - - assertNoFailures(searchResponse); - assertHitCount(searchResponse, numDocs); - assertThat(searchResponse.getHits().getHits().length, equalTo(numDocs)); - for (int i = 0; i < numDocs; i++) { - SearchHit searchHit = searchResponse.getHits().getAt(i); - assertThat(searchHit.getShard(), notNullValue()); - SearchHits inner = searchHit.getInnerHits().get("a"); - assertThat(inner.getTotalHits().value, equalTo((long) field1InnerObjects[i])); - for (int j = 0; j < field1InnerObjects[i] && j < size; j++) { - SearchHit innerHit = inner.getAt(j); - 
assertThat(innerHit.getNestedIdentity().getField().string(), equalTo("field1")); - assertThat(innerHit.getNestedIdentity().getOffset(), equalTo(j)); - assertThat(innerHit.getNestedIdentity().getChild(), nullValue()); - } - - inner = searchHit.getInnerHits().get("b"); - assertThat(inner.getTotalHits().value, equalTo((long) field2InnerObjects[i])); - for (int j = 0; j < field2InnerObjects[i] && j < size; j++) { - SearchHit innerHit = inner.getAt(j); - assertThat(innerHit.getNestedIdentity().getField().string(), equalTo("field2")); - assertThat(innerHit.getNestedIdentity().getOffset(), equalTo(j)); - assertThat(innerHit.getNestedIdentity().getChild(), nullValue()); + assertNoFailuresAndResponse(prepareSearch("idx").setQuery(boolQuery).setSize(numDocs).addSort("foo", SortOrder.ASC), response -> { + assertHitCount(response, numDocs); + assertThat(response.getHits().getHits().length, equalTo(numDocs)); + for (int i = 0; i < numDocs; i++) { + SearchHit searchHit = response.getHits().getAt(i); + assertThat(searchHit.getShard(), notNullValue()); + SearchHits inner = searchHit.getInnerHits().get("a"); + assertThat(inner.getTotalHits().value, equalTo((long) field1InnerObjects[i])); + for (int j = 0; j < field1InnerObjects[i] && j < size; j++) { + SearchHit innerHit = inner.getAt(j); + assertThat(innerHit.getNestedIdentity().getField().string(), equalTo("field1")); + assertThat(innerHit.getNestedIdentity().getOffset(), equalTo(j)); + assertThat(innerHit.getNestedIdentity().getChild(), nullValue()); + } + + inner = searchHit.getInnerHits().get("b"); + assertThat(inner.getTotalHits().value, equalTo((long) field2InnerObjects[i])); + for (int j = 0; j < field2InnerObjects[i] && j < size; j++) { + SearchHit innerHit = inner.getAt(j); + assertThat(innerHit.getNestedIdentity().getField().string(), equalTo("field2")); + assertThat(innerHit.getNestedIdentity().getOffset(), equalTo(j)); + assertThat(innerHit.getNestedIdentity().getChild(), nullValue()); + } } - } + }); } public void testNestedMultipleLayers() throws Exception { @@ -359,140 +366,154 @@ public void testNestedMultipleLayers() throws Exception { indexRandom(true, requests); // Check we can load the first doubly-nested document. 
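A note on the inner-hit keys asserted below: an unnamed `InnerHitBuilder()` registers its hits under the nested path, while `new InnerHitBuilder("remark")` registers them under that name, so the doubly-nested hits are read in two steps. A short sketch using only accessors that appear in this test:

    // Outer level is unnamed, so it is keyed by the nested path "comments";
    // the inner level was named "remark" via new InnerHitBuilder("remark").
    SearchHits comments = response.getHits().getAt(0).getInnerHits().get("comments");
    SearchHits remarks = comments.getAt(0).getInnerHits().get("remark");
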
- SearchResponse response = prepareSearch("articles").setQuery( - nestedQuery( - "comments", - nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "good"), ScoreMode.Avg).innerHit( - new InnerHitBuilder("remark") - ), - ScoreMode.Avg - ).innerHit(new InnerHitBuilder()) - ).get(); - assertNoFailures(response); - assertHitCount(response, 1); - assertSearchHit(response, 1, hasId("1")); - assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); - SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); - assertThat(innerHits.getTotalHits().value, equalTo(1L)); - assertThat(innerHits.getHits().length, equalTo(1)); - assertThat(innerHits.getAt(0).getId(), equalTo("1")); - assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - innerHits = innerHits.getAt(0).getInnerHits().get("remark"); - assertThat(innerHits.getTotalHits().value, equalTo(1L)); - assertThat(innerHits.getHits().length, equalTo(1)); - assertThat(innerHits.getAt(0).getId(), equalTo("1")); - assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("remarks")); - assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); - + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery( + "comments", + nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "good"), ScoreMode.Avg).innerHit( + new InnerHitBuilder("remark") + ), + ScoreMode.Avg + ).innerHit(new InnerHitBuilder()) + ), + response -> { + assertHitCount(response, 1); + assertSearchHit(response, 1, hasId("1")); + assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); + SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); + assertThat(innerHits.getTotalHits().value, equalTo(1L)); + assertThat(innerHits.getHits().length, equalTo(1)); + assertThat(innerHits.getAt(0).getId(), equalTo("1")); + assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + innerHits = innerHits.getAt(0).getInnerHits().get("remark"); + assertThat(innerHits.getTotalHits().value, equalTo(1L)); + assertThat(innerHits.getHits().length, equalTo(1)); + assertThat(innerHits.getAt(0).getId(), equalTo("1")); + assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("remarks")); + assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); + } + ); // Check we can load the second doubly-nested document. 
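The assertions that follow identify which nested elements matched by walking the `NestedIdentity` chain; spelled out, the expected identity for this second match composes into a concrete source path. A sketch of that decoding, assuming the same accessors used throughout this file:

    SearchHit.NestedIdentity id = innerHits.getAt(0).getNestedIdentity();
    // field/offset pairs compose into a source path; the assertions below
    // imply "comments[1].remarks[0]" for this match.
    String path = id.getField().string() + "[" + id.getOffset() + "]."
        + id.getChild().getField().string() + "[" + id.getChild().getOffset() + "]";
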
- response = prepareSearch("articles").setQuery( - nestedQuery( - "comments", - nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "neutral"), ScoreMode.Avg).innerHit( - new InnerHitBuilder("remark") - ), - ScoreMode.Avg - ).innerHit(new InnerHitBuilder()) - ).get(); - assertNoFailures(response); - assertHitCount(response, 1); - assertSearchHit(response, 1, hasId("1")); - assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); - innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); - assertThat(innerHits.getTotalHits().value, equalTo(1L)); - assertThat(innerHits.getHits().length, equalTo(1)); - assertThat(innerHits.getAt(0).getId(), equalTo("1")); - assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(1)); - innerHits = innerHits.getAt(0).getInnerHits().get("remark"); - assertThat(innerHits.getTotalHits().value, equalTo(1L)); - assertThat(innerHits.getHits().length, equalTo(1)); - assertThat(innerHits.getAt(0).getId(), equalTo("1")); - assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(1)); - assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("remarks")); - assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); - + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery( + "comments", + nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "neutral"), ScoreMode.Avg).innerHit( + new InnerHitBuilder("remark") + ), + ScoreMode.Avg + ).innerHit(new InnerHitBuilder()) + ), + response -> { + assertHitCount(response, 1); + assertSearchHit(response, 1, hasId("1")); + assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); + SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); + assertThat(innerHits.getTotalHits().value, equalTo(1L)); + assertThat(innerHits.getHits().length, equalTo(1)); + assertThat(innerHits.getAt(0).getId(), equalTo("1")); + assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(1)); + innerHits = innerHits.getAt(0).getInnerHits().get("remark"); + assertThat(innerHits.getTotalHits().value, equalTo(1L)); + assertThat(innerHits.getHits().length, equalTo(1)); + assertThat(innerHits.getAt(0).getId(), equalTo("1")); + assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(1)); + assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("remarks")); + assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); + } + ); // Directly refer to the second level: - response = prepareSearch("articles").setQuery( - nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "bad"), ScoreMode.Avg).innerHit(new InnerHitBuilder()) - ).get(); - assertNoFailures(response); - assertHitCount(response, 1); - assertSearchHit(response, 1, hasId("2")); - assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); - innerHits = response.getHits().getAt(0).getInnerHits().get("comments.remarks"); - assertThat(innerHits.getTotalHits().value, equalTo(1L)); - 
assertThat(innerHits.getHits().length, equalTo(1)); - assertThat(innerHits.getAt(0).getId(), equalTo("2")); - assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("remarks")); - assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); - - response = prepareSearch("articles").setQuery( - nestedQuery( - "comments", + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "bad"), ScoreMode.Avg).innerHit( - new InnerHitBuilder("remark") - ), - ScoreMode.Avg - ).innerHit(new InnerHitBuilder()) - ).get(); - assertNoFailures(response); - assertHitCount(response, 1); - assertSearchHit(response, 1, hasId("2")); - assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); - innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); - assertThat(innerHits.getTotalHits().value, equalTo(1L)); - assertThat(innerHits.getHits().length, equalTo(1)); - assertThat(innerHits.getAt(0).getId(), equalTo("2")); - assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - innerHits = innerHits.getAt(0).getInnerHits().get("remark"); - assertThat(innerHits.getTotalHits().value, equalTo(1L)); - assertThat(innerHits.getHits().length, equalTo(1)); - assertThat(innerHits.getAt(0).getId(), equalTo("2")); - assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("remarks")); - assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); - - // Check that inner hits contain _source even when it's disabled on the parent request. 
- response = prepareSearch("articles").setFetchSource(false) - .setQuery( + new InnerHitBuilder() + ) + ), + response -> { + assertHitCount(response, 1); + assertSearchHit(response, 1, hasId("2")); + assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); + SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments.remarks"); + assertThat(innerHits.getTotalHits().value, equalTo(1L)); + assertThat(innerHits.getHits().length, equalTo(1)); + assertThat(innerHits.getAt(0).getId(), equalTo("2")); + assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("remarks")); + assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); + } + ); + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( nestedQuery( "comments", - nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "good"), ScoreMode.Avg).innerHit( + nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "bad"), ScoreMode.Avg).innerHit( new InnerHitBuilder("remark") ), ScoreMode.Avg ).innerHit(new InnerHitBuilder()) - ) - .get(); - assertNoFailures(response); - innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); - innerHits = innerHits.getAt(0).getInnerHits().get("remark"); - assertNotNull(innerHits.getAt(0).getSourceAsMap()); - assertFalse(innerHits.getAt(0).getSourceAsMap().isEmpty()); - - response = prepareSearch("articles").setQuery( - nestedQuery( - "comments", - nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "good"), ScoreMode.Avg).innerHit( - new InnerHitBuilder("remark") + ), + response -> { + assertHitCount(response, 1); + assertSearchHit(response, 1, hasId("2")); + assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); + SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); + assertThat(innerHits.getTotalHits().value, equalTo(1L)); + assertThat(innerHits.getHits().length, equalTo(1)); + assertThat(innerHits.getAt(0).getId(), equalTo("2")); + assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + innerHits = innerHits.getAt(0).getInnerHits().get("remark"); + assertThat(innerHits.getTotalHits().value, equalTo(1L)); + assertThat(innerHits.getHits().length, equalTo(1)); + assertThat(innerHits.getAt(0).getId(), equalTo("2")); + assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("remarks")); + assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); + } + ); + // Check that inner hits contain _source even when it's disabled on the parent request. 
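One mechanical detail of the conversion is visible in the hunks above: SearchHits innerHits moves from a method-level local into each callback. Java lambdas can only capture effectively final locals, so a variable the old linear code reassigned between searches has to be re-declared inside every converted block. Schematically, assuming the surrounding test class and its static imports, with query standing in for the nested query built at each call site:

    // Before: one mutable local threaded through consecutive searches.
    SearchResponse response = prepareSearch("articles").setQuery(query).get();
    assertNoFailures(response);
    SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments");
    innerHits = innerHits.getAt(0).getInnerHits().get("remark");

    // After: the local lives inside the callback; reassignment is fine there
    // because the lambda declares it rather than capturing it.
    assertNoFailuresAndResponse(prepareSearch("articles").setQuery(query), response -> {
        SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments");
        innerHits = innerHits.getAt(0).getInnerHits().get("remark");
        // ... assertions on innerHits ...
    });

The parent-request _source check right below follows this same shape.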
+ assertNoFailuresAndResponse( + prepareSearch("articles").setFetchSource(false) + .setQuery( + nestedQuery( + "comments", + nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "good"), ScoreMode.Avg).innerHit( + new InnerHitBuilder("remark") + ), + ScoreMode.Avg + ).innerHit(new InnerHitBuilder()) ), - ScoreMode.Avg - ).innerHit(new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE)) - ).get(); - assertNoFailures(response); - innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); - innerHits = innerHits.getAt(0).getInnerHits().get("remark"); - assertNotNull(innerHits.getAt(0).getSourceAsMap()); - assertFalse(innerHits.getAt(0).getSourceAsMap().isEmpty()); + response -> { + SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); + innerHits = innerHits.getAt(0).getInnerHits().get("remark"); + assertNotNull(innerHits.getAt(0).getSourceAsMap()); + assertFalse(innerHits.getAt(0).getSourceAsMap().isEmpty()); + } + ); + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery( + "comments", + nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "good"), ScoreMode.Avg).innerHit( + new InnerHitBuilder("remark") + ), + ScoreMode.Avg + ).innerHit(new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE)) + ), + response -> { + SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); + innerHits = innerHits.getAt(0).getInnerHits().get("remark"); + assertNotNull(innerHits.getAt(0).getSourceAsMap()); + assertFalse(innerHits.getAt(0).getSourceAsMap().isEmpty()); + } + ); } // Issue #9723 @@ -514,20 +535,23 @@ public void testNestedDefinedAsObject() throws Exception { ); indexRandom(true, requests); - SearchResponse response = prepareSearch("articles").setQuery( - nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.Avg).innerHit(new InnerHitBuilder()) - ).get(); - assertNoFailures(response); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits().value, equalTo(1L)); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getId(), equalTo("1")); - assertThat( - response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getField().string(), - equalTo("comments") + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.Avg).innerHit(new InnerHitBuilder()) + ), + response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getId(), equalTo("1")); + assertThat( + response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getField().string(), + equalTo("comments") + ); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getChild(), nullValue()); + } ); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - 
assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getChild(), nullValue()); } public void testInnerHitsWithObjectFieldThatHasANestedField() throws Exception { @@ -583,56 +607,62 @@ public void testInnerHitsWithObjectFieldThatHasANestedField() throws Exception { ); indexRandom(true, requests); - SearchResponse resp1 = prepareSearch("articles").setQuery( - nestedQuery("comments.messages", matchQuery("comments.messages.message", "fox"), ScoreMode.Avg).innerHit( - new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.FETCH_SOURCE) - ) - ).get(); - assertNoFailures(resp1); - assertHitCount(resp1, 1); - SearchHit parent = resp1.getHits().getAt(0); - assertThat(parent.getId(), equalTo("1")); - SearchHits inner = parent.getInnerHits().get("comments.messages"); - assertThat(inner.getTotalHits().value, equalTo(2L)); - assertThat(inner.getAt(0).getSourceAsString(), equalTo("{\"message\":\"no fox\"}")); - assertThat(inner.getAt(1).getSourceAsString(), equalTo("{\"message\":\"fox eat quick\"}")); - - SearchResponse response = prepareSearch("articles").setQuery( - nestedQuery("comments.messages", matchQuery("comments.messages.message", "fox"), ScoreMode.Avg).innerHit( - new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE) - ) - ).get(); - assertNoFailures(response); - assertHitCount(response, 1); - SearchHit hit = response.getHits().getAt(0); - assertThat(hit.getId(), equalTo("1")); - SearchHits messages = hit.getInnerHits().get("comments.messages"); - assertThat(messages.getTotalHits().value, equalTo(2L)); - assertThat(messages.getAt(0).getId(), equalTo("1")); - assertThat(messages.getAt(0).getNestedIdentity().getField().string(), equalTo("comments.messages")); - assertThat(messages.getAt(0).getNestedIdentity().getOffset(), equalTo(2)); - assertThat(messages.getAt(0).getNestedIdentity().getChild(), nullValue()); - assertThat(messages.getAt(1).getId(), equalTo("1")); - assertThat(messages.getAt(1).getNestedIdentity().getField().string(), equalTo("comments.messages")); - assertThat(messages.getAt(1).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(messages.getAt(1).getNestedIdentity().getChild(), nullValue()); - - response = prepareSearch("articles").setQuery( - nestedQuery("comments.messages", matchQuery("comments.messages.message", "bear"), ScoreMode.Avg).innerHit( - new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE) - ) - ).get(); - assertNoFailures(response); - assertHitCount(response, 1); - hit = response.getHits().getAt(0); - assertThat(hit.getId(), equalTo("1")); - messages = hit.getInnerHits().get("comments.messages"); - assertThat(messages.getTotalHits().value, equalTo(1L)); - assertThat(messages.getAt(0).getId(), equalTo("1")); - assertThat(messages.getAt(0).getNestedIdentity().getField().string(), equalTo("comments.messages")); - assertThat(messages.getAt(0).getNestedIdentity().getOffset(), equalTo(1)); - assertThat(messages.getAt(0).getNestedIdentity().getChild(), nullValue()); - + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery("comments.messages", matchQuery("comments.messages.message", "fox"), ScoreMode.Avg).innerHit( + new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.FETCH_SOURCE) + ) + ), + response -> { + assertHitCount(response, 1); + SearchHit parent = response.getHits().getAt(0); + assertThat(parent.getId(), equalTo("1")); + SearchHits inner = parent.getInnerHits().get("comments.messages"); + 
assertThat(inner.getTotalHits().value, equalTo(2L)); + assertThat(inner.getAt(0).getSourceAsString(), equalTo("{\"message\":\"no fox\"}")); + assertThat(inner.getAt(1).getSourceAsString(), equalTo("{\"message\":\"fox eat quick\"}")); + } + ); + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery("comments.messages", matchQuery("comments.messages.message", "fox"), ScoreMode.Avg).innerHit( + new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE) + ) + ), + response -> { + assertHitCount(response, 1); + SearchHit hit = response.getHits().getAt(0); + assertThat(hit.getId(), equalTo("1")); + SearchHits messages = hit.getInnerHits().get("comments.messages"); + assertThat(messages.getTotalHits().value, equalTo(2L)); + assertThat(messages.getAt(0).getId(), equalTo("1")); + assertThat(messages.getAt(0).getNestedIdentity().getField().string(), equalTo("comments.messages")); + assertThat(messages.getAt(0).getNestedIdentity().getOffset(), equalTo(2)); + assertThat(messages.getAt(0).getNestedIdentity().getChild(), nullValue()); + assertThat(messages.getAt(1).getId(), equalTo("1")); + assertThat(messages.getAt(1).getNestedIdentity().getField().string(), equalTo("comments.messages")); + assertThat(messages.getAt(1).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(messages.getAt(1).getNestedIdentity().getChild(), nullValue()); + } + ); + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery("comments.messages", matchQuery("comments.messages.message", "bear"), ScoreMode.Avg).innerHit( + new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE) + ) + ), + response -> { + assertHitCount(response, 1); + SearchHit hit = response.getHits().getAt(0); + assertThat(hit.getId(), equalTo("1")); + SearchHits messages = hit.getInnerHits().get("comments.messages"); + assertThat(messages.getTotalHits().value, equalTo(1L)); + assertThat(messages.getAt(0).getId(), equalTo("1")); + assertThat(messages.getAt(0).getNestedIdentity().getField().string(), equalTo("comments.messages")); + assertThat(messages.getAt(0).getNestedIdentity().getOffset(), equalTo(1)); + assertThat(messages.getAt(0).getNestedIdentity().getChild(), nullValue()); + } + ); // index the message in an object form instead of an array requests = new ArrayList<>(); requests.add( @@ -650,21 +680,24 @@ public void testInnerHitsWithObjectFieldThatHasANestedField() throws Exception { ) ); indexRandom(true, requests); - response = prepareSearch("articles").setQuery( - nestedQuery("comments.messages", matchQuery("comments.messages.message", "fox"), ScoreMode.Avg).innerHit( - new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE) - ) - ).get(); - assertNoFailures(response); - assertHitCount(response, 1); - hit = response.getHits().getAt(0); - assertThat(hit.getId(), equalTo("1")); - messages = hit.getInnerHits().get("comments.messages"); - assertThat(messages.getTotalHits().value, equalTo(1L)); - assertThat(messages.getAt(0).getId(), equalTo("1")); - assertThat(messages.getAt(0).getNestedIdentity().getField().string(), equalTo("comments.messages")); - assertThat(messages.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(messages.getAt(0).getNestedIdentity().getChild(), nullValue()); + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery("comments.messages", matchQuery("comments.messages.message", "fox"), ScoreMode.Avg).innerHit( + new 
InnerHitBuilder().setFetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE) + ) + ), + response -> { + assertHitCount(response, 1); + SearchHit hit = response.getHits().getAt(0); + assertThat(hit.getId(), equalTo("1")); + SearchHits messages = hit.getInnerHits().get("comments.messages"); + assertThat(messages.getTotalHits().value, equalTo(1L)); + assertThat(messages.getAt(0).getId(), equalTo("1")); + assertThat(messages.getAt(0).getNestedIdentity().getField().string(), equalTo("comments.messages")); + assertThat(messages.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(messages.getAt(0).getNestedIdentity().getChild(), nullValue()); + } + ); } public void testMatchesQueriesNestedInnerHits() throws Exception { @@ -760,28 +793,28 @@ public void testMatchesQueriesNestedInnerHits() throws Exception { query = nestedQuery("nested1", query, ScoreMode.Avg).innerHit( new InnerHitBuilder().addSort(new FieldSortBuilder("nested1.n_field1").order(SortOrder.ASC)) ); - SearchResponse searchResponse = prepareSearch("test").setQuery(query).setSize(numDocs).addSort("field1", SortOrder.ASC).get(); - assertNoFailures(searchResponse); - assertAllSuccessful(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) numDocs)); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("0")); - assertThat(searchResponse.getHits().getAt(0).getInnerHits().get("nested1").getTotalHits().value, equalTo(2L)); - assertThat(searchResponse.getHits().getAt(0).getInnerHits().get("nested1").getAt(0).getMatchedQueries().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getInnerHits().get("nested1").getAt(0).getMatchedQueries()[0], equalTo("test1")); - assertThat(searchResponse.getHits().getAt(0).getInnerHits().get("nested1").getAt(1).getMatchedQueries().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getInnerHits().get("nested1").getAt(1).getMatchedQueries()[0], equalTo("test3")); - - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getInnerHits().get("nested1").getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getAt(1).getInnerHits().get("nested1").getAt(0).getMatchedQueries().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(1).getInnerHits().get("nested1").getAt(0).getMatchedQueries()[0], equalTo("test2")); - - for (int i = 2; i < numDocs; i++) { - assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(String.valueOf(i))); - assertThat(searchResponse.getHits().getAt(i).getInnerHits().get("nested1").getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getAt(i).getInnerHits().get("nested1").getAt(0).getMatchedQueries().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(i).getInnerHits().get("nested1").getAt(0).getMatchedQueries()[0], equalTo("test3")); - } + assertNoFailuresAndResponse(prepareSearch("test").setQuery(query).setSize(numDocs).addSort("field1", SortOrder.ASC), response -> { + assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo((long) numDocs)); + assertThat(response.getHits().getAt(0).getId(), equalTo("0")); + assertThat(response.getHits().getAt(0).getInnerHits().get("nested1").getTotalHits().value, equalTo(2L)); + assertThat(response.getHits().getAt(0).getInnerHits().get("nested1").getAt(0).getMatchedQueries().length, equalTo(1)); + 
assertThat(response.getHits().getAt(0).getInnerHits().get("nested1").getAt(0).getMatchedQueries()[0], equalTo("test1")); + assertThat(response.getHits().getAt(0).getInnerHits().get("nested1").getAt(1).getMatchedQueries().length, equalTo(1)); + assertThat(response.getHits().getAt(0).getInnerHits().get("nested1").getAt(1).getMatchedQueries()[0], equalTo("test3")); + + assertThat(response.getHits().getAt(1).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getInnerHits().get("nested1").getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getAt(1).getInnerHits().get("nested1").getAt(0).getMatchedQueries().length, equalTo(1)); + assertThat(response.getHits().getAt(1).getInnerHits().get("nested1").getAt(0).getMatchedQueries()[0], equalTo("test2")); + + for (int i = 2; i < numDocs; i++) { + assertThat(response.getHits().getAt(i).getId(), equalTo(String.valueOf(i))); + assertThat(response.getHits().getAt(i).getInnerHits().get("nested1").getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getAt(i).getInnerHits().get("nested1").getAt(0).getMatchedQueries().length, equalTo(1)); + assertThat(response.getHits().getAt(i).getInnerHits().get("nested1").getAt(0).getMatchedQueries()[0], equalTo("test3")); + } + }); } public void testNestedSource() throws Exception { @@ -812,64 +845,76 @@ public void testNestedSource() throws Exception { // the field name (comments.message) used for source filtering should be the same as when using that field for // other features (like in the query dsl or aggs) in order for consistency: - SearchResponse response = prepareSearch().setQuery( - nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.None).innerHit( - new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.of(true, new String[] { "comments.message" }, null)) - ) - ).get(); - assertNoFailures(response); - assertHitCount(response, 1); - - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits().value, equalTo(2L)); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().size(), equalTo(1)); - assertThat( - response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().get("message"), - equalTo("fox eat quick") - ); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(1).getSourceAsMap().size(), equalTo(1)); - assertThat( - response.getHits().getAt(0).getInnerHits().get("comments").getAt(1).getSourceAsMap().get("message"), - equalTo("fox ate rabbit x y z") + assertNoFailuresAndResponse( + prepareSearch().setQuery( + nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.None).innerHit( + new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.of(true, new String[] { "comments.message" }, null)) + ) + ), + response -> { + assertHitCount(response, 1); + + assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits().value, equalTo(2L)); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().size(), equalTo(1)); + assertThat( + response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().get("message"), + equalTo("fox eat quick") + ); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(1).getSourceAsMap().size(), equalTo(1)); + assertThat( + response.getHits().getAt(0).getInnerHits().get("comments").getAt(1).getSourceAsMap().get("message"), + equalTo("fox ate rabbit x y z") + ); + } ); - 
response = prepareSearch().setQuery( - nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.None).innerHit(new InnerHitBuilder()) - ).get(); - assertNoFailures(response); - assertHitCount(response, 1); - - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits().value, equalTo(2L)); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().size(), equalTo(2)); - assertThat( - response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().get("message"), - equalTo("fox eat quick") - ); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().size(), equalTo(2)); - assertThat( - response.getHits().getAt(0).getInnerHits().get("comments").getAt(1).getSourceAsMap().get("message"), - equalTo("fox ate rabbit x y z") + assertNoFailuresAndResponse( + prepareSearch().setQuery( + nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.None).innerHit(new InnerHitBuilder()) + ), + response -> { + assertHitCount(response, 1); + + assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits().value, equalTo(2L)); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().size(), equalTo(2)); + assertThat( + response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().get("message"), + equalTo("fox eat quick") + ); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().size(), equalTo(2)); + assertThat( + response.getHits().getAt(0).getInnerHits().get("comments").getAt(1).getSourceAsMap().get("message"), + equalTo("fox ate rabbit x y z") + ); + } ); // Source filter on a field that does not exist inside the nested document and just check that we do not fail and // return an empty _source: - response = prepareSearch().setQuery( - nestedQuery("comments", matchQuery("comments.message", "away"), ScoreMode.None).innerHit( - new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.of(true, new String[] { "comments.missing_field" }, null)) - ) - ).get(); - assertNoFailures(response); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits().value, equalTo(1L)); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().size(), equalTo(0)); - + assertNoFailuresAndResponse( + prepareSearch().setQuery( + nestedQuery("comments", matchQuery("comments.message", "away"), ScoreMode.None).innerHit( + new InnerHitBuilder().setFetchSourceContext( + FetchSourceContext.of(true, new String[] { "comments.missing_field" }, null) + ) + ) + ), + response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().size(), equalTo(0)); + } + ); // Check that inner hits contain _source even when it's disabled on the root request. 
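The root-request _source check that follows is the last conversion in this file. None of these tests need a value to outlive its callback, but where one did, the same release discipline could be written out by hand. This is a sketch under the assumption that SearchResponse is reference counted, which is what the helper-based rewrite implies:

    // Hand-rolled equivalent for a value that must escape the assertion scope.
    SearchResponse response = prepareSearch("test").setQuery(matchAllQuery()).get();
    final long totalHits;
    try {
        assertNoFailures(response);
        totalHits = response.getHits().getTotalHits().value;
    } finally {
        response.decRef();  // matches what the helper does on every exit path
    }
    // totalHits remains usable here, after the response has been released.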
- response = prepareSearch().setFetchSource(false) - .setQuery(nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.None).innerHit(new InnerHitBuilder())) - .get(); - assertNoFailures(response); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits().value, equalTo(2L)); - assertFalse(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().isEmpty()); + assertNoFailuresAndResponse( + prepareSearch().setFetchSource(false) + .setQuery(nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.None).innerHit(new InnerHitBuilder())), + response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits().value, equalTo(2L)); + assertFalse(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().isEmpty()); + } + ); } public void testInnerHitsWithIgnoreUnmapped() throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesIT.java index d7347ef21328f..b5243ed5a52ab 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.fetch.subphase; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.query.MatchAllQueryBuilder; @@ -32,6 +31,7 @@ import static org.elasticsearch.index.query.QueryBuilders.termsQuery; import static org.elasticsearch.index.query.QueryBuilders.wrapperQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasKey; @@ -46,44 +46,53 @@ public void testSimpleMatchedQueryFromFilteredQuery() throws Exception { client().prepareIndex("test").setId("3").setSource("name", "test3", "number", 3).get(); refresh(); - SearchResponse searchResponse = prepareSearch().setQuery( - boolQuery().must(matchAllQuery()) - .filter( - boolQuery().should(rangeQuery("number").lt(2).queryName("test1")).should(rangeQuery("number").gte(2).queryName("test2")) - ) - ).get(); - assertHitCount(searchResponse, 3L); - for (SearchHit hit : searchResponse.getHits()) { - if (hit.getId().equals("3") || hit.getId().equals("2")) { - assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("test2")); - assertThat(hit.getMatchedQueryScore("test2"), equalTo(1f)); - } else if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("test1")); - assertThat(hit.getMatchedQueryScore("test1"), equalTo(1f)); - } else { - fail("Unexpected document returned with id " + hit.getId()); + assertResponse( + prepareSearch().setQuery( + boolQuery().must(matchAllQuery()) + .filter( + boolQuery().should(rangeQuery("number").lt(2).queryName("test1")) + .should(rangeQuery("number").gte(2).queryName("test2")) + ) + ), + response 
-> { + assertHitCount(response, 3L); + for (SearchHit hit : response.getHits()) { + if (hit.getId().equals("3") || hit.getId().equals("2")) { + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("test2")); + assertThat(hit.getMatchedQueryScore("test2"), equalTo(1f)); + } else if (hit.getId().equals("1")) { + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("test1")); + assertThat(hit.getMatchedQueryScore("test1"), equalTo(1f)); + } else { + fail("Unexpected document returned with id " + hit.getId()); + } + } } - } - - searchResponse = prepareSearch().setQuery( - boolQuery().should(rangeQuery("number").lte(2).queryName("test1")).should(rangeQuery("number").gt(2).queryName("test2")) - ).get(); - assertHitCount(searchResponse, 3L); - for (SearchHit hit : searchResponse.getHits()) { - if (hit.getId().equals("1") || hit.getId().equals("2")) { - assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("test1")); - assertThat(hit.getMatchedQueryScore("test1"), equalTo(1f)); - } else if (hit.getId().equals("3")) { - assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("test2")); - assertThat(hit.getMatchedQueryScore("test2"), equalTo(1f)); - } else { - fail("Unexpected document returned with id " + hit.getId()); + ); + + assertResponse( + prepareSearch().setQuery( + boolQuery().should(rangeQuery("number").lte(2).queryName("test1")).should(rangeQuery("number").gt(2).queryName("test2")) + ), + response -> { + assertHitCount(response, 3L); + for (SearchHit hit : response.getHits()) { + if (hit.getId().equals("1") || hit.getId().equals("2")) { + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("test1")); + assertThat(hit.getMatchedQueryScore("test1"), equalTo(1f)); + } else if (hit.getId().equals("3")) { + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("test2")); + assertThat(hit.getMatchedQueryScore("test2"), equalTo(1f)); + } else { + fail("Unexpected document returned with id " + hit.getId()); + } + } } - } + ); } public void testSimpleMatchedQueryFromTopLevelFilter() throws Exception { @@ -95,50 +104,55 @@ public void testSimpleMatchedQueryFromTopLevelFilter() throws Exception { client().prepareIndex("test").setId("3").setSource("name", "test").get(); refresh(); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .setPostFilter( - boolQuery().should(termQuery("name", "test").queryName("name")).should(termQuery("title", "title1").queryName("title")) - ) - .get(); - assertHitCount(searchResponse, 3L); - for (SearchHit hit : searchResponse.getHits()) { - if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); - assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("title")); - assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f)); - } else if (hit.getId().equals("2") || hit.getId().equals("3")) { - assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); - assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); - } else { 
- fail("Unexpected document returned with id " + hit.getId()); + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .setPostFilter( + boolQuery().should(termQuery("name", "test").queryName("name")).should(termQuery("title", "title1").queryName("title")) + ), + response -> { + assertHitCount(response, 3L); + for (SearchHit hit : response.getHits()) { + if (hit.getId().equals("1")) { + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); + assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("title")); + assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f)); + } else if (hit.getId().equals("2") || hit.getId().equals("3")) { + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); + assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); + } else { + fail("Unexpected document returned with id " + hit.getId()); + } + } } - } - - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .setPostFilter( - boolQuery().should(termQuery("name", "test").queryName("name")).should(termQuery("title", "title1").queryName("title")) - ) - .get(); - - assertHitCount(searchResponse, 3L); - for (SearchHit hit : searchResponse.getHits()) { - if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); - assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("title")); - assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f)); - } else if (hit.getId().equals("2") || hit.getId().equals("3")) { - assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); - assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); - } else { - fail("Unexpected document returned with id " + hit.getId()); + ); + + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .setPostFilter( + boolQuery().should(termQuery("name", "test").queryName("name")).should(termQuery("title", "title1").queryName("title")) + ), + response -> { + assertHitCount(response, 3L); + for (SearchHit hit : response.getHits()) { + if (hit.getId().equals("1")) { + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); + assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("title")); + assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f)); + } else if (hit.getId().equals("2") || hit.getId().equals("3")) { + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); + assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); + } else { + fail("Unexpected document returned with id " + hit.getId()); + } + } } - } + ); } public void testSimpleMatchedQueryFromTopLevelFilterAndFilteredQuery() throws Exception { @@ -150,37 +164,44 @@ public void testSimpleMatchedQueryFromTopLevelFilterAndFilteredQuery() throws Ex client().prepareIndex("test").setId("3").setSource("name", "test", "title", "title3").get(); refresh(); - SearchResponse searchResponse = prepareSearch().setQuery( - boolQuery().must(matchAllQuery()).filter(termsQuery("title", "title1", "title2", 
"title3").queryName("title")) - ).setPostFilter(termQuery("name", "test").queryName("name")).get(); - assertHitCount(searchResponse, 3L); - for (SearchHit hit : searchResponse.getHits()) { - if (hit.getId().equals("1") || hit.getId().equals("2") || hit.getId().equals("3")) { - assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); - assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("title")); - assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f)); - } else { - fail("Unexpected document returned with id " + hit.getId()); + assertResponse( + prepareSearch().setQuery( + boolQuery().must(matchAllQuery()).filter(termsQuery("title", "title1", "title2", "title3").queryName("title")) + ).setPostFilter(termQuery("name", "test").queryName("name")), + response -> { + assertHitCount(response, 3L); + for (SearchHit hit : response.getHits()) { + if (hit.getId().equals("1") || hit.getId().equals("2") || hit.getId().equals("3")) { + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); + assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("title")); + assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f)); + } else { + fail("Unexpected document returned with id " + hit.getId()); + } + } } - } - - searchResponse = prepareSearch().setQuery(termsQuery("title", "title1", "title2", "title3").queryName("title")) - .setPostFilter(matchQuery("name", "test").queryName("name")) - .get(); - assertHitCount(searchResponse, 3L); - for (SearchHit hit : searchResponse.getHits()) { - if (hit.getId().equals("1") || hit.getId().equals("2") || hit.getId().equals("3")) { - assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); - assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("title")); - assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f)); - } else { - fail("Unexpected document returned with id " + hit.getId()); + ); + + assertResponse( + prepareSearch().setQuery(termsQuery("title", "title1", "title2", "title3").queryName("title")) + .setPostFilter(matchQuery("name", "test").queryName("name")), + response -> { + assertHitCount(response, 3L); + for (SearchHit hit : response.getHits()) { + if (hit.getId().equals("1") || hit.getId().equals("2") || hit.getId().equals("3")) { + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); + assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("title")); + assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f)); + } else { + fail("Unexpected document returned with id " + hit.getId()); + } + } } - } + ); } public void testRegExpQuerySupportsName() { @@ -190,18 +211,19 @@ public void testRegExpQuerySupportsName() { client().prepareIndex("test1").setId("1").setSource("title", "title1").get(); refresh(); - SearchResponse searchResponse = prepareSearch().setQuery(QueryBuilders.regexpQuery("title", "title1").queryName("regex")).get(); - assertHitCount(searchResponse, 1L); + assertResponse(prepareSearch().setQuery(QueryBuilders.regexpQuery("title", "title1").queryName("regex")), response -> { + 
assertHitCount(response, 1L); - for (SearchHit hit : searchResponse.getHits()) { - if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("regex")); - assertThat(hit.getMatchedQueryScore("regex"), equalTo(1f)); - } else { - fail("Unexpected document returned with id " + hit.getId()); + for (SearchHit hit : response.getHits()) { + if (hit.getId().equals("1")) { + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("regex")); + assertThat(hit.getMatchedQueryScore("regex"), equalTo(1f)); + } else { + fail("Unexpected document returned with id " + hit.getId()); + } } - } + }); } public void testPrefixQuerySupportsName() { @@ -211,18 +233,19 @@ public void testPrefixQuerySupportsName() { client().prepareIndex("test1").setId("1").setSource("title", "title1").get(); refresh(); - SearchResponse searchResponse = prepareSearch().setQuery(QueryBuilders.prefixQuery("title", "title").queryName("prefix")).get(); - assertHitCount(searchResponse, 1L); + assertResponse(prepareSearch().setQuery(QueryBuilders.prefixQuery("title", "title").queryName("prefix")), response -> { + assertHitCount(response, 1L); - for (SearchHit hit : searchResponse.getHits()) { - if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("prefix")); - assertThat(hit.getMatchedQueryScore("prefix"), equalTo(1f)); - } else { - fail("Unexpected document returned with id " + hit.getId()); + for (SearchHit hit : response.getHits()) { + if (hit.getId().equals("1")) { + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("prefix")); + assertThat(hit.getMatchedQueryScore("prefix"), equalTo(1f)); + } else { + fail("Unexpected document returned with id " + hit.getId()); + } } - } + }); } public void testFuzzyQuerySupportsName() { @@ -232,18 +255,19 @@ public void testFuzzyQuerySupportsName() { client().prepareIndex("test1").setId("1").setSource("title", "title1").get(); refresh(); - SearchResponse searchResponse = prepareSearch().setQuery(QueryBuilders.fuzzyQuery("title", "titel1").queryName("fuzzy")).get(); - assertHitCount(searchResponse, 1L); + assertResponse(prepareSearch().setQuery(QueryBuilders.fuzzyQuery("title", "titel1").queryName("fuzzy")), response -> { + assertHitCount(response, 1L); - for (SearchHit hit : searchResponse.getHits()) { - if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("fuzzy")); - assertThat(hit.getMatchedQueryScore("fuzzy"), greaterThan(0f)); - } else { - fail("Unexpected document returned with id " + hit.getId()); + for (SearchHit hit : response.getHits()) { + if (hit.getId().equals("1")) { + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("fuzzy")); + assertThat(hit.getMatchedQueryScore("fuzzy"), greaterThan(0f)); + } else { + fail("Unexpected document returned with id " + hit.getId()); + } } - } + }); } public void testWildcardQuerySupportsName() { @@ -253,18 +277,19 @@ public void testWildcardQuerySupportsName() { client().prepareIndex("test1").setId("1").setSource("title", "title1").get(); refresh(); - SearchResponse searchResponse = prepareSearch().setQuery(QueryBuilders.wildcardQuery("title", 
"titl*").queryName("wildcard")).get(); - assertHitCount(searchResponse, 1L); + assertResponse(prepareSearch().setQuery(QueryBuilders.wildcardQuery("title", "titl*").queryName("wildcard")), response -> { + assertHitCount(response, 1L); - for (SearchHit hit : searchResponse.getHits()) { - if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("wildcard")); - assertThat(hit.getMatchedQueryScore("wildcard"), equalTo(1f)); - } else { - fail("Unexpected document returned with id " + hit.getId()); + for (SearchHit hit : response.getHits()) { + if (hit.getId().equals("1")) { + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("wildcard")); + assertThat(hit.getMatchedQueryScore("wildcard"), equalTo(1f)); + } else { + fail("Unexpected document returned with id " + hit.getId()); + } } - } + }); } public void testSpanFirstQuerySupportsName() { @@ -274,20 +299,22 @@ public void testSpanFirstQuerySupportsName() { client().prepareIndex("test1").setId("1").setSource("title", "title1 title2").get(); refresh(); - SearchResponse searchResponse = prepareSearch().setQuery( - QueryBuilders.spanFirstQuery(QueryBuilders.spanTermQuery("title", "title1"), 10).queryName("span") - ).get(); - assertHitCount(searchResponse, 1L); - - for (SearchHit hit : searchResponse.getHits()) { - if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("span")); - assertThat(hit.getMatchedQueryScore("span"), greaterThan(0f)); - } else { - fail("Unexpected document returned with id " + hit.getId()); + assertResponse( + prepareSearch().setQuery(QueryBuilders.spanFirstQuery(QueryBuilders.spanTermQuery("title", "title1"), 10).queryName("span")), + response -> { + assertHitCount(response, 1L); + + for (SearchHit hit : response.getHits()) { + if (hit.getId().equals("1")) { + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("span")); + assertThat(hit.getMatchedQueryScore("span"), greaterThan(0f)); + } else { + fail("Unexpected document returned with id " + hit.getId()); + } + } } - } + ); } /** @@ -304,26 +331,29 @@ public void testMatchedWithShould() throws Exception { // Execute search at least two times to load it in cache int iter = scaledRandomIntBetween(2, 10); for (int i = 0; i < iter; i++) { - SearchResponse searchResponse = prepareSearch().setQuery( - boolQuery().minimumShouldMatch(1) - .should(queryStringQuery("dolor").queryName("dolor")) - .should(queryStringQuery("elit").queryName("elit")) - ).get(); - - assertHitCount(searchResponse, 2L); - for (SearchHit hit : searchResponse.getHits()) { - if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("dolor")); - assertThat(hit.getMatchedQueryScore("dolor"), greaterThan(0f)); - } else if (hit.getId().equals("2")) { - assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("elit")); - assertThat(hit.getMatchedQueryScore("elit"), greaterThan(0f)); - } else { - fail("Unexpected document returned with id " + hit.getId()); + assertResponse( + prepareSearch().setQuery( + boolQuery().minimumShouldMatch(1) + .should(queryStringQuery("dolor").queryName("dolor")) + 
.should(queryStringQuery("elit").queryName("elit")) + ), + response -> { + assertHitCount(response, 2L); + for (SearchHit hit : response.getHits()) { + if (hit.getId().equals("1")) { + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("dolor")); + assertThat(hit.getMatchedQueryScore("dolor"), greaterThan(0f)); + } else if (hit.getId().equals("2")) { + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("elit")); + assertThat(hit.getMatchedQueryScore("elit"), greaterThan(0f)); + } else { + fail("Unexpected document returned with id " + hit.getId()); + } + } } - } + ); } } @@ -340,12 +370,13 @@ public void testMatchedWithWrapperQuery() throws Exception { BytesReference termBytes = XContentHelper.toXContent(termQueryBuilder, XContentType.JSON, false); QueryBuilder[] queries = new QueryBuilder[] { wrapperQuery(matchBytes), constantScoreQuery(wrapperQuery(termBytes)) }; for (QueryBuilder query : queries) { - SearchResponse searchResponse = prepareSearch().setQuery(query).get(); - assertHitCount(searchResponse, 1L); - SearchHit hit = searchResponse.getHits().getAt(0); - assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("abc")); - assertThat(hit.getMatchedQueryScore("abc"), greaterThan(0f)); + assertResponse(prepareSearch().setQuery(query), response -> { + assertHitCount(response, 1L); + SearchHit hit = response.getHits().getAt(0); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("abc")); + assertThat(hit.getMatchedQueryScore("abc"), greaterThan(0f)); + }); } } @@ -357,16 +388,19 @@ public void testMatchedWithRescoreQuery() throws Exception { client().prepareIndex("test").setId("2").setSource("content", "hello you").get(); refresh(); - SearchResponse searchResponse = prepareSearch().setQuery(new MatchAllQueryBuilder().queryName("all")) - .setRescorer( - new QueryRescorerBuilder(new MatchPhraseQueryBuilder("content", "hello you").boost(10).queryName("rescore_phrase")) - ) - .get(); - assertHitCount(searchResponse, 2L); - assertThat(searchResponse.getHits().getAt(0).getMatchedQueries().length, equalTo(2)); - assertThat(searchResponse.getHits().getAt(0).getMatchedQueries(), equalTo(new String[] { "all", "rescore_phrase" })); - - assertThat(searchResponse.getHits().getAt(1).getMatchedQueries().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(1).getMatchedQueries(), equalTo(new String[] { "all" })); + assertResponse( + prepareSearch().setQuery(new MatchAllQueryBuilder().queryName("all")) + .setRescorer( + new QueryRescorerBuilder(new MatchPhraseQueryBuilder("content", "hello you").boost(10).queryName("rescore_phrase")) + ), + response -> { + assertHitCount(response, 2L); + assertThat(response.getHits().getAt(0).getMatchedQueries().length, equalTo(2)); + assertThat(response.getHits().getAt(0).getMatchedQueries(), equalTo(new String[] { "all", "rescore_phrase" })); + + assertThat(response.getHits().getAt(1).getMatchedQueries().length, equalTo(1)); + assertThat(response.getHits().getAt(1).getMatchedQueries(), equalTo(new String[] { "all" })); + } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/CustomHighlighterSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/CustomHighlighterSearchIT.java index 
6b790f9e6f090..0dbf3af735b44 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/CustomHighlighterSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/CustomHighlighterSearchIT.java @@ -7,7 +7,6 @@ */ package org.elasticsearch.search.fetch.subphase.highlight; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; @@ -22,6 +21,7 @@ import java.util.Map; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHighlight; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; /** @@ -45,10 +45,11 @@ protected void setup() throws Exception { } public void testThatCustomHighlightersAreSupported() throws IOException { - SearchResponse searchResponse = prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()) - .highlighter(new HighlightBuilder().field("name").highlighterType("test-custom")) - .get(); - assertHighlight(searchResponse, 0, "name", 0, equalTo("standard response for name at position 1")); + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()) + .highlighter(new HighlightBuilder().field("name").highlighterType("test-custom")), + response -> assertHighlight(response, 0, "name", 0, equalTo("standard response for name at position 1")) + ); } public void testThatCustomHighlighterCanBeConfiguredPerField() throws Exception { @@ -58,44 +59,49 @@ public void testThatCustomHighlighterCanBeConfiguredPerField() throws Exception options.put("myFieldOption", "someValue"); highlightConfig.options(options); - SearchResponse searchResponse = prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()) - .highlighter(new HighlightBuilder().field(highlightConfig)) - .get(); - - assertHighlight(searchResponse, 0, "name", 0, equalTo("standard response for name at position 1")); - assertHighlight(searchResponse, 0, "name", 1, equalTo("field:myFieldOption:someValue")); + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).highlighter(new HighlightBuilder().field(highlightConfig)), + response -> { + assertHighlight(response, 0, "name", 0, equalTo("standard response for name at position 1")); + assertHighlight(response, 0, "name", 1, equalTo("field:myFieldOption:someValue")); + } + ); } public void testThatCustomHighlighterCanBeConfiguredGlobally() throws Exception { Map options = new HashMap<>(); options.put("myGlobalOption", "someValue"); - SearchResponse searchResponse = prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()) - .highlighter(new HighlightBuilder().field("name").highlighterType("test-custom").options(options)) - .get(); - - assertHighlight(searchResponse, 0, "name", 0, equalTo("standard response for name at position 1")); - assertHighlight(searchResponse, 0, "name", 1, equalTo("field:myGlobalOption:someValue")); + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()) + .highlighter(new HighlightBuilder().field("name").highlighterType("test-custom").options(options)), + response -> { + assertHighlight(response, 0, "name", 0, equalTo("standard response for name at position 1")); + assertHighlight(response, 0, "name", 1, equalTo("field:myGlobalOption:someValue")); + } + ); } public void testThatCustomHighlighterReceivesFieldsInOrder() throws Exception { - SearchResponse 
searchResponse = prepareSearch("test").setQuery( - QueryBuilders.boolQuery().must(QueryBuilders.matchAllQuery()).should(QueryBuilders.termQuery("name", "arbitrary")) - ) - .highlighter( - new HighlightBuilder().highlighterType("test-custom") - .field("name") - .field("other_name") - .field("other_other_name") - .useExplicitFieldOrder(true) + assertResponse( + prepareSearch("test").setQuery( + QueryBuilders.boolQuery().must(QueryBuilders.matchAllQuery()).should(QueryBuilders.termQuery("name", "arbitrary")) ) - .get(); - - assertHighlight(searchResponse, 0, "name", 0, equalTo("standard response for name at position 1")); - assertHighlight(searchResponse, 0, "other_name", 0, equalTo("standard response for other_name at position 2")); - assertHighlight(searchResponse, 0, "other_other_name", 0, equalTo("standard response for other_other_name at position 3")); - assertHighlight(searchResponse, 1, "name", 0, equalTo("standard response for name at position 1")); - assertHighlight(searchResponse, 1, "other_name", 0, equalTo("standard response for other_name at position 2")); - assertHighlight(searchResponse, 1, "other_other_name", 0, equalTo("standard response for other_other_name at position 3")); + .highlighter( + new HighlightBuilder().highlighterType("test-custom") + .field("name") + .field("other_name") + .field("other_other_name") + .useExplicitFieldOrder(true) + ), + response -> { + assertHighlight(response, 0, "name", 0, equalTo("standard response for name at position 1")); + assertHighlight(response, 0, "other_name", 0, equalTo("standard response for other_name at position 2")); + assertHighlight(response, 0, "other_other_name", 0, equalTo("standard response for other_other_name at position 3")); + assertHighlight(response, 1, "name", 0, equalTo("standard response for name at position 1")); + assertHighlight(response, 1, "other_name", 0, equalTo("standard response for other_name at position 2")); + assertHighlight(response, 1, "other_other_name", 0, equalTo("standard response for other_other_name at position 3")); + } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index 79a28a053b3c2..5dcfd861c91a0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -19,7 +19,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.Strings; import org.elasticsearch.common.geo.GeoPoint; @@ -94,7 +93,9 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNotHighlighted; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static 
org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; @@ -139,13 +140,16 @@ public void testHighlightingWithKeywordIgnoreBoundaryScanner() throws IOExceptio refresh(); for (BoundaryScannerType scanner : BoundaryScannerType.values()) { - SearchResponse search = prepareSearch().addSort(SortBuilders.fieldSort("sort")) - .setQuery(matchQuery("tags", "foo bar")) - .highlighter(new HighlightBuilder().field(new Field("tags")).numOfFragments(2).boundaryScannerType(scanner)) - .get(); - assertHighlight(search, 0, "tags", 0, 2, equalTo("foo bar")); - assertHighlight(search, 0, "tags", 1, 2, equalTo("foo bar")); - assertHighlight(search, 1, "tags", 0, 1, equalTo("foo bar")); + assertResponse( + prepareSearch().addSort(SortBuilders.fieldSort("sort")) + .setQuery(matchQuery("tags", "foo bar")) + .highlighter(new HighlightBuilder().field(new Field("tags")).numOfFragments(2).boundaryScannerType(scanner)), + response -> { + assertHighlight(response, 0, "tags", 0, 2, equalTo("foo bar")); + assertHighlight(response, 0, "tags", 1, 2, equalTo("foo bar")); + assertHighlight(response, 1, "tags", 0, 1, equalTo("foo bar")); + } + ); } } @@ -164,10 +168,10 @@ public void testHighlightingWithStoredKeyword() throws IOException { assertAcked(prepareCreate("test").setMapping(mappings)); client().prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("text", "foo").endObject()).get(); refresh(); - SearchResponse search = prepareSearch().setQuery(matchQuery("text", "foo")) - .highlighter(new HighlightBuilder().field(new Field("text"))) - .get(); - assertHighlight(search, 0, "text", 0, equalTo("foo")); + assertResponse( + prepareSearch().setQuery(matchQuery("text", "foo")).highlighter(new HighlightBuilder().field(new Field("text"))), + response -> assertHighlight(response, 0, "text", 0, equalTo("foo")) + ); } public void testHighlightingWithWildcardName() throws IOException { @@ -189,10 +193,11 @@ public void testHighlightingWithWildcardName() throws IOException { client().prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("text", "text").endObject()).get(); refresh(); for (String type : ALL_TYPES) { - SearchResponse search = prepareSearch().setQuery(constantScoreQuery(matchQuery("text", "text"))) - .highlighter(new HighlightBuilder().field(new Field("*").highlighterType(type))) - .get(); - assertHighlight(search, 0, "text", 0, equalTo("text")); + assertResponse( + prepareSearch().setQuery(constantScoreQuery(matchQuery("text", "text"))) + .highlighter(new HighlightBuilder().field(new Field("*").highlighterType(type))), + response -> assertHighlight(response, 0, "text", 0, equalTo("text")) + ); } } @@ -220,8 +225,10 @@ public void testFieldAlias() throws IOException { for (String type : ALL_TYPES) { HighlightBuilder builder = new HighlightBuilder().field(new Field("alias").highlighterType(type)) .requireFieldMatch(randomBoolean()); - SearchResponse search = prepareSearch().setQuery(matchQuery("alias", "foo")).highlighter(builder).get(); - assertHighlight(search, 0, "alias", 0, equalTo("foo")); + assertResponse( + prepareSearch().setQuery(matchQuery("alias", "foo")).highlighter(builder), + response -> assertHighlight(response, 0, "alias", 0, equalTo("foo")) + ); } } @@ -250,8 +257,10 @@ public void testFieldAliasWithSourceLookup() throws IOException { for (String type : ALL_TYPES) { HighlightBuilder builder = new HighlightBuilder().field(new Field("alias").highlighterType(type)) 
.requireFieldMatch(randomBoolean()); - SearchResponse search = prepareSearch().setQuery(matchQuery("alias", "bar")).highlighter(builder).get(); - assertHighlight(search, 0, "alias", 0, equalTo("foo bar")); + assertResponse( + prepareSearch().setQuery(matchQuery("alias", "bar")).highlighter(builder), + response -> assertHighlight(response, 0, "alias", 0, equalTo("foo bar")) + ); } } @@ -275,8 +284,10 @@ public void testFieldAliasWithWildcardField() throws IOException { refresh(); HighlightBuilder builder = new HighlightBuilder().field(new Field("al*")).requireFieldMatch(false); - SearchResponse search = prepareSearch().setQuery(matchQuery("alias", "foo")).highlighter(builder).get(); - assertHighlight(search, 0, "alias", 0, equalTo("foo")); + assertResponse( + prepareSearch().setQuery(matchQuery("alias", "foo")).highlighter(builder), + response -> assertHighlight(response, 0, "alias", 0, equalTo("foo")) + ); } public void testHighlightingWhenFieldsAreNotStoredThereIsNoSource() throws IOException { @@ -309,15 +320,16 @@ public void testHighlightingWhenFieldsAreNotStoredThereIsNoSource() throws IOExc .get(); refresh(); for (String type : ALL_TYPES) { - SearchResponse search = prepareSearch().setQuery(constantScoreQuery(matchQuery("text", "text"))) - .highlighter(new HighlightBuilder().field(new Field("*").highlighterType(type))) - .get(); - assertHighlight(search, 0, "text", 0, equalTo("text")); - search = prepareSearch().setQuery(constantScoreQuery(matchQuery("text", "text"))) - .highlighter(new HighlightBuilder().field(new Field("unstored_text"))) - .get(); - assertNoFailures(search); - assertThat(search.getHits().getAt(0).getHighlightFields().size(), equalTo(0)); + assertResponse( + prepareSearch().setQuery(constantScoreQuery(matchQuery("text", "text"))) + .highlighter(new HighlightBuilder().field(new Field("*").highlighterType(type))), + response -> assertHighlight(response, 0, "text", 0, equalTo("text")) + ); + assertNoFailuresAndResponse( + prepareSearch().setQuery(constantScoreQuery(matchQuery("text", "text"))) + .highlighter(new HighlightBuilder().field(new Field("unstored_text"))), + response -> assertThat(response.getHits().getAt(0).getHighlightFields().size(), equalTo(0)) + ); } } @@ -330,10 +342,12 @@ public void testHighTermFrequencyDoc() throws IOException { } client().prepareIndex("test").setId("1").setSource("name", builder.toString()).get(); refresh(); - SearchResponse search = prepareSearch().setQuery(constantScoreQuery(matchQuery("name", "abc"))) - .highlighter(new HighlightBuilder().field("name")) - .get(); - assertHighlight(search, 0, "name", 0, startsWith("abc abc abc abc")); + assertResponse( + prepareSearch().setQuery(constantScoreQuery(matchQuery("name", "abc"))).highlighter(new HighlightBuilder().field("name")), + response -> { + assertHighlight(response, 0, "name", 0, startsWith("abc abc abc abc")); + } + ); } public void testEnsureNoNegativeOffsets() throws Exception { @@ -433,22 +447,31 @@ public void testSourceLookupHighlightingUsingPlainHighlighter() throws Exception } indexRandom(true, indexRequestBuilders); - SearchResponse search = prepareSearch().setQuery(matchQuery("title", "bug")) - .highlighter(new HighlightBuilder().field("title", -1, 0)) - .get(); - - for (int i = 0; i < indexRequestBuilders.length; i++) { - assertHighlight(search, i, "title", 0, equalTo("This is a test on the highlighting bug present in elasticsearch")); - } - - search = prepareSearch().setQuery(matchQuery("attachments.body", "attachment")) - .highlighter(new 
HighlightBuilder().field("attachments.body", -1, 0)) - .get(); + assertResponse( + prepareSearch().setQuery(matchQuery("title", "bug")).highlighter(new HighlightBuilder().field("title", -1, 0)), + response -> { + for (int i = 0; i < indexRequestBuilders.length; i++) { + assertHighlight( + response, + i, + "title", + 0, + equalTo("This is a test on the highlighting bug present in elasticsearch") + ); + } + } + ); - for (int i = 0; i < indexRequestBuilders.length; i++) { - assertHighlight(search, i, "attachments.body", 0, equalTo("attachment 1")); - assertHighlight(search, i, "attachments.body", 1, equalTo("attachment 2")); - } + assertResponse( + prepareSearch().setQuery(matchQuery("attachments.body", "attachment")) + .highlighter(new HighlightBuilder().field("attachments.body", -1, 0)), + response -> { + for (int i = 0; i < indexRequestBuilders.length; i++) { + assertHighlight(response, i, "attachments.body", 0, equalTo("attachment 1")); + assertHighlight(response, i, "attachments.body", 1, equalTo("attachment 2")); + } + } + ); } @@ -500,23 +523,32 @@ public void testSourceLookupHighlightingUsingFastVectorHighlighter() throws Exce } indexRandom(true, indexRequestBuilders); - SearchResponse search = prepareSearch().setQuery(matchQuery("title", "bug")) - .highlighter(new HighlightBuilder().field("title", -1, 0)) - .get(); - - for (int i = 0; i < indexRequestBuilders.length; i++) { - assertHighlight(search, i, "title", 0, equalTo("This is a test on the highlighting bug present in elasticsearch")); - } - - search = prepareSearch().setQuery(matchQuery("attachments.body", "attachment")) - .highlighter(new HighlightBuilder().field("attachments.body", -1, 2)) - .execute() - .get(); + assertResponse( + prepareSearch().setQuery(matchQuery("title", "bug")).highlighter(new HighlightBuilder().field("title", -1, 0)), + response -> { + for (int i = 0; i < indexRequestBuilders.length; i++) { + assertHighlight( + response, + i, + "title", + 0, + equalTo("This is a test on the highlighting bug present in elasticsearch") + ); + } + } + ); - for (int i = 0; i < 5; i++) { - assertHighlight(search, i, "attachments.body", 0, equalTo("attachment 1")); - assertHighlight(search, i, "attachments.body", 1, equalTo("attachment 2")); - } + assertResponse( + prepareSearch().setQuery(matchQuery("attachments.body", "attachment")) + .highlighter(new HighlightBuilder().field("attachments.body", -1, 2)) + .execute(), + response -> { + for (int i = 0; i < 5; i++) { + assertHighlight(response, i, "attachments.body", 0, equalTo("attachment 1")); + assertHighlight(response, i, "attachments.body", 1, equalTo("attachment 2")); + } + } + ); } public void testSourceLookupHighlightingUsingPostingsHighlighter() throws Exception { @@ -571,46 +603,52 @@ public void testSourceLookupHighlightingUsingPostingsHighlighter() throws Except } indexRandom(true, indexRequestBuilders); - SearchResponse search = prepareSearch().setQuery(matchQuery("title", "bug")) - // asking for the whole field to be highlighted - .highlighter(new HighlightBuilder().field("title", -1, 0)) - .get(); - - for (int i = 0; i < indexRequestBuilders.length; i++) { - assertHighlight( - search, - i, - "title", - 0, - equalTo("This is a test on the highlighting bug present in elasticsearch. 
Hopefully it works.") - ); - assertHighlight(search, i, "title", 1, 2, equalTo("This is the second bug to perform highlighting on.")); - } - - search = prepareSearch().setQuery(matchQuery("title", "bug")) - // sentences will be generated out of each value - .highlighter(new HighlightBuilder().field("title")) - .get(); - - for (int i = 0; i < indexRequestBuilders.length; i++) { - assertHighlight( - search, - i, - "title", - 0, - equalTo("This is a test on the highlighting bug present in elasticsearch. Hopefully it works.") - ); - assertHighlight(search, i, "title", 1, 2, equalTo("This is the second bug to perform highlighting on.")); - } + assertResponse( + prepareSearch().setQuery(matchQuery("title", "bug")) + // asking for the whole field to be highlighted + .highlighter(new HighlightBuilder().field("title", -1, 0)), + response -> { + for (int i = 0; i < indexRequestBuilders.length; i++) { + assertHighlight( + response, + i, + "title", + 0, + equalTo("This is a test on the highlighting bug present in elasticsearch. Hopefully it works.") + ); + assertHighlight(response, i, "title", 1, 2, equalTo("This is the second bug to perform highlighting on.")); + } + } + ); - search = prepareSearch().setQuery(matchQuery("attachments.body", "attachment")) - .highlighter(new HighlightBuilder().field("attachments.body", -1, 2)) - .get(); + assertResponse( + prepareSearch().setQuery(matchQuery("title", "bug")) + // sentences will be generated out of each value + .highlighter(new HighlightBuilder().field("title")), + response -> { + for (int i = 0; i < indexRequestBuilders.length; i++) { + assertHighlight( + response, + i, + "title", + 0, + equalTo("This is a test on the highlighting bug present in elasticsearch. Hopefully it works.") + ); + assertHighlight(response, i, "title", 1, 2, equalTo("This is the second bug to perform highlighting on.")); + } + } + ); - for (int i = 0; i < indexRequestBuilders.length; i++) { - assertHighlight(search, i, "attachments.body", 0, equalTo("attachment for this test")); - assertHighlight(search, i, "attachments.body", 1, 2, equalTo("attachment 2")); - } + assertResponse( + prepareSearch().setQuery(matchQuery("attachments.body", "attachment")) + .highlighter(new HighlightBuilder().field("attachments.body", -1, 2)), + response -> { + for (int i = 0; i < indexRequestBuilders.length; i++) { + assertHighlight(response, i, "attachments.body", 0, equalTo("attachment for this test")); + assertHighlight(response, i, "attachments.body", 1, 2, equalTo("attachment 2")); + } + } + ); } public void testHighlightIssue1994() throws Exception { @@ -631,21 +669,35 @@ public void testHighlightIssue1994() throws Exception { client().prepareIndex("test").setId("2").setSource("titleTV", new String[] { "some text to highlight", "highlight other text" }) ); - SearchResponse search = prepareSearch().setQuery(matchQuery("title", "bug")) - .highlighter(new HighlightBuilder().field("title", -1, 2).field("titleTV", -1, 2).requireFieldMatch(false)) - .get(); - - assertHighlight(search, 0, "title", 0, equalTo("This is a test on the highlighting bug present in elasticsearch")); - assertHighlight(search, 0, "title", 1, 2, equalTo("The bug is bugging us")); - assertHighlight(search, 0, "titleTV", 0, equalTo("This is a test on the highlighting bug present in elasticsearch")); - assertHighlight(search, 0, "titleTV", 1, 2, equalTo("The bug is bugging us")); - - search = prepareSearch().setQuery(matchQuery("titleTV", "highlight")) - .highlighter(new HighlightBuilder().field("titleTV", -1, 2)) - .get(); - - 
assertHighlight(search, 0, "titleTV", 0, equalTo("some text to highlight")); - assertHighlight(search, 0, "titleTV", 1, 2, equalTo("highlight other text")); + assertResponse( + prepareSearch().setQuery(matchQuery("title", "bug")) + .highlighter(new HighlightBuilder().field("title", -1, 2).field("titleTV", -1, 2).requireFieldMatch(false)), + response -> { + assertHighlight( + response, + 0, + "title", + 0, + equalTo("This is a test on the highlighting bug present in elasticsearch") + ); + assertHighlight(response, 0, "title", 1, 2, equalTo("The bug is bugging us")); + assertHighlight( + response, + 0, + "titleTV", + 0, + equalTo("This is a test on the highlighting bug present in elasticsearch") + ); + assertHighlight(response, 0, "titleTV", 1, 2, equalTo("The bug is bugging us")); + } + ); + assertResponse( + prepareSearch().setQuery(matchQuery("titleTV", "highlight")).highlighter(new HighlightBuilder().field("titleTV", -1, 2)), + response -> { + assertHighlight(response, 0, "titleTV", 0, equalTo("some text to highlight")); + assertHighlight(response, 0, "titleTV", 1, 2, equalTo("highlight other text")); + } + ); } public void testGlobalHighlightingSettingsOverriddenAtFieldLevel() { @@ -679,11 +731,11 @@ public void testGlobalHighlightingSettingsOverriddenAtFieldLevel() { ) ); - SearchResponse searchResponse = prepareSearch("test").setSource(source).get(); - - assertHighlight(searchResponse, 0, "field1", 0, 2, equalTo("test")); - assertHighlight(searchResponse, 0, "field1", 1, 2, equalTo("test")); - assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("yet another test")); + assertResponse(prepareSearch("test").setSource(source), response -> { + assertHighlight(response, 0, "field1", 0, 2, equalTo("test")); + assertHighlight(response, 0, "field1", 1, 2, equalTo("test")); + assertHighlight(response, 0, "field2", 0, 1, equalTo("yet another test")); + }); } // Issue #5175 @@ -718,18 +770,18 @@ public void testHighlightingOnWildcardFields() throws Exception { .query(termQuery("field-postings", "test")) .highlighter(highlight().field("field*").preTags("").postTags("").requireFieldMatch(false)); - SearchResponse searchResponse = client().search(new SearchRequest("test").source(source)).actionGet(); - - assertHighlight( - searchResponse, - 0, - "field-postings", - 0, - 1, - equalTo("This is the first test sentence. Here is the second one.") - ); - assertHighlight(searchResponse, 0, "field-fvh", 0, 1, equalTo("This is the test with term_vectors")); - assertHighlight(searchResponse, 0, "field-plain", 0, 1, equalTo("This is the test for the plain highlighter")); + assertResponse(client().search(new SearchRequest("test").source(source)), response -> { + assertHighlight( + response, + 0, + "field-postings", + 0, + 1, + equalTo("This is the first test sentence. 
Here is the second one.") + ); + assertHighlight(response, 0, "field-fvh", 0, 1, equalTo("This is the test with term_vectors")); + assertHighlight(response, 0, "field-plain", 0, 1, equalTo("This is the test for the plain highlighter")); + }); } public void testPlainHighlighter() throws Exception { @@ -756,23 +808,23 @@ public void testPlainHighlighterOrder() throws Exception { SearchSourceBuilder source = searchSource().query(matchQuery("field1", "brown dog")) .highlighter(highlight().highlighterType("plain").field("field1").preTags("").postTags("").fragmentSize(25)); - SearchResponse searchResponse = prepareSearch("test").setSource(source).get(); - - assertHighlight(searchResponse, 0, "field1", 0, 3, equalTo("The quick brown fox")); - assertHighlight(searchResponse, 0, "field1", 1, 3, equalTo(" jumps over the lazy brown dog")); - assertHighlight(searchResponse, 0, "field1", 2, 3, equalTo(" dog doesn't care")); + assertResponse(prepareSearch("test").setSource(source), response -> { + assertHighlight(response, 0, "field1", 0, 3, equalTo("The quick brown fox")); + assertHighlight(response, 0, "field1", 1, 3, equalTo(" jumps over the lazy brown dog")); + assertHighlight(response, 0, "field1", 2, 3, equalTo(" dog doesn't care")); + }); // lets be explicit about the order source = searchSource().query(matchQuery("field1", "brown dog")) .highlighter( highlight().highlighterType("plain").field("field1").order("none").preTags("").postTags("").fragmentSize(25) ); - searchResponse = prepareSearch("test").setSource(source).get(); - - assertHighlight(searchResponse, 0, "field1", 0, 3, equalTo("The quick brown fox")); - assertHighlight(searchResponse, 0, "field1", 1, 3, equalTo(" jumps over the lazy brown dog")); - assertHighlight(searchResponse, 0, "field1", 2, 3, equalTo(" dog doesn't care")); + assertResponse(prepareSearch("test").setSource(source), response -> { + assertHighlight(response, 0, "field1", 0, 3, equalTo("The quick brown fox")); + assertHighlight(response, 0, "field1", 1, 3, equalTo(" jumps over the lazy brown dog")); + assertHighlight(response, 0, "field1", 2, 3, equalTo(" dog doesn't care")); + }); } { // order by score @@ -781,11 +833,11 @@ public void testPlainHighlighterOrder() throws Exception { highlight().highlighterType("plain").order("score").field("field1").preTags("").postTags("").fragmentSize(25) ); - SearchResponse searchResponse = prepareSearch("test").setSource(source).get(); - - assertHighlight(searchResponse, 0, "field1", 0, 3, equalTo(" jumps over the lazy brown dog")); - assertHighlight(searchResponse, 0, "field1", 1, 3, equalTo("The quick brown fox")); - assertHighlight(searchResponse, 0, "field1", 2, 3, equalTo(" dog doesn't care")); + assertResponse(prepareSearch("test").setSource(source), response -> { + assertHighlight(response, 0, "field1", 0, 3, equalTo(" jumps over the lazy brown dog")); + assertHighlight(response, 0, "field1", 1, 3, equalTo("The quick brown fox")); + assertHighlight(response, 0, "field1", 2, 3, equalTo(" dog doesn't care")); + }); } } @@ -836,25 +888,25 @@ public void testHighlighterWithSentenceBoundaryScanner() throws Exception { .postTags("") .boundaryScannerType(BoundaryScannerType.SENTENCE) ); - SearchResponse searchResponse = prepareSearch("test").setSource(source).get(); - - assertHighlight( - searchResponse, - 0, - "field1", - 0, - 2, - anyOf(equalTo("A sentence with few words"), equalTo("A sentence with few words. 
")) - ); + assertResponse(prepareSearch("test").setSource(source), response -> { + assertHighlight( + response, + 0, + "field1", + 0, + 2, + anyOf(equalTo("A sentence with few words"), equalTo("A sentence with few words. ")) + ); - assertHighlight( - searchResponse, - 0, - "field1", - 1, - 2, - anyOf(equalTo("Another sentence with"), equalTo("Another sentence with even more words. ")) - ); + assertHighlight( + response, + 0, + "field1", + 1, + 2, + anyOf(equalTo("Another sentence with"), equalTo("Another sentence with even more words. ")) + ); + }); } } @@ -879,25 +931,25 @@ public void testHighlighterWithSentenceBoundaryScannerAndLocale() throws Excepti .boundaryScannerLocale(Locale.ENGLISH.toLanguageTag()) ); - SearchResponse searchResponse = prepareSearch("test").setSource(source).get(); - - assertHighlight( - searchResponse, - 0, - "field1", - 0, - 2, - anyOf(equalTo("A sentence with few words"), equalTo("A sentence with few words. ")) - ); + assertResponse(prepareSearch("test").setSource(source), response -> { + assertHighlight( + response, + 0, + "field1", + 0, + 2, + anyOf(equalTo("A sentence with few words"), equalTo("A sentence with few words. ")) + ); - assertHighlight( - searchResponse, - 0, - "field1", - 1, - 2, - anyOf(equalTo("Another sentence with"), equalTo("Another sentence with even more words. ")) - ); + assertHighlight( + response, + 0, + "field1", + 1, + 2, + anyOf(equalTo("Another sentence with"), equalTo("Another sentence with even more words. ")) + ); + }); } } @@ -971,12 +1023,16 @@ public void testFVHManyMatches() throws Exception { client().prepareIndex("test").setSource("field1", value).get(); refresh(); + final long[] tookDefaultPhrase = new long[1]; + final long[] tookLargePhrase = new long[1]; + logger.info("--> highlighting and searching on field1 with default phrase limit"); SearchSourceBuilder source = searchSource().query(termQuery("field1", "t")) .highlighter(highlight().highlighterType("fvh").field("field1", 20, 1).order("score").preTags("").postTags("")); - SearchResponse defaultPhraseLimit = client().search(new SearchRequest("test").source(source)).actionGet(); - assertHighlight(defaultPhraseLimit, 0, "field1", 0, 1, containsString("t")); - + assertResponse(client().search(new SearchRequest("test").source(source)), defaultPhraseLimit -> { + assertHighlight(defaultPhraseLimit, 0, "field1", 0, 1, containsString("t")); + tookDefaultPhrase[0] = defaultPhraseLimit.getTook().getMillis(); + }); logger.info("--> highlighting and searching on field1 with large phrase limit"); source = searchSource().query(termQuery("field1", "t")) .highlighter( @@ -987,15 +1043,16 @@ public void testFVHManyMatches() throws Exception { .postTags("") .phraseLimit(30000) ); - SearchResponse largePhraseLimit = client().search(new SearchRequest("test").source(source)).actionGet(); - assertHighlight(largePhraseLimit, 0, "field1", 0, 1, containsString("t")); - + assertResponse(client().search(new SearchRequest("test").source(source)), largePhraseLimit -> { + assertHighlight(largePhraseLimit, 0, "field1", 0, 1, containsString("t")); + tookLargePhrase[0] = largePhraseLimit.getTook().getMillis(); + }); /* * I hate comparing times because it can be inconsistent but default is * in the neighborhood of 300ms and the large phrase limit is in the * neighborhood of 8 seconds. 
*/ - assertThat(defaultPhraseLimit.getTook().getMillis(), lessThan(largePhraseLimit.getTook().getMillis())); + assertThat(tookDefaultPhrase[0], lessThan(tookLargePhrase[0])); } public void testMatchedFieldsFvhRequireFieldMatch() throws Exception { @@ -1071,12 +1128,16 @@ private void checkMatchedFieldsCase(boolean requireFieldMatch) throws Exception SearchRequestBuilder req = prepareSearch("test").highlighter(new HighlightBuilder().field(fooField)); // First check highlighting without any matched fields set - SearchResponse resp = req.setQuery(queryStringQuery("running scissors").field("foo")).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("running with scissors")); + assertResponse( + req.setQuery(queryStringQuery("running scissors").field("foo")), + response -> assertHighlight(response, 0, "foo", 0, equalTo("running with scissors")) + ); // And that matching a subfield doesn't automatically highlight it - resp = req.setQuery(queryStringQuery("foo.plain:running scissors").field("foo")).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("running with scissors")); + assertResponse( + req.setQuery(queryStringQuery("foo.plain:running scissors").field("foo")), + response -> assertHighlight(response, 0, "foo", 0, equalTo("running with scissors")) + ); // Add the subfield to the list of matched fields but don't match it. Everything should still work // like before we added it. @@ -1087,12 +1148,16 @@ private void checkMatchedFieldsCase(boolean requireFieldMatch) throws Exception .requireFieldMatch(requireFieldMatch); fooField.matchedFields("foo", "foo.plain"); req = prepareSearch("test").highlighter(new HighlightBuilder().field(fooField)); - resp = req.setQuery(queryStringQuery("running scissors").field("foo")).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("running with scissors")); + assertResponse( + req.setQuery(queryStringQuery("running scissors").field("foo")), + response -> assertHighlight(response, 0, "foo", 0, equalTo("running with scissors")) + ); // Now make half the matches come from the stored field and half from just a matched field. - resp = req.setQuery(queryStringQuery("foo.plain:running scissors").field("foo")).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("running with scissors")); + assertResponse( + req.setQuery(queryStringQuery("foo.plain:running scissors").field("foo")), + response -> assertHighlight(response, 0, "foo", 0, equalTo("running with scissors")) + ); // Now remove the stored field from the matched field list. That should work too. fooField = new Field("foo").numOfFragments(1) @@ -1102,8 +1167,10 @@ private void checkMatchedFieldsCase(boolean requireFieldMatch) throws Exception .requireFieldMatch(requireFieldMatch); fooField.matchedFields("foo.plain"); req = prepareSearch("test").highlighter(new HighlightBuilder().field(fooField)); - resp = req.setQuery(queryStringQuery("foo.plain:running scissors").field("foo")).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("running with scissors")); + assertResponse( + req.setQuery(queryStringQuery("foo.plain:running scissors").field("foo")), + response -> assertHighlight(response, 0, "foo", 0, equalTo("running with scissors")) + ); // Now make sure boosted fields don't blow up when matched fields is both the subfield and stored field. 
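[Note on the testFVHManyMatches hunk above] The old code compared the took time of the two searches directly, but once each SearchResponse is confined to its assertResponse callback, the timings have to be carried out of the lambdas. Java lambdas may only capture effectively final locals, so the rewrite routes each value through a one-element long[] whose reference is final while the slot stays writable. A minimal sketch of the idiom, assuming the usual ESIntegTestCase / ElasticsearchAssertions helpers available in these tests:

    // The array reference is effectively final, so the lambda may capture it;
    // the element itself remains writable from inside the callback.
    final long[] took = new long[1];
    assertResponse(prepareSearch("test").setQuery(termQuery("field1", "t")), response -> {
        took[0] = response.getTook().getMillis(); // carry the timing out of the lambda
    });
    assertThat(took[0], greaterThanOrEqualTo(0L)); // compare outside the callback

An AtomicLong would serve equally well; the array is simply the lightest-weight carrier.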
fooField = new Field("foo").numOfFragments(1) @@ -1113,28 +1180,40 @@ private void checkMatchedFieldsCase(boolean requireFieldMatch) throws Exception .requireFieldMatch(requireFieldMatch); fooField.matchedFields("foo", "foo.plain"); req = prepareSearch("test").highlighter(new HighlightBuilder().field(fooField)); - resp = req.setQuery(queryStringQuery("foo.plain:running^5 scissors").field("foo")).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("running with scissors")); + assertResponse( + req.setQuery(queryStringQuery("foo.plain:running^5 scissors").field("foo")), + response -> assertHighlight(response, 0, "foo", 0, equalTo("running with scissors")) + ); // Now just all matches are against the matched field. This still returns highlighting. - resp = req.setQuery(queryStringQuery("foo.plain:running foo.plain:scissors").field("foo")).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("running with scissors")); + assertResponse( + req.setQuery(queryStringQuery("foo.plain:running foo.plain:scissors").field("foo")), + response -> assertHighlight(response, 0, "foo", 0, equalTo("running with scissors")) + ); // And all matched field via the queryString's field parameter, just in case - resp = req.setQuery(queryStringQuery("running scissors").field("foo.plain")).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("running with scissors")); + assertResponse( + req.setQuery(queryStringQuery("running scissors").field("foo.plain")), + response -> assertHighlight(response, 0, "foo", 0, equalTo("running with scissors")) + ); // Finding the same string two ways is ok too - resp = req.setQuery(queryStringQuery("run foo.plain:running^5 scissors").field("foo")).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("running with scissors")); + assertResponse( + req.setQuery(queryStringQuery("run foo.plain:running^5 scissors").field("foo")), + response -> assertHighlight(response, 0, "foo", 0, equalTo("running with scissors")) + ); // But we use the best found score when sorting fragments - resp = req.setQuery(queryStringQuery("cats foo.plain:cats^5").field("foo")).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("junk junk cats junk junk")); + assertResponse( + req.setQuery(queryStringQuery("cats foo.plain:cats^5").field("foo")), + response -> assertHighlight(response, 0, "foo", 0, equalTo("junk junk cats junk junk")) + ); // which can also be written by searching on the subfield - resp = req.setQuery(queryStringQuery("cats").field("foo").field("foo.plain", 5)).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("junk junk cats junk junk")); + assertResponse( + req.setQuery(queryStringQuery("cats").field("foo").field("foo.plain", 5)), + response -> assertHighlight(response, 0, "foo", 0, equalTo("junk junk cats junk junk")) + ); // Speaking of two fields, you can have two fields, only one of which has matchedFields enabled QueryBuilder twoFieldsQuery = queryStringQuery("cats").field("foo").field("foo.plain", 5).field("bar").field("bar.plain", 5); @@ -1143,50 +1222,63 @@ private void checkMatchedFieldsCase(boolean requireFieldMatch) throws Exception .fragmentSize(25) .highlighterType("fvh") .requireFieldMatch(requireFieldMatch); - resp = req.setQuery(twoFieldsQuery).highlighter(new HighlightBuilder().field(fooField).field(barField)).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("junk junk cats junk junk")); - assertHighlight(resp, 0, "bar", 0, equalTo("cat cat junk junk junk junk")); + assertResponse(req.setQuery(twoFieldsQuery).highlighter(new HighlightBuilder().field(fooField).field(barField)), 
response -> { + assertHighlight(response, 0, "foo", 0, equalTo("junk junk cats junk junk")); + assertHighlight(response, 0, "bar", 0, equalTo("cat cat junk junk junk junk")); + }); // And you can enable matchedField highlighting on both barField.matchedFields("bar", "bar.plain"); - resp = req.setQuery(twoFieldsQuery).highlighter(new HighlightBuilder().field(fooField).field(barField)).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("junk junk cats junk junk")); - assertHighlight(resp, 0, "bar", 0, equalTo("junk junk cats junk junk")); + assertResponse(req.setQuery(twoFieldsQuery).highlighter(new HighlightBuilder().field(fooField).field(barField)), response -> { + assertHighlight(response, 0, "foo", 0, equalTo("junk junk cats junk junk")); + assertHighlight(response, 0, "bar", 0, equalTo("junk junk cats junk junk")); + }); // Setting a matchedField that isn't searched/doesn't exist is simply ignored. barField.matchedFields("bar", "candy"); - resp = req.setQuery(twoFieldsQuery).highlighter(new HighlightBuilder().field(fooField).field(barField)).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("junk junk cats junk junk")); - assertHighlight(resp, 0, "bar", 0, equalTo("cat cat junk junk junk junk")); + assertResponse(req.setQuery(twoFieldsQuery).highlighter(new HighlightBuilder().field(fooField).field(barField)), response -> { + assertHighlight(response, 0, "foo", 0, equalTo("junk junk cats junk junk")); + assertHighlight(response, 0, "bar", 0, equalTo("cat cat junk junk junk junk")); + }); // If the stored field doesn't have a value it doesn't matter what you match, you get nothing. barField.matchedFields("bar", "foo.plain"); - resp = req.setQuery(queryStringQuery("running scissors").field("foo.plain").field("bar")) - .highlighter(new HighlightBuilder().field(fooField).field(barField)) - .get(); - assertHighlight(resp, 0, "foo", 0, equalTo("running with scissors")); - assertThat(resp.getHits().getAt(0).getHighlightFields(), not(hasKey("bar"))); + assertResponse( + req.setQuery(queryStringQuery("running scissors").field("foo.plain").field("bar")) + .highlighter(new HighlightBuilder().field(fooField).field(barField)), + response -> { + assertHighlight(response, 0, "foo", 0, equalTo("running with scissors")); + assertThat(response.getHits().getAt(0).getHighlightFields(), not(hasKey("bar"))); + } + ); // If the stored field is found but the matched field isn't then you don't get a result either. 
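[Note on the checkMatchedFieldsCase hunks around this point] They all follow the same mechanical conversion seen throughout the file: drop the terminal .get(), hand the SearchRequestBuilder to assertResponse, and move the assertions into the consumer so the response is released as soon as the block returns. For orientation, a simplified sketch of the shape such a helper takes — an illustration under the assumption that SearchResponse is ref-counted, not the actual ElasticsearchAssertions implementation (which accepts the request builder or an ActionFuture directly):

    import java.util.function.Consumer;
    import java.util.function.Supplier;
    import org.elasticsearch.core.RefCounted;

    // Hypothetical sketch: obtain the response, hand it to the caller's
    // assertions, and always release the reference afterwards.
    static <R extends RefCounted> void assertResponse(Supplier<R> search, Consumer<R> consumer) {
        R response = search.get();
        try {
            consumer.accept(response);
        } finally {
            response.decRef(); // release even when an assertion throws
        }
    }

The point of the pattern is the finally block: every test keeps its assertions unchanged, but leaking the response on an assertion failure is no longer possible.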
fooField.matchedFields("bar.plain"); - resp = req.setQuery(queryStringQuery("running scissors").field("foo").field("foo.plain").field("bar").field("bar.plain")) - .highlighter(new HighlightBuilder().field(fooField).field(barField)) - .get(); - assertThat(resp.getHits().getAt(0).getHighlightFields(), not(hasKey("foo"))); + assertResponse( + req.setQuery(queryStringQuery("running scissors").field("foo").field("foo.plain").field("bar").field("bar.plain")) + .highlighter(new HighlightBuilder().field(fooField).field(barField)), + response -> assertThat(response.getHits().getAt(0).getHighlightFields(), not(hasKey("foo"))) + ); // But if you add the stored field to the list of matched fields then you'll get a result again fooField.matchedFields("foo", "bar.plain"); - resp = req.setQuery(queryStringQuery("running scissors").field("foo").field("foo.plain").field("bar").field("bar.plain")) - .highlighter(new HighlightBuilder().field(fooField).field(barField)) - .get(); - assertHighlight(resp, 0, "foo", 0, equalTo("running with scissors")); - assertThat(resp.getHits().getAt(0).getHighlightFields(), not(hasKey("bar"))); + assertResponse( + req.setQuery(queryStringQuery("running scissors").field("foo").field("foo.plain").field("bar").field("bar.plain")) + .highlighter(new HighlightBuilder().field(fooField).field(barField)), + response -> { + assertHighlight(response, 0, "foo", 0, equalTo("running with scissors")); + assertThat(response.getHits().getAt(0).getHighlightFields(), not(hasKey("bar"))); + } + ); // You _can_ highlight fields that aren't subfields of one another. - resp = req.setQuery(queryStringQuery("weird").field("foo").field("foo.plain").field("bar").field("bar.plain")) - .highlighter(new HighlightBuilder().field(fooField).field(barField)) - .get(); - assertHighlight(resp, 0, "foo", 0, equalTo("weird")); - assertHighlight(resp, 0, "bar", 0, equalTo("result")); + assertResponse( + req.setQuery(queryStringQuery("weird").field("foo").field("foo.plain").field("bar").field("bar.plain")) + .highlighter(new HighlightBuilder().field(fooField).field(barField)), + response -> { + assertHighlight(response, 0, "foo", 0, equalTo("weird")); + assertHighlight(response, 0, "bar", 0, equalTo("result")); + } + ); assertFailures( req.setQuery(queryStringQuery("result").field("foo").field("foo.plain").field("bar").field("bar.plain")), @@ -1208,15 +1300,18 @@ public void testFastVectorHighlighterManyDocs() throws Exception { indexRandom(true, indexRequestBuilders); logger.info("--> searching explicitly on field1 and highlighting on it"); - SearchResponse searchResponse = prepareSearch().setSize(COUNT) - .setQuery(termQuery("field1", "test")) - .highlighter(new HighlightBuilder().field("field1", 100, 0)) - .get(); - for (int i = 0; i < COUNT; i++) { - SearchHit hit = searchResponse.getHits().getHits()[i]; - // LUCENE 3.1 UPGRADE: Caused adding the space at the end... - assertHighlight(searchResponse, i, "field1", 0, 1, equalTo("test " + hit.getId())); - } + assertResponse( + prepareSearch().setSize(COUNT) + .setQuery(termQuery("field1", "test")) + .highlighter(new HighlightBuilder().field("field1", 100, 0)), + response -> { + for (int i = 0; i < COUNT; i++) { + SearchHit hit = response.getHits().getHits()[i]; + // LUCENE 3.1 UPGRADE: Caused adding the space at the end... 
+ assertHighlight(response, i, "field1", 0, 1, equalTo("test " + hit.getId())); + } + } + ); } public XContentBuilder type1TermVectorMapping() throws IOException { @@ -1248,13 +1343,21 @@ public void testSameContent() throws Exception { } indexRandom(true, indexRequestBuilders); - SearchResponse search = prepareSearch().setQuery(matchQuery("title", "bug")) - .highlighter(new HighlightBuilder().field("title", -1, 0)) - .get(); - - for (int i = 0; i < 5; i++) { - assertHighlight(search, i, "title", 0, 1, equalTo("This is a test on the highlighting bug present in elasticsearch")); - } + assertResponse( + prepareSearch().setQuery(matchQuery("title", "bug")).highlighter(new HighlightBuilder().field("title", -1, 0)), + response -> { + for (int i = 0; i < 5; i++) { + assertHighlight( + response, + i, + "title", + 0, + 1, + equalTo("This is a test on the highlighting bug present in elasticsearch") + ); + } + } + ); } public void testFastVectorHighlighterOffsetParameter() throws Exception { @@ -1268,14 +1371,16 @@ public void testFastVectorHighlighterOffsetParameter() throws Exception { } indexRandom(true, indexRequestBuilders); - SearchResponse search = prepareSearch().setQuery(matchQuery("title", "bug")) - .highlighter(new HighlightBuilder().field("title", 30, 1, 10).highlighterType("fvh")) - .get(); - - for (int i = 0; i < 5; i++) { - // LUCENE 3.1 UPGRADE: Caused adding the space at the end... - assertHighlight(search, i, "title", 0, 1, equalTo("highlighting bug present in elasticsearch")); - } + assertResponse( + prepareSearch().setQuery(matchQuery("title", "bug")) + .highlighter(new HighlightBuilder().field("title", 30, 1, 10).highlighterType("fvh")), + response -> { + for (int i = 0; i < 5; i++) { + // LUCENE 3.1 UPGRADE: Caused adding the space at the end... + assertHighlight(response, i, "title", 0, 1, equalTo("highlighting bug present in elasticsearch")); + } + } + ); } public void testEscapeHtml() throws Exception { @@ -1289,13 +1394,22 @@ public void testEscapeHtml() throws Exception { } indexRandom(true, indexRequestBuilders); - SearchResponse search = prepareSearch().setQuery(matchQuery("title", "test")) - .highlighter(new HighlightBuilder().encoder("html").field("title", 50, 1, 10)) - .get(); - - for (int i = 0; i < indexRequestBuilders.length; i++) { - assertHighlight(search, i, "title", 0, 1, startsWith("This is a html escaping highlighting test for *&?")); - } + assertResponse( + prepareSearch().setQuery(matchQuery("title", "test")) + .highlighter(new HighlightBuilder().encoder("html").field("title", 50, 1, 10)), + response -> { + for (int i = 0; i < indexRequestBuilders.length; i++) { + assertHighlight( + response, + i, + "title", + 0, + 1, + startsWith("This is a html escaping highlighting test for *&?") + ); + } + } + ); } public void testEscapeHtmlVector() throws Exception { @@ -1309,13 +1423,15 @@ public void testEscapeHtmlVector() throws Exception { } indexRandom(true, indexRequestBuilders); - SearchResponse search = prepareSearch().setQuery(matchQuery("title", "test")) - .highlighter(new HighlightBuilder().encoder("html").field("title", 30, 1, 10).highlighterType("plain")) - .get(); - - for (int i = 0; i < 5; i++) { - assertHighlight(search, i, "title", 0, 1, equalTo(" highlighting test for *&? 
elasticsearch")); - } + assertResponse( + prepareSearch().setQuery(matchQuery("title", "test")) + .highlighter(new HighlightBuilder().encoder("html").field("title", 30, 1, 10).highlighterType("plain")), + response -> { + for (int i = 0; i < 5; i++) { + assertHighlight(response, i, "title", 0, 1, equalTo(" highlighting test for *&? elasticsearch")); + } + } + ); } public void testMultiMapperVectorWithStore() throws Exception { @@ -1573,48 +1689,55 @@ public void testDisableFastVectorHighlighter() throws Exception { } indexRandom(true, indexRequestBuilders); - SearchResponse search = prepareSearch().setQuery(matchPhraseQuery("title", "test for the workaround")) - .highlighter(new HighlightBuilder().field("title", 50, 1, 10).highlighterType("fvh")) - .get(); + assertResponse( + prepareSearch().setQuery(matchPhraseQuery("title", "test for the workaround")) + .highlighter(new HighlightBuilder().field("title", 50, 1, 10).highlighterType("fvh")), + response -> { - for (int i = 0; i < indexRequestBuilders.length; i++) { - // Because of SOLR-3724 nothing is highlighted when FVH is used - assertNotHighlighted(search, i, "title"); - } + for (int i = 0; i < indexRequestBuilders.length; i++) { + // Because of SOLR-3724 nothing is highlighted when FVH is used + assertNotHighlighted(response, i, "title"); + } + } + ); // Using plain highlighter instead of FVH - search = prepareSearch().setQuery(matchPhraseQuery("title", "test for the workaround")) - .highlighter(new HighlightBuilder().field("title", 50, 1, 10).highlighterType("plain")) - .get(); - - for (int i = 0; i < indexRequestBuilders.length; i++) { - assertHighlight( - search, - i, - "title", - 0, - 1, - equalTo("This is a test for the workaround for the fast vector highlighting SOLR-3724") - ); - } + assertResponse( + prepareSearch().setQuery(matchPhraseQuery("title", "test for the workaround")) + .highlighter(new HighlightBuilder().field("title", 50, 1, 10).highlighterType("plain")), + response -> { + for (int i = 0; i < indexRequestBuilders.length; i++) { + assertHighlight( + response, + i, + "title", + 0, + 1, + equalTo("This is a test for the workaround for the fast vector highlighting SOLR-3724") + ); + } + } + ); // Using plain highlighter instead of FVH on the field level - search = prepareSearch().setQuery(matchPhraseQuery("title", "test for the workaround")) - .highlighter( - new HighlightBuilder().field(new HighlightBuilder.Field("title").highlighterType("plain")).highlighterType("plain") - ) - .get(); - - for (int i = 0; i < indexRequestBuilders.length; i++) { - assertHighlight( - search, - i, - "title", - 0, - 1, - equalTo("This is a test for the workaround for the fast vector highlighting SOLR-3724") - ); - } + assertResponse( + prepareSearch().setQuery(matchPhraseQuery("title", "test for the workaround")) + .highlighter( + new HighlightBuilder().field(new HighlightBuilder.Field("title").highlighterType("plain")).highlighterType("plain") + ), + response -> { + for (int i = 0; i < indexRequestBuilders.length; i++) { + assertHighlight( + response, + i, + "title", + 0, + 1, + equalTo("This is a test for the workaround for the fast vector highlighting SOLR-3724") + ); + } + } + ); } public void testFSHHighlightAllMvFragments() throws Exception { @@ -1631,18 +1754,20 @@ public void testFSHHighlightAllMvFragments() throws Exception { .get(); refresh(); - SearchResponse response = prepareSearch("test").setQuery(QueryBuilders.matchQuery("tags", "tag")) - .highlighter(new HighlightBuilder().field("tags", -1, 0).highlighterType("fvh")) - 
.get(); - - assertHighlight(response, 0, "tags", 0, equalTo("this is a really long tag i would like to highlight")); - assertHighlight( - response, - 0, - "tags", - 1, - 2, - equalTo("here is another one that is very long and has the tag token near the end") + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.matchQuery("tags", "tag")) + .highlighter(new HighlightBuilder().field("tags", -1, 0).highlighterType("fvh")), + response -> { + assertHighlight(response, 0, "tags", 0, equalTo("this is a really long tag i would like to highlight")); + assertHighlight( + response, + 0, + "tags", + 1, + 2, + equalTo("here is another one that is very long and has the tag token near the end") + ); + } ); } @@ -1703,40 +1828,44 @@ public void testPlainHighlightDifferentFragmenter() throws Exception { .get(); refresh(); - SearchResponse response = prepareSearch("test").setQuery(QueryBuilders.matchPhraseQuery("tags", "long tag")) - .highlighter( - new HighlightBuilder().field( - new HighlightBuilder.Field("tags").highlighterType("plain").fragmentSize(-1).numOfFragments(2).fragmenter("simple") - ) - ) - .get(); - - assertHighlight(response, 0, "tags", 0, equalTo("this is a really long tag i would like to highlight")); - assertHighlight( - response, - 0, - "tags", - 1, - 2, - equalTo("here is another one that is very long tag and has the tag token near the end") + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.matchPhraseQuery("tags", "long tag")) + .highlighter( + new HighlightBuilder().field( + new HighlightBuilder.Field("tags").highlighterType("plain").fragmentSize(-1).numOfFragments(2).fragmenter("simple") + ) + ), + response -> { + assertHighlight(response, 0, "tags", 0, equalTo("this is a really long tag i would like to highlight")); + assertHighlight( + response, + 0, + "tags", + 1, + 2, + equalTo("here is another one that is very long tag and has the tag token near the end") + ); + } ); - response = prepareSearch("test").setQuery(QueryBuilders.matchPhraseQuery("tags", "long tag")) - .highlighter( - new HighlightBuilder().field( - new Field("tags").highlighterType("plain").fragmentSize(-1).numOfFragments(2).fragmenter("span") - ) - ) - .get(); - - assertHighlight(response, 0, "tags", 0, equalTo("this is a really long tag i would like to highlight")); - assertHighlight( - response, - 0, - "tags", - 1, - 2, - equalTo("here is another one that is very long tag and has the tag token near the end") + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.matchPhraseQuery("tags", "long tag")) + .highlighter( + new HighlightBuilder().field( + new Field("tags").highlighterType("plain").fragmentSize(-1).numOfFragments(2).fragmenter("span") + ) + ), + response -> { + assertHighlight(response, 0, "tags", 0, equalTo("this is a really long tag i would like to highlight")); + assertHighlight( + response, + 0, + "tags", + 1, + 2, + equalTo("here is another one that is very long tag and has the tag token near the end") + ); + } ); assertFailures( @@ -1758,14 +1887,18 @@ public void testPlainHighlighterMultipleFields() { indexDoc("test", "1", "field1", "The quick brown fox", "field2", "The slow brown fox"); refresh(); - SearchResponse response = prepareSearch("test").setQuery(QueryBuilders.matchQuery("field1", "fox")) - .highlighter( - new HighlightBuilder().field(new HighlightBuilder.Field("field1").preTags("<1>").postTags("").requireFieldMatch(true)) - .field(new HighlightBuilder.Field("field2").preTags("<2>").postTags("").requireFieldMatch(false)) - ) - .get(); - 
assertHighlight(response, 0, "field1", 0, 1, equalTo("The quick brown <1>fox")); - assertHighlight(response, 0, "field2", 0, 1, equalTo("The slow brown <2>fox")); + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.matchQuery("field1", "fox")) + .highlighter( + new HighlightBuilder().field( + new HighlightBuilder.Field("field1").preTags("<1>").postTags("").requireFieldMatch(true) + ).field(new HighlightBuilder.Field("field2").preTags("<2>").postTags("").requireFieldMatch(false)) + ), + response -> { + assertHighlight(response, 0, "field1", 0, 1, equalTo("The quick brown <1>fox")); + assertHighlight(response, 0, "field2", 0, 1, equalTo("The slow brown <2>fox")); + } + ); } public void testFastVectorHighlighterMultipleFields() { @@ -1782,14 +1915,18 @@ public void testFastVectorHighlighterMultipleFields() { indexDoc("test", "1", "field1", "The quick brown fox", "field2", "The slow brown fox"); refresh(); - SearchResponse response = prepareSearch("test").setQuery(QueryBuilders.matchQuery("field1", "fox")) - .highlighter( - new HighlightBuilder().field(new HighlightBuilder.Field("field1").preTags("<1>").postTags("").requireFieldMatch(true)) - .field(new HighlightBuilder.Field("field2").preTags("<2>").postTags("").requireFieldMatch(false)) - ) - .get(); - assertHighlight(response, 0, "field1", 0, 1, equalTo("The quick brown <1>fox")); - assertHighlight(response, 0, "field2", 0, 1, equalTo("The slow brown <2>fox")); + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.matchQuery("field1", "fox")) + .highlighter( + new HighlightBuilder().field( + new HighlightBuilder.Field("field1").preTags("<1>").postTags("").requireFieldMatch(true) + ).field(new HighlightBuilder.Field("field2").preTags("<2>").postTags("").requireFieldMatch(false)) + ), + response -> { + assertHighlight(response, 0, "field1", 0, 1, equalTo("The quick brown <1>fox")); + assertHighlight(response, 0, "field2", 0, 1, equalTo("The slow brown <2>fox")); + } + ); } public void testMissingStoredField() throws Exception { @@ -1799,14 +1936,15 @@ public void testMissingStoredField() throws Exception { refresh(); // This query used to fail when the field to highlight was absent - SearchResponse response = prepareSearch("test").setQuery(QueryBuilders.matchQuery("field", "highlight")) - .highlighter( - new HighlightBuilder().field( - new HighlightBuilder.Field("highlight_field").fragmentSize(-1).numOfFragments(1).fragmenter("simple") - ) - ) - .get(); - assertThat(response.getHits().getHits()[0].getHighlightFields().isEmpty(), equalTo(true)); + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.matchQuery("field", "highlight")) + .highlighter( + new HighlightBuilder().field( + new HighlightBuilder.Field("highlight_field").fragmentSize(-1).numOfFragments(1).fragmenter("simple") + ) + ), + response -> assertThat(response.getHits().getHits()[0].getHighlightFields().isEmpty(), equalTo(true)) + ); } // Issue #3211 @@ -1891,22 +2029,19 @@ public void testHighlightUsesHighlightQuery() throws IOException { .highlighter(highlightBuilder); Matcher searchQueryMatcher = equalTo("Testing the highlight query feature"); - SearchResponse response = search.get(); - assertHighlight(response, 0, "text", 0, searchQueryMatcher); + assertResponse(search, response -> assertHighlight(response, 0, "text", 0, searchQueryMatcher)); field = new HighlightBuilder.Field("text"); Matcher hlQueryMatcher = equalTo("Testing the highlight query feature"); field.highlightQuery(matchQuery("text", "query")); highlightBuilder = new 
HighlightBuilder().field(field); search = prepareSearch("test").setQuery(QueryBuilders.matchQuery("text", "testing")).highlighter(highlightBuilder); - response = search.get(); - assertHighlight(response, 0, "text", 0, hlQueryMatcher); + assertResponse(search, response -> assertHighlight(response, 0, "text", 0, hlQueryMatcher)); // Make sure the highlightQuery is taken into account when it is set on the highlight context instead of the field highlightBuilder.highlightQuery(matchQuery("text", "query")); field.highlighterType(type).highlightQuery(null); - response = search.get(); - assertHighlight(response, 0, "text", 0, hlQueryMatcher); + assertResponse(search, response -> assertHighlight(response, 0, "text", 0, hlQueryMatcher)); } } @@ -2212,19 +2347,21 @@ public void testHighlightNoMatchSizeNumberOfFragments() { // if there's a match we only return the values with matches (whole value as number_of_fragments == 0) MatchQueryBuilder queryBuilder = QueryBuilders.matchQuery("text", "third fifth"); field.highlighterType("plain"); - SearchResponse response = prepareSearch("test").setQuery(queryBuilder).highlighter(new HighlightBuilder().field(field)).get(); - assertHighlight(response, 0, "text", 0, 2, equalTo("This is the third sentence. This is the fourth sentence.")); - assertHighlight(response, 0, "text", 1, 2, equalTo("This is the fifth sentence")); + assertResponse(prepareSearch("test").setQuery(queryBuilder).highlighter(new HighlightBuilder().field(field)), response -> { + assertHighlight(response, 0, "text", 0, 2, equalTo("This is the third sentence. This is the fourth sentence.")); + assertHighlight(response, 0, "text", 1, 2, equalTo("This is the fifth sentence")); + }); field.highlighterType("fvh"); - response = prepareSearch("test").setQuery(queryBuilder).highlighter(new HighlightBuilder().field(field)).get(); - assertHighlight(response, 0, "text", 0, 2, equalTo("This is the third sentence. This is the fourth sentence.")); - assertHighlight(response, 0, "text", 1, 2, equalTo("This is the fifth sentence")); - + assertResponse(prepareSearch("test").setQuery(queryBuilder).highlighter(new HighlightBuilder().field(field)), response -> { + assertHighlight(response, 0, "text", 0, 2, equalTo("This is the third sentence. This is the fourth sentence.")); + assertHighlight(response, 0, "text", 1, 2, equalTo("This is the fifth sentence")); + }); field.highlighterType("unified"); - response = prepareSearch("test").setQuery(queryBuilder).highlighter(new HighlightBuilder().field(field)).get(); - assertHighlight(response, 0, "text", 0, 2, equalTo("This is the third sentence. This is the fourth sentence.")); - assertHighlight(response, 0, "text", 1, 2, equalTo("This is the fifth sentence")); + assertResponse(prepareSearch("test").setQuery(queryBuilder).highlighter(new HighlightBuilder().field(field)), response -> { + assertHighlight(response, 0, "text", 0, 2, equalTo("This is the third sentence. 
This is the fourth sentence.")); + assertHighlight(response, 0, "text", 1, 2, equalTo("This is the fifth sentence")); + }); } public void testPostingsHighlighter() throws Exception { @@ -2329,22 +2466,21 @@ public void testPostingsHighlighterNumberOfFragments() throws Exception { SearchSourceBuilder source = searchSource().query(termQuery("field1", "fox")) .highlighter(highlight().field(new Field("field1").numOfFragments(5).preTags("").postTags(""))); - SearchResponse searchResponse = client().search(new SearchRequest("test").source(source)).actionGet(); - - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertHighlight( - searchResponse, - 0, - "field1", - 0, - 2, - equalTo( - "The quick brown fox jumps over the lazy dog." - + " The lazy red fox jumps over the quick dog." - ) - ); - assertHighlight(searchResponse, 0, "field1", 1, 2, equalTo("The quick brown dog jumps over the lazy fox.")); - + assertResponse(client().search(new SearchRequest("test").source(source)), response -> { + assertThat(response.getHits().getHits().length, equalTo(1)); + assertHighlight( + response, + 0, + "field1", + 0, + 2, + equalTo( + "The quick brown fox jumps over the lazy dog." + + " The lazy red fox jumps over the quick dog." + ) + ); + assertHighlight(response, 0, "field1", 1, 2, equalTo("The quick brown dog jumps over the lazy fox.")); + }); client().prepareIndex("test") .setId("2") .setSource( @@ -2360,39 +2496,40 @@ public void testPostingsHighlighterNumberOfFragments() throws Exception { source = searchSource().query(termQuery("field1", "fox")) .highlighter(highlight().field(new Field("field1").numOfFragments(0).preTags("").postTags(""))); - searchResponse = client().search(new SearchRequest("test").source(source)).actionGet(); - assertHitCount(searchResponse, 2L); - - for (SearchHit searchHit : searchResponse.getHits()) { - if ("1".equals(searchHit.getId())) { - assertHighlight( - searchHit, - "field1", - 0, - 1, - equalTo( - "The quick brown fox jumps over the lazy dog. " - + "The lazy red fox jumps over the quick dog. " - + "The quick brown dog jumps over the lazy fox." - ) - ); - } else if ("2".equals(searchHit.getId())) { - assertHighlight( - searchHit, - "field1", - 0, - 3, - equalTo("The quick brown fox jumps over the lazy dog. Second sentence not finished") - ); - assertHighlight(searchHit, "field1", 1, 3, equalTo("The lazy red fox jumps over the quick dog.")); - assertHighlight(searchHit, "field1", 2, 3, equalTo("The quick brown dog jumps over the lazy fox.")); - } else { - fail("Only hits with id 1 and 2 are returned"); + assertResponse(client().search(new SearchRequest("test").source(source)), response -> { + assertHitCount(response, 2L); + + for (SearchHit searchHit : response.getHits()) { + if ("1".equals(searchHit.getId())) { + assertHighlight( + searchHit, + "field1", + 0, + 1, + equalTo( + "The quick brown fox jumps over the lazy dog. " + + "The lazy red fox jumps over the quick dog. " + + "The quick brown dog jumps over the lazy fox." + ) + ); + } else if ("2".equals(searchHit.getId())) { + assertHighlight( + searchHit, + "field1", + 0, + 3, + equalTo("The quick brown fox jumps over the lazy dog. 
Second sentence not finished") + ); + assertHighlight(searchHit, "field1", 1, 3, equalTo("The lazy red fox jumps over the quick dog.")); + assertHighlight(searchHit, "field1", 2, 3, equalTo("The quick brown dog jumps over the lazy fox.")); + } else { + fail("Only hits with id 1 and 2 are returned"); + } } - } + }); } - public void testMultiMatchQueryHighlight() throws IOException { + public void testMultiMatchQueryHighlight() throws Exception { XContentBuilder mapping = XContentFactory.jsonBuilder() .startObject() .startObject("_doc") @@ -2429,22 +2566,23 @@ public void testMultiMatchQueryHighlight() throws IOException { .field(new Field("field1").requireFieldMatch(true).preTags("").postTags("")) ); logger.info("Running multi-match type: [{}] highlight with type: [{}]", matchQueryType, highlighterType); - SearchResponse searchResponse = client().search(new SearchRequest("test").source(source)).actionGet(); - assertHitCount(searchResponse, 1L); - assertHighlight( - searchResponse, - 0, - "field1", - 0, - anyOf( - equalTo("The quick brown fox jumps over"), - equalTo("The quick brown fox jumps over") - ) - ); + assertResponse(client().search(new SearchRequest("test").source(source)), response -> { + assertHitCount(response, 1L); + assertHighlight( + response, + 0, + "field1", + 0, + anyOf( + equalTo("The quick brown fox jumps over"), + equalTo("The quick brown fox jumps over") + ) + ); + }); } } - public void testCombinedFieldsQueryHighlight() throws IOException { + public void testCombinedFieldsQueryHighlight() throws Exception { XContentBuilder mapping = XContentFactory.jsonBuilder() .startObject() .startObject("_doc") @@ -2478,15 +2616,16 @@ public void testCombinedFieldsQueryHighlight() throws IOException { .field(new Field("field1").requireFieldMatch(true).preTags("").postTags("")) ); - SearchResponse searchResponse = client().search(new SearchRequest("test").source(source)).actionGet(); - assertHitCount(searchResponse, 1L); - assertHighlight( - searchResponse, - 0, - "field1", - 0, - equalTo("The quick brown fox jumps over") - ); + assertResponse(client().search(new SearchRequest("test").source(source)), response -> { + assertHitCount(response, 1L); + assertHighlight( + response, + 0, + "field1", + 0, + equalTo("The quick brown fox jumps over") + ); + }); } } @@ -2512,31 +2651,31 @@ public void testPostingsHighlighterOrderByScore() throws Exception { SearchSourceBuilder source = searchSource().query(termQuery("field1", "sentence")) .highlighter(highlight().field("field1").order("score")); - SearchResponse searchResponse = client().search(new SearchRequest("test").source(source)).actionGet(); - - Map highlightFieldMap = searchResponse.getHits().getAt(0).getHighlightFields(); - assertThat(highlightFieldMap.size(), equalTo(1)); - HighlightField field1 = highlightFieldMap.get("field1"); - assertThat(field1.fragments().length, equalTo(4)); - assertThat( - field1.fragments()[0].string(), - equalTo("This sentence contains three sentence occurrences (sentence).") - ); - assertThat( - field1.fragments()[1].string(), - equalTo( - "This sentence contains one match, not that short. " - + "This sentence contains two sentence matches." - ) - ); - assertThat( - field1.fragments()[2].string(), - equalTo("This is the second value's first sentence. 
This one contains no matches.") - ); - assertThat( - field1.fragments()[3].string(), - equalTo("One <em>sentence</em> match here and scored lower since the text is quite long, not that appealing.") - ); + assertResponse(client().search(new SearchRequest("test").source(source)), response -> { + Map<String, HighlightField> highlightFieldMap = response.getHits().getAt(0).getHighlightFields(); + assertThat(highlightFieldMap.size(), equalTo(1)); + HighlightField field1 = highlightFieldMap.get("field1"); + assertThat(field1.fragments().length, equalTo(4)); + assertThat( + field1.fragments()[0].string(), + equalTo("This <em>sentence</em> contains three <em>sentence</em> occurrences (<em>sentence</em>).") + ); + assertThat( + field1.fragments()[1].string(), + equalTo( + "This <em>sentence</em> contains one match, not that short. " + + "This <em>sentence</em> contains two <em>sentence</em> matches." + ) + ); + assertThat( + field1.fragments()[2].string(), + equalTo("This is the second value's first <em>sentence</em>. This one contains no matches.") + ); + assertThat( + field1.fragments()[3].string(), + equalTo("One <em>sentence</em> match here and scored lower since the text is quite long, not that appealing.") + ); + }); } public void testPostingsHighlighterEscapeHtml() throws Exception { @@ -2550,20 +2689,21 @@ public void testPostingsHighlighterEscapeHtml() throws Exception { } indexRandom(true, indexRequestBuilders); - SearchResponse searchResponse = prepareSearch().setQuery(matchQuery("title", "test")) - .highlighter(new HighlightBuilder().field("title").encoder("html")) - .get(); - - for (int i = 0; i < indexRequestBuilders.length; i++) { - assertHighlight( - searchResponse, - i, - "title", - 0, - 1, - equalTo("This is a html escaping highlighting <em>test</em> for *&amp;? elasticsearch") - ); - } + assertResponse( + prepareSearch().setQuery(matchQuery("title", "test")).highlighter(new HighlightBuilder().field("title").encoder("html")), + response -> { + for (int i = 0; i < indexRequestBuilders.length; i++) { + assertHighlight( + response, + i, + "title", + 0, + 1, + equalTo("This is a html escaping highlighting <em>test</em> for *&amp;? elasticsearch") + ); + } + } + ); } public void testPostingsHighlighterMultiMapperWithStore() throws Exception { @@ -2596,31 +2736,35 @@ public void testPostingsHighlighterMultiMapperWithStore() throws Exception { refresh(); // simple search on body with standard analyzer with a simple field query - SearchResponse searchResponse = prepareSearch() - // let's make sure we analyze the query and we highlight the resulting terms - .setQuery(matchQuery("title", "This is a Test")) - .highlighter(new HighlightBuilder().field("title")) - .get(); - - assertHitCount(searchResponse, 1L); - SearchHit hit = searchResponse.getHits().getAt(0); - // stopwords are not highlighted since not indexed - assertHighlight(hit, "title", 0, 1, equalTo("this is a <em>test</em> . Second sentence.")); - + assertResponse( + prepareSearch() + // let's make sure we analyze the query and we highlight the resulting terms + .setQuery(matchQuery("title", "This is a Test")) + .highlighter(new HighlightBuilder().field("title")), + response -> { + + assertHitCount(response, 1L); + SearchHit hit = response.getHits().getAt(0); + // stopwords are not highlighted since not indexed + assertHighlight(hit, "title", 0, 1, equalTo("this is a <em>test</em> . 
Second sentence.")); + } + ); // search on title.key and highlight on title - searchResponse = prepareSearch().setQuery(matchQuery("title.key", "this is a test")) - .highlighter(new HighlightBuilder().field("title.key")) - .get(); - assertHitCount(searchResponse, 1L); + assertResponse( + prepareSearch().setQuery(matchQuery("title.key", "this is a test")).highlighter(new HighlightBuilder().field("title.key")), + response -> { + assertHitCount(response, 1L); - // stopwords are now highlighted since we used only whitespace analyzer here - assertHighlight( - searchResponse, - 0, - "title.key", - 0, - 1, - equalTo("this is a test . Second sentence.") + // stopwords are now highlighted since we used only whitespace analyzer here + assertHighlight( + response, + 0, + "title.key", + 0, + 1, + equalTo("this is a test . Second sentence.") + ); + } ); } @@ -2830,17 +2974,11 @@ public void testPostingsHighlighterWildcardQuery() throws Exception { ); source = searchSource().query(wildcardQuery("field2", "qu*k")).highlighter(highlight().field("field2")); - SearchResponse searchResponse = prepareSearch("test").setSource(source).get(); - assertHitCount(searchResponse, 1L); + assertResponse(prepareSearch("test").setSource(source), response -> { + assertHitCount(response, 1L); - assertHighlight( - searchResponse, - 0, - "field2", - 0, - 1, - equalTo("The quick brown fox jumps over the lazy dog! Second sentence.") - ); + assertHighlight(response, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog! Second sentence.")); + }); } public void testPostingsHighlighterTermRangeQuery() throws Exception { @@ -2987,13 +3125,14 @@ public void testPostingsHighlighterManyDocs() throws Exception { SearchRequestBuilder searchRequestBuilder = prepareSearch().setSize(COUNT) .setQuery(termQuery("field1", "test")) .highlighter(new HighlightBuilder().field("field1")); - SearchResponse searchResponse = searchRequestBuilder.get(); - assertHitCount(searchResponse, COUNT); - assertThat(searchResponse.getHits().getHits().length, equalTo(COUNT)); - for (SearchHit hit : searchResponse.getHits()) { - String prefix = prefixes.get(hit.getId()); - assertHighlight(hit, "field1", 0, 1, equalTo("Sentence " + prefix + " test. Sentence two.")); - } + assertResponse(searchRequestBuilder, response -> { + assertHitCount(response, COUNT); + assertThat(response.getHits().getHits().length, equalTo(COUNT)); + for (SearchHit hit : response.getHits()) { + String prefix = prefixes.get(hit.getId()); + assertHighlight(hit, "field1", 0, 1, equalTo("Sentence " + prefix + " test. 
Sentence two.")); + } + }); } public void testDoesNotHighlightTypeName() throws Exception { @@ -3186,12 +3325,15 @@ public void testGeoFieldHighlightingWithDifferentHighlighters() throws IOExcepti .setCorners(61.10078883158897, -170.15625, -64.92354174306496, 118.47656249999999) ) .should(QueryBuilders.termQuery("text", "failure")); - SearchResponse search = prepareSearch().setSource( - new SearchSourceBuilder().query(query).highlighter(new HighlightBuilder().field("*").highlighterType(highlighterType)) - ).get(); - assertNoFailures(search); - assertThat(search.getHits().getTotalHits().value, equalTo(1L)); - assertThat(search.getHits().getAt(0).getHighlightFields().get("text").fragments().length, equalTo(1)); + assertNoFailuresAndResponse( + prepareSearch().setSource( + new SearchSourceBuilder().query(query).highlighter(new HighlightBuilder().field("*").highlighterType(highlighterType)) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getAt(0).getHighlightFields().get("text").fragments().length, equalTo(1)); + } + ); } public void testGeoFieldHighlightingWhenQueryGetsRewritten() throws IOException { @@ -3258,15 +3400,16 @@ public void testKeywordFieldHighlighting() throws IOException { .setSource(jsonBuilder().startObject().field("keyword_field", "some text").endObject()) .get(); refresh(); - SearchResponse search = prepareSearch().setSource( - new SearchSourceBuilder().query(QueryBuilders.matchQuery("keyword_field", "some text")) - .highlighter(new HighlightBuilder().field("*")) - ).get(); - assertNoFailures(search); - assertThat(search.getHits().getTotalHits().value, equalTo(1L)); - assertThat( - search.getHits().getAt(0).getHighlightFields().get("keyword_field").getFragments()[0].string(), - equalTo("some text") + assertNoFailuresAndResponse( + prepareSearch().setSource( + new SearchSourceBuilder().query(QueryBuilders.matchQuery("keyword_field", "some text")) + .highlighter(new HighlightBuilder().field("*")) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + HighlightField highlightField = response.getHits().getAt(0).getHighlightFields().get("keyword_field"); + assertThat(highlightField.fragments()[0].string(), equalTo("some text")); + } ); } @@ -3288,14 +3431,15 @@ public void testCopyToFields() throws Exception { .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); - SearchResponse response = prepareSearch().setQuery(matchQuery("foo_copy", "brown")) - .highlighter(new HighlightBuilder().field(new Field("foo_copy"))) - .get(); - - assertHitCount(response, 1); - HighlightField field = response.getHits().getAt(0).getHighlightFields().get("foo_copy"); - assertThat(field.getFragments().length, equalTo(1)); - assertThat(field.getFragments()[0].string(), equalTo("how now brown cow")); + assertResponse( + prepareSearch().setQuery(matchQuery("foo_copy", "brown")).highlighter(new HighlightBuilder().field(new Field("foo_copy"))), + response -> { + assertHitCount(response, 1); + HighlightField field = response.getHits().getAt(0).getHighlightFields().get("foo_copy"); + assertThat(field.fragments().length, equalTo(1)); + assertThat(field.fragments()[0].string(), equalTo("how now brown cow")); + } + ); } public void testACopyFieldWithNestedQuery() throws Exception { @@ -3338,14 +3482,17 @@ public void testACopyFieldWithNestedQuery() throws Exception { .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); - SearchResponse searchResponse = 
prepareSearch().setQuery(nestedQuery("foo", matchQuery("foo.text", "brown cow"), ScoreMode.None)) - .highlighter(new HighlightBuilder().field(new Field("foo_text").highlighterType("fvh")).requireFieldMatch(false)) - .get(); - assertHitCount(searchResponse, 1); - HighlightField field = searchResponse.getHits().getAt(0).getHighlightFields().get("foo_text"); - assertThat(field.getFragments().length, equalTo(2)); - assertThat(field.getFragments()[0].string(), equalTo("<em>brown</em>")); - assertThat(field.getFragments()[1].string(), equalTo("<em>cow</em>")); + assertResponse( + prepareSearch().setQuery(nestedQuery("foo", matchQuery("foo.text", "brown cow"), ScoreMode.None)) + .highlighter(new HighlightBuilder().field(new Field("foo_text").highlighterType("fvh")).requireFieldMatch(false)), + response -> { + assertHitCount(response, 1); + HighlightField field = response.getHits().getAt(0).getHighlightFields().get("foo_text"); + assertThat(field.fragments().length, equalTo(2)); + assertThat(field.fragments()[0].string(), equalTo("<em>brown</em>")); + assertThat(field.fragments()[1].string(), equalTo("<em>cow</em>")); + } + ); } public void testFunctionScoreQueryHighlight() throws Exception { @@ -3355,13 +3502,16 @@ public void testFunctionScoreQueryHighlight() throws Exception { .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); - SearchResponse searchResponse = prepareSearch().setQuery(new FunctionScoreQueryBuilder(QueryBuilders.prefixQuery("text", "bro"))) - .highlighter(new HighlightBuilder().field(new Field("text"))) - .get(); - assertHitCount(searchResponse, 1); - HighlightField field = searchResponse.getHits().getAt(0).getHighlightFields().get("text"); - assertThat(field.getFragments().length, equalTo(1)); - assertThat(field.getFragments()[0].string(), equalTo("<em>brown</em>")); + assertResponse( + prepareSearch().setQuery(new FunctionScoreQueryBuilder(QueryBuilders.prefixQuery("text", "bro"))) + .highlighter(new HighlightBuilder().field(new Field("text"))), + response -> { + assertHitCount(response, 1); + HighlightField field = response.getHits().getAt(0).getHighlightFields().get("text"); + assertThat(field.fragments().length, equalTo(1)); + assertThat(field.fragments()[0].string(), equalTo("<em>brown</em>")); + } + ); } public void testFiltersFunctionScoreQueryHighlight() throws Exception { @@ -3375,16 +3525,20 @@ public void testFiltersFunctionScoreQueryHighlight() throws Exception { new RandomScoreFunctionBuilder() ); - SearchResponse searchResponse = prepareSearch().setQuery( - new FunctionScoreQueryBuilder( - QueryBuilders.prefixQuery("text", "bro"), - new FunctionScoreQueryBuilder.FilterFunctionBuilder[] { filterBuilder } - ) - ).highlighter(new HighlightBuilder().field(new Field("text"))).get(); - assertHitCount(searchResponse, 1); - HighlightField field = searchResponse.getHits().getAt(0).getHighlightFields().get("text"); - assertThat(field.getFragments().length, equalTo(1)); - assertThat(field.getFragments()[0].string(), equalTo("<em>brown</em>")); + assertResponse( + prepareSearch().setQuery( + new FunctionScoreQueryBuilder( + QueryBuilders.prefixQuery("text", "bro"), + new FunctionScoreQueryBuilder.FilterFunctionBuilder[] { filterBuilder } + ) + ).highlighter(new HighlightBuilder().field(new Field("text"))), + response -> { + assertHitCount(response, 1); + HighlightField field = response.getHits().getAt(0).getHighlightFields().get("text"); + assertThat(field.fragments().length, equalTo(1)); + assertThat(field.fragments()[0].string(), equalTo("<em>brown</em>")); + } + ); } public void testHighlightQueryRewriteDatesWithNow() throws 
Exception { @@ -3403,19 +3557,20 @@ public void testHighlightQueryRewriteDatesWithNow() throws Exception { ); ensureSearchable("index-1"); for (int i = 0; i < 5; i++) { - final SearchResponse r1 = prepareSearch("index-1").addSort("d", SortOrder.DESC) - .setTrackScores(true) - .highlighter(highlight().field("field").preTags("").postTags("")) - .setQuery( - QueryBuilders.boolQuery() - .must(QueryBuilders.rangeQuery("d").gte("now-12h").lte("now").includeLower(true).includeUpper(true).boost(1.0f)) - .should(QueryBuilders.termQuery("field", "hello")) - ) - .get(); - - assertNoFailures(r1); - assertThat(r1.getHits().getTotalHits().value, equalTo(1L)); - assertHighlight(r1, 0, "field", 0, 1, equalTo("hello world")); + assertNoFailuresAndResponse( + prepareSearch("index-1").addSort("d", SortOrder.DESC) + .setTrackScores(true) + .highlighter(highlight().field("field").preTags("").postTags("")) + .setQuery( + QueryBuilders.boolQuery() + .must(QueryBuilders.rangeQuery("d").gte("now-12h").lte("now").includeLower(true).includeUpper(true).boost(1.0f)) + .should(QueryBuilders.termQuery("field", "hello")) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertHighlight(response, 0, "field", 0, 1, equalTo("hello world")); + } + ); } } @@ -3460,51 +3615,63 @@ public void testWithNestedQuery() throws Exception { .get(); for (String type : new String[] { "unified", "plain" }) { - SearchResponse searchResponse = prepareSearch().setQuery( - nestedQuery("foo", matchQuery("foo.text", "brown cow"), ScoreMode.None) - ).highlighter(new HighlightBuilder().field(new Field("foo.text").highlighterType(type))).get(); - assertHitCount(searchResponse, 1); - HighlightField field = searchResponse.getHits().getAt(0).getHighlightFields().get("foo.text"); - assertThat(field.getFragments().length, equalTo(2)); - assertThat(field.getFragments()[0].string(), equalTo("brown shoes")); - assertThat(field.getFragments()[1].string(), equalTo("cow")); - - searchResponse = prepareSearch().setQuery(nestedQuery("foo", prefixQuery("foo.text", "bro"), ScoreMode.None)) - .highlighter(new HighlightBuilder().field(new Field("foo.text").highlighterType(type))) - .get(); - assertHitCount(searchResponse, 1); - field = searchResponse.getHits().getAt(0).getHighlightFields().get("foo.text"); - assertThat(field.getFragments().length, equalTo(1)); - assertThat(field.getFragments()[0].string(), equalTo("brown shoes")); - - searchResponse = prepareSearch().setQuery(nestedQuery("foo", matchPhraseQuery("foo.text", "brown shoes"), ScoreMode.None)) - .highlighter(new HighlightBuilder().field(new Field("foo.text").highlighterType(type))) - .get(); - assertHitCount(searchResponse, 1); - field = searchResponse.getHits().getAt(0).getHighlightFields().get("foo.text"); - assertThat(field.getFragments().length, equalTo(1)); - assertThat(field.getFragments()[0].string(), equalTo("brown shoes")); - - searchResponse = prepareSearch().setQuery(nestedQuery("foo", matchPhrasePrefixQuery("foo.text", "bro"), ScoreMode.None)) - .highlighter(new HighlightBuilder().field(new Field("foo.text").highlighterType(type))) - .get(); - assertHitCount(searchResponse, 1); - field = searchResponse.getHits().getAt(0).getHighlightFields().get("foo.text"); - assertThat(field.getFragments().length, equalTo(1)); - assertThat(field.getFragments()[0].string(), equalTo("brown shoes")); + assertResponse( + prepareSearch().setQuery(nestedQuery("foo", matchQuery("foo.text", "brown cow"), ScoreMode.None)) + .highlighter(new HighlightBuilder().field(new 
Field("foo.text").highlighterType(type))), + response -> { + assertHitCount(response, 1); + HighlightField field = response.getHits().getAt(0).getHighlightFields().get("foo.text"); + assertThat(field.fragments().length, equalTo(2)); + assertThat(field.fragments()[0].string(), equalTo("brown shoes")); + assertThat(field.fragments()[1].string(), equalTo("cow")); + } + ); + assertResponse( + prepareSearch().setQuery(nestedQuery("foo", prefixQuery("foo.text", "bro"), ScoreMode.None)) + .highlighter(new HighlightBuilder().field(new Field("foo.text").highlighterType(type))), + response -> { + assertHitCount(response, 1); + HighlightField field = response.getHits().getAt(0).getHighlightFields().get("foo.text"); + assertThat(field.fragments().length, equalTo(1)); + assertThat(field.fragments()[0].string(), equalTo("brown shoes")); + } + ); + assertResponse( + prepareSearch().setQuery(nestedQuery("foo", matchPhraseQuery("foo.text", "brown shoes"), ScoreMode.None)) + .highlighter(new HighlightBuilder().field(new Field("foo.text").highlighterType(type))), + response -> { + assertHitCount(response, 1); + HighlightField field = response.getHits().getAt(0).getHighlightFields().get("foo.text"); + assertThat(field.fragments().length, equalTo(1)); + assertThat(field.fragments()[0].string(), equalTo("brown shoes")); + } + ); + assertResponse( + prepareSearch().setQuery(nestedQuery("foo", matchPhrasePrefixQuery("foo.text", "bro"), ScoreMode.None)) + .highlighter(new HighlightBuilder().field(new Field("foo.text").highlighterType(type))), + response -> { + assertHitCount(response, 1); + HighlightField field = response.getHits().getAt(0).getHighlightFields().get("foo.text"); + assertThat(field.fragments().length, equalTo(1)); + assertThat(field.fragments()[0].string(), equalTo("brown shoes")); + } + ); } // For unified and fvh highlighters we just check that the nested query is correctly extracted // but we highlight the root text field since nested documents cannot be highlighted with postings nor term vectors // directly. 
for (String type : ALL_TYPES) { - SearchResponse searchResponse = prepareSearch().setQuery(nestedQuery("foo", prefixQuery("foo.text", "bro"), ScoreMode.None)) - .highlighter(new HighlightBuilder().field(new Field("text").highlighterType(type).requireFieldMatch(false))) - .get(); - assertHitCount(searchResponse, 1); - HighlightField field = searchResponse.getHits().getAt(0).getHighlightFields().get("text"); - assertThat(field.getFragments().length, equalTo(1)); - assertThat(field.getFragments()[0].string(), equalTo("brown")); + assertResponse( + prepareSearch().setQuery(nestedQuery("foo", prefixQuery("foo.text", "bro"), ScoreMode.None)) + .highlighter(new HighlightBuilder().field(new Field("text").highlighterType(type).requireFieldMatch(false))), + response -> { + assertHitCount(response, 1); + HighlightField field = response.getHits().getAt(0).getHighlightFields().get("text"); + assertThat(field.fragments().length, equalTo(1)); + assertThat(field.fragments()[0].string(), equalTo("brown")); + } + ); } } @@ -3521,13 +3688,16 @@ public void testWithNormalizer() throws Exception { .get(); for (String highlighterType : new String[] { "unified", "plain" }) { - SearchResponse searchResponse = prepareSearch().setQuery(matchQuery("keyword", "hello world")) - .highlighter(new HighlightBuilder().field(new Field("keyword").highlighterType(highlighterType))) - .get(); - assertHitCount(searchResponse, 1); - HighlightField field = searchResponse.getHits().getAt(0).getHighlightFields().get("keyword"); - assertThat(field.getFragments().length, equalTo(1)); - assertThat(field.getFragments()[0].string(), equalTo("hello world")); + assertResponse( + prepareSearch().setQuery(matchQuery("keyword", "hello world")) + .highlighter(new HighlightBuilder().field(new Field("keyword").highlighterType(highlighterType))), + response -> { + assertHitCount(response, 1); + HighlightField field = response.getHits().getAt(0).getHighlightFields().get("keyword"); + assertThat(field.fragments().length, equalTo(1)); + assertThat(field.fragments()[0].string(), equalTo("hello world")); + } + ); } } @@ -3542,11 +3712,14 @@ public void testDisableHighlightIdField() throws Exception { .get(); for (String highlighterType : new String[] { "plain", "unified" }) { - SearchResponse searchResponse = prepareSearch().setQuery( - matchQuery("_id", "d33f85bf1e51e84d9ab38948db9f3a068e1fe5294f1d8603914ac8c7bcc39ca1") - ).highlighter(new HighlightBuilder().field(new Field("*").highlighterType(highlighterType).requireFieldMatch(false))).get(); - assertHitCount(searchResponse, 1); - assertNull(searchResponse.getHits().getAt(0).getHighlightFields().get("_id")); + assertResponse( + prepareSearch().setQuery(matchQuery("_id", "d33f85bf1e51e84d9ab38948db9f3a068e1fe5294f1d8603914ac8c7bcc39ca1")) + .highlighter(new HighlightBuilder().field(new Field("*").highlighterType(highlighterType).requireFieldMatch(false))), + response -> { + assertHitCount(response, 1); + assertNull(response.getHits().getAt(0).getHighlightFields().get("_id")); + } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java index 474d4ebc12843..3e035092a2551 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java @@ -193,11 +193,6 @@ protected boolean addMockHttpTransport() { 
return false; // enable http } - @Override - protected boolean ignoreExternalCluster() { - return true; - } - public void testFieldAlias() { FieldCapabilitiesResponse response = client().prepareFieldCaps().setFields("distance", "route_length_miles").get(); @@ -669,7 +664,7 @@ public void testCancel() throws Exception { ) ); BlockingOnRewriteQueryBuilder.blockOnRewrite(); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); Request restRequest = new Request("POST", "/_field_caps?fields=*"); restRequest.setEntity(new StringEntity(""" { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java index e3c9558eba907..7e88df463c13d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -61,8 +60,10 @@ import static org.elasticsearch.common.util.set.Sets.newHashSet; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertCheckedResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; @@ -189,70 +190,70 @@ public void testStoredFields() throws Exception { indicesAdmin().prepareRefresh().get(); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()).addStoredField("field1").get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getFields().size(), equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field1").getValue().toString(), equalTo("value1")); - + assertResponse(prepareSearch().setQuery(matchAllQuery()).addStoredField("field1"), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().get("field1").getValue().toString(), equalTo("value1")); + }); // field2 is not stored, check that it is not extracted from source. 
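// (Presumed mapping behind this test, not shown in the hunk: "field1" and "field3" are declared
// with "store": true while "field2" is left unstored, so addStoredField("field2") should produce
// no entry at all rather than an empty one; the next assertions check exactly that.)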
- searchResponse = prepareSearch().setQuery(matchAllQuery()).addStoredField("field2").get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getFields().size(), equalTo(0)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field2"), nullValue()); - - searchResponse = prepareSearch().setQuery(matchAllQuery()).addStoredField("field3").get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getFields().size(), equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); - - searchResponse = prepareSearch().setQuery(matchAllQuery()).addStoredField("*3").get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getFields().size(), equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); - - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addStoredField("*3") - .addStoredField("field1") - .addStoredField("field2") - .get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getFields().size(), equalTo(2)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field1").getValue().toString(), equalTo("value1")); - - searchResponse = prepareSearch().setQuery(matchAllQuery()).addStoredField("field*").get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getFields().size(), equalTo(2)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field1").getValue().toString(), equalTo("value1")); - - searchResponse = prepareSearch().setQuery(matchAllQuery()).addStoredField("f*3").get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getFields().size(), equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); - - searchResponse = prepareSearch().setQuery(matchAllQuery()).addStoredField("*").get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getSourceAsMap(), nullValue()); - assertThat(searchResponse.getHits().getAt(0).getFields().size(), equalTo(2)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field1").getValue().toString(), equalTo("value1")); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); - - searchResponse = 
prepareSearch().setQuery(matchAllQuery()).addStoredField("*").addStoredField("_source").get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getSourceAsMap(), notNullValue()); - assertThat(searchResponse.getHits().getAt(0).getFields().size(), equalTo(2)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field1").getValue().toString(), equalTo("value1")); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); + assertResponse(prepareSearch().setQuery(matchAllQuery()).addStoredField("field2"), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(0)); + assertThat(response.getHits().getAt(0).getFields().get("field2"), nullValue()); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).addStoredField("field3"), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).addStoredField("*3"), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); + }); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addStoredField("*3").addStoredField("field1").addStoredField("field2"), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(2)); + assertThat(response.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); + assertThat(response.getHits().getAt(0).getFields().get("field1").getValue().toString(), equalTo("value1")); + } + ); + assertResponse(prepareSearch().setQuery(matchAllQuery()).addStoredField("field*"), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(2)); + assertThat(response.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); + assertThat(response.getHits().getAt(0).getFields().get("field1").getValue().toString(), equalTo("value1")); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).addStoredField("f*3"), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).addStoredField("*"), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + 
assertThat(response.getHits().getHits().length, equalTo(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap(), nullValue()); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(2)); + assertThat(response.getHits().getAt(0).getFields().get("field1").getValue().toString(), equalTo("value1")); + assertThat(response.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).addStoredField("*").addStoredField("_source"), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap(), notNullValue()); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(2)); + assertThat(response.getHits().getAt(0).getFields().get("field1").getValue().toString(), equalTo("value1")); + assertThat(response.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); + }); } public void testScriptDocAndFields() throws Exception { @@ -297,64 +298,68 @@ public void testScriptDocAndFields() throws Exception { indicesAdmin().refresh(new RefreshRequest()).actionGet(); logger.info("running doc['num1'].value"); - SearchResponse response = prepareSearch().setQuery(matchAllQuery()) - .addSort("num1", SortOrder.ASC) - .addScriptField("sNum1", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value", Collections.emptyMap())) - .addScriptField( - "sNum1_field", - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_fields['num1'].value", Collections.emptyMap()) - ) - .addScriptField( - "date1", - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['date'].date.millis", Collections.emptyMap()) - ) - .get(); - - assertNoFailures(response); - - assertThat(response.getHits().getTotalHits().value, equalTo(3L)); - assertFalse(response.getHits().getAt(0).hasSource()); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - Set fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); - assertThat(fields, equalTo(newHashSet("sNum1", "sNum1_field", "date1"))); - assertThat(response.getHits().getAt(0).getFields().get("sNum1").getValues().get(0), equalTo(1.0)); - assertThat(response.getHits().getAt(0).getFields().get("sNum1_field").getValues().get(0), equalTo(1.0)); - assertThat(response.getHits().getAt(0).getFields().get("date1").getValues().get(0), equalTo(0L)); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); - fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); - assertThat(fields, equalTo(newHashSet("sNum1", "sNum1_field", "date1"))); - assertThat(response.getHits().getAt(1).getFields().get("sNum1").getValues().get(0), equalTo(2.0)); - assertThat(response.getHits().getAt(1).getFields().get("sNum1_field").getValues().get(0), equalTo(2.0)); - assertThat(response.getHits().getAt(1).getFields().get("date1").getValues().get(0), equalTo(25000L)); - assertThat(response.getHits().getAt(2).getId(), equalTo("3")); - fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); - assertThat(fields, equalTo(newHashSet("sNum1", "sNum1_field", "date1"))); - assertThat(response.getHits().getAt(2).getFields().get("sNum1").getValues().get(0), equalTo(3.0)); - assertThat(response.getHits().getAt(2).getFields().get("sNum1_field").getValues().get(0), equalTo(3.0)); - assertThat(response.getHits().getAt(2).getFields().get("date1").getValues().get(0), 
equalTo(120000L)); - + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort("num1", SortOrder.ASC) + .addScriptField( + "sNum1", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value", Collections.emptyMap()) + ) + .addScriptField( + "sNum1_field", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_fields['num1'].value", Collections.emptyMap()) + ) + .addScriptField( + "date1", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['date'].date.millis", Collections.emptyMap()) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertFalse(response.getHits().getAt(0).hasSource()); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + Set fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); + assertThat(fields, equalTo(newHashSet("sNum1", "sNum1_field", "date1"))); + assertThat(response.getHits().getAt(0).getFields().get("sNum1").getValues().get(0), equalTo(1.0)); + assertThat(response.getHits().getAt(0).getFields().get("sNum1_field").getValues().get(0), equalTo(1.0)); + assertThat(response.getHits().getAt(0).getFields().get("date1").getValues().get(0), equalTo(0L)); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); + assertThat(fields, equalTo(newHashSet("sNum1", "sNum1_field", "date1"))); + assertThat(response.getHits().getAt(1).getFields().get("sNum1").getValues().get(0), equalTo(2.0)); + assertThat(response.getHits().getAt(1).getFields().get("sNum1_field").getValues().get(0), equalTo(2.0)); + assertThat(response.getHits().getAt(1).getFields().get("date1").getValues().get(0), equalTo(25000L)); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); + assertThat(fields, equalTo(newHashSet("sNum1", "sNum1_field", "date1"))); + assertThat(response.getHits().getAt(2).getFields().get("sNum1").getValues().get(0), equalTo(3.0)); + assertThat(response.getHits().getAt(2).getFields().get("sNum1_field").getValues().get(0), equalTo(3.0)); + assertThat(response.getHits().getAt(2).getFields().get("date1").getValues().get(0), equalTo(120000L)); + } + ); logger.info("running doc['num1'].value * factor"); - response = prepareSearch().setQuery(matchAllQuery()) - .addSort("num1", SortOrder.ASC) - .addScriptField( - "sNum1", - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value * factor", Map.of("factor", 2.0)) - ) - .get(); - - assertThat(response.getHits().getTotalHits().value, equalTo(3L)); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); - assertThat(fields, equalTo(singleton("sNum1"))); - assertThat(response.getHits().getAt(0).getFields().get("sNum1").getValues().get(0), equalTo(2.0)); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); - fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); - assertThat(fields, equalTo(singleton("sNum1"))); - assertThat(response.getHits().getAt(1).getFields().get("sNum1").getValues().get(0), equalTo(4.0)); - assertThat(response.getHits().getAt(2).getId(), equalTo("3")); - fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); - assertThat(fields, equalTo(singleton("sNum1"))); - assertThat(response.getHits().getAt(2).getFields().get("sNum1").getValues().get(0), equalTo(6.0)); + assertResponse( + 
prepareSearch().setQuery(matchAllQuery()) + .addSort("num1", SortOrder.ASC) + .addScriptField( + "sNum1", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value * factor", Map.of("factor", 2.0)) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + Set fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); + assertThat(fields, equalTo(singleton("sNum1"))); + assertThat(response.getHits().getAt(0).getFields().get("sNum1").getValues().get(0), equalTo(2.0)); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); + assertThat(fields, equalTo(singleton("sNum1"))); + assertThat(response.getHits().getAt(1).getFields().get("sNum1").getValues().get(0), equalTo(4.0)); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); + assertThat(fields, equalTo(singleton("sNum1"))); + assertThat(response.getHits().getAt(2).getFields().get("sNum1").getValues().get(0), equalTo(6.0)); + } + ); } public void testScriptFieldWithNanos() throws Exception { @@ -384,32 +389,32 @@ public void testScriptFieldWithNanos() throws Exception { client().prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("date", date).endObject()) ); - SearchResponse response = prepareSearch().setQuery(matchAllQuery()) - .addSort("date", SortOrder.ASC) - .addScriptField( - "date1", - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['date'].date.millis", Collections.emptyMap()) - ) - .addScriptField( - "date2", - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['date'].date.nanos", Collections.emptyMap()) - ) - .get(); - - assertNoFailures(response); - - assertThat(response.getHits().getAt(0).getId(), is("1")); - assertThat(response.getHits().getAt(0).getFields().get("date1").getValues().get(0), equalTo(0L)); - assertThat(response.getHits().getAt(0).getFields().get("date2").getValues().get(0), equalTo(0L)); - assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo(0L)); - - assertThat(response.getHits().getAt(1).getId(), is("2")); - Instant instant = ZonedDateTime.parse(date).toInstant(); - long dateAsNanos = DateUtils.toLong(instant); - long dateAsMillis = instant.toEpochMilli(); - assertThat(response.getHits().getAt(1).getFields().get("date1").getValues().get(0), equalTo(dateAsMillis)); - assertThat(response.getHits().getAt(1).getFields().get("date2").getValues().get(0), equalTo(dateAsNanos)); - assertThat(response.getHits().getAt(1).getSortValues()[0], equalTo(dateAsNanos)); + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort("date", SortOrder.ASC) + .addScriptField( + "date1", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['date'].date.millis", Collections.emptyMap()) + ) + .addScriptField( + "date2", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['date'].date.nanos", Collections.emptyMap()) + ), + response -> { + assertThat(response.getHits().getAt(0).getId(), is("1")); + assertThat(response.getHits().getAt(0).getFields().get("date1").getValues().get(0), equalTo(0L)); + assertThat(response.getHits().getAt(0).getFields().get("date2").getValues().get(0), equalTo(0L)); + assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo(0L)); + + assertThat(response.getHits().getAt(1).getId(), is("2")); + Instant 
instant = ZonedDateTime.parse(date).toInstant(); + long dateAsNanos = DateUtils.toLong(instant); + long dateAsMillis = instant.toEpochMilli(); + assertThat(response.getHits().getAt(1).getFields().get("date1").getValues().get(0), equalTo(dateAsMillis)); + assertThat(response.getHits().getAt(1).getFields().get("date2").getValues().get(0), equalTo(dateAsNanos)); + assertThat(response.getHits().getAt(1).getSortValues()[0], equalTo(dateAsNanos)); + } + ); } public void testIdBasedScriptFields() throws Exception { @@ -424,21 +429,21 @@ public void testIdBasedScriptFields() throws Exception { } indexRandom(true, indexRequestBuilders); - SearchResponse response = prepareSearch().setQuery(matchAllQuery()) - .addSort("num1", SortOrder.ASC) - .setSize(numDocs) - .addScriptField("id", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_fields._id.value", Collections.emptyMap())) - .get(); - - assertNoFailures(response); - - assertThat(response.getHits().getTotalHits().value, equalTo((long) numDocs)); - for (int i = 0; i < numDocs; i++) { - assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); - Set fields = new HashSet<>(response.getHits().getAt(i).getFields().keySet()); - assertThat(fields, equalTo(singleton("id"))); - assertThat(response.getHits().getAt(i).getFields().get("id").getValue(), equalTo(Integer.toString(i))); - } + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort("num1", SortOrder.ASC) + .setSize(numDocs) + .addScriptField("id", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_fields._id.value", Collections.emptyMap())), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo((long) numDocs)); + for (int i = 0; i < numDocs; i++) { + assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); + Set fields = new HashSet<>(response.getHits().getAt(i).getFields().keySet()); + assertThat(fields, equalTo(singleton("id"))); + assertThat(response.getHits().getAt(i).getFields().get("id").getValue(), equalTo(Integer.toString(i))); + } + } + ); } public void testScriptFieldUsingSource() throws Exception { @@ -467,57 +472,63 @@ public void testScriptFieldUsingSource() throws Exception { .get(); indicesAdmin().refresh(new RefreshRequest()).actionGet(); - SearchResponse response = prepareSearch().setQuery(matchAllQuery()) - .addScriptField("s_obj1", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.obj1", Collections.emptyMap())) - .addScriptField( - "s_obj1_test", - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.obj1.test", Collections.emptyMap()) - ) - .addScriptField("s_obj2", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.obj2", Collections.emptyMap())) - .addScriptField( - "s_obj2_arr2", - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.obj2.arr2", Collections.emptyMap()) - ) - .addScriptField("s_arr3", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.arr3", Collections.emptyMap())) - .get(); + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addScriptField("s_obj1", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.obj1", Collections.emptyMap())) + .addScriptField( + "s_obj1_test", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.obj1.test", Collections.emptyMap()) + ) + .addScriptField("s_obj2", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.obj2", Collections.emptyMap())) + .addScriptField( + "s_obj2_arr2", + new Script(ScriptType.INLINE, 
CustomScriptPlugin.NAME, "_source.obj2.arr2", Collections.emptyMap()) + ) + .addScriptField("s_arr3", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.arr3", Collections.emptyMap())), + response -> { - assertThat("Failures " + Arrays.toString(response.getShardFailures()), response.getShardFailures().length, equalTo(0)); + assertThat("Failures " + Arrays.toString(response.getShardFailures()), response.getShardFailures().length, equalTo(0)); - assertThat(response.getHits().getAt(0).field("s_obj1_test").getValue().toString(), equalTo("something")); + assertThat(response.getHits().getAt(0).field("s_obj1_test").getValue().toString(), equalTo("something")); - Map sObj1 = response.getHits().getAt(0).field("s_obj1").getValue(); - assertThat(sObj1.get("test").toString(), equalTo("something")); - assertThat(response.getHits().getAt(0).field("s_obj1_test").getValue().toString(), equalTo("something")); + Map sObj1 = response.getHits().getAt(0).field("s_obj1").getValue(); + assertThat(sObj1.get("test").toString(), equalTo("something")); + assertThat(response.getHits().getAt(0).field("s_obj1_test").getValue().toString(), equalTo("something")); - Map sObj2 = response.getHits().getAt(0).field("s_obj2").getValue(); - List sObj2Arr2 = (List) sObj2.get("arr2"); - assertThat(sObj2Arr2.size(), equalTo(2)); - assertThat(sObj2Arr2.get(0).toString(), equalTo("arr_value1")); - assertThat(sObj2Arr2.get(1).toString(), equalTo("arr_value2")); + Map sObj2 = response.getHits().getAt(0).field("s_obj2").getValue(); + List sObj2Arr2 = (List) sObj2.get("arr2"); + assertThat(sObj2Arr2.size(), equalTo(2)); + assertThat(sObj2Arr2.get(0).toString(), equalTo("arr_value1")); + assertThat(sObj2Arr2.get(1).toString(), equalTo("arr_value2")); - sObj2Arr2 = response.getHits().getAt(0).field("s_obj2_arr2").getValues(); - assertThat(sObj2Arr2.size(), equalTo(2)); - assertThat(sObj2Arr2.get(0).toString(), equalTo("arr_value1")); - assertThat(sObj2Arr2.get(1).toString(), equalTo("arr_value2")); + sObj2Arr2 = response.getHits().getAt(0).field("s_obj2_arr2").getValues(); + assertThat(sObj2Arr2.size(), equalTo(2)); + assertThat(sObj2Arr2.get(0).toString(), equalTo("arr_value1")); + assertThat(sObj2Arr2.get(1).toString(), equalTo("arr_value2")); - List sObj2Arr3 = response.getHits().getAt(0).field("s_arr3").getValues(); - assertThat(((Map) sObj2Arr3.get(0)).get("arr3_field1").toString(), equalTo("arr3_value1")); + List sObj2Arr3 = response.getHits().getAt(0).field("s_arr3").getValues(); + assertThat(((Map) sObj2Arr3.get(0)).get("arr3_field1").toString(), equalTo("arr3_value1")); + } + ); } public void testScriptFieldsForNullReturn() throws Exception { client().prepareIndex("test").setId("1").setSource("foo", "bar").setRefreshPolicy("true").get(); - SearchResponse response = prepareSearch().setQuery(matchAllQuery()) - .addScriptField("test_script_1", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "return null", Collections.emptyMap())) - .get(); - - assertNoFailures(response); - - DocumentField fieldObj = response.getHits().getAt(0).field("test_script_1"); - assertThat(fieldObj, notNullValue()); - List fieldValues = fieldObj.getValues(); - assertThat(fieldValues, hasSize(1)); - assertThat(fieldValues.get(0), nullValue()); + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()) + .addScriptField( + "test_script_1", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "return null", Collections.emptyMap()) + ), + response -> { + DocumentField fieldObj = 
response.getHits().getAt(0).field("test_script_1"); + assertThat(fieldObj, notNullValue()); + List fieldValues = fieldObj.getValues(); + assertThat(fieldValues, hasSize(1)); + assertThat(fieldValues.get(0), nullValue()); + } + ); } public void testPartialFields() throws Exception { @@ -624,49 +635,51 @@ public void testStoredFieldsWithoutSource() throws Exception { indicesAdmin().prepareRefresh().get(); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addStoredField("byte_field") - .addStoredField("short_field") - .addStoredField("integer_field") - .addStoredField("long_field") - .addStoredField("float_field") - .addStoredField("double_field") - .addStoredField("date_field") - .addStoredField("boolean_field") - .addStoredField("binary_field") - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - Set fields = new HashSet<>(searchResponse.getHits().getAt(0).getFields().keySet()); - assertThat( - fields, - equalTo( - newHashSet( - "byte_field", - "short_field", - "integer_field", - "long_field", - "float_field", - "double_field", - "date_field", - "boolean_field", - "binary_field" - ) - ) + assertCheckedResponse( + prepareSearch().setQuery(matchAllQuery()) + .addStoredField("byte_field") + .addStoredField("short_field") + .addStoredField("integer_field") + .addStoredField("long_field") + .addStoredField("float_field") + .addStoredField("double_field") + .addStoredField("date_field") + .addStoredField("boolean_field") + .addStoredField("binary_field"), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + Set fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); + assertThat( + fields, + equalTo( + newHashSet( + "byte_field", + "short_field", + "integer_field", + "long_field", + "float_field", + "double_field", + "date_field", + "boolean_field", + "binary_field" + ) + ) + ); + + SearchHit searchHit = response.getHits().getAt(0); + assertThat(searchHit.getFields().get("byte_field").getValue().toString(), equalTo("1")); + assertThat(searchHit.getFields().get("short_field").getValue().toString(), equalTo("2")); + assertThat(searchHit.getFields().get("integer_field").getValue(), equalTo((Object) 3)); + assertThat(searchHit.getFields().get("long_field").getValue(), equalTo((Object) 4L)); + assertThat(searchHit.getFields().get("float_field").getValue(), equalTo((Object) 5.0f)); + assertThat(searchHit.getFields().get("double_field").getValue(), equalTo((Object) 6.0d)); + String dateTime = DateFormatter.forPattern("date_optional_time").format(date); + assertThat(searchHit.getFields().get("date_field").getValue(), equalTo((Object) dateTime)); + assertThat(searchHit.getFields().get("boolean_field").getValue(), equalTo((Object) Boolean.TRUE)); + assertThat(searchHit.getFields().get("binary_field").getValue(), equalTo(new BytesArray("testing text".getBytes("UTF8")))); + } ); - - SearchHit searchHit = searchResponse.getHits().getAt(0); - assertThat(searchHit.getFields().get("byte_field").getValue().toString(), equalTo("1")); - assertThat(searchHit.getFields().get("short_field").getValue().toString(), equalTo("2")); - assertThat(searchHit.getFields().get("integer_field").getValue(), equalTo((Object) 3)); - assertThat(searchHit.getFields().get("long_field").getValue(), equalTo((Object) 4L)); - assertThat(searchHit.getFields().get("float_field").getValue(), 
equalTo((Object) 5.0f)); - assertThat(searchHit.getFields().get("double_field").getValue(), equalTo((Object) 6.0d)); - String dateTime = DateFormatter.forPattern("date_optional_time").format(date); - assertThat(searchHit.getFields().get("date_field").getValue(), equalTo((Object) dateTime)); - assertThat(searchHit.getFields().get("boolean_field").getValue(), equalTo((Object) Boolean.TRUE)); - assertThat(searchHit.getFields().get("binary_field").getValue(), equalTo(new BytesArray("testing text".getBytes("UTF8")))); } public void testSearchFieldsMetadata() throws Exception { @@ -677,11 +690,11 @@ public void testSearchFieldsMetadata() throws Exception { .setRefreshPolicy(IMMEDIATE) .get(); - SearchResponse searchResponse = prepareSearch("my-index").addStoredField("field1").addStoredField("_routing").get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getAt(0).field("field1"), nullValue()); - assertThat(searchResponse.getHits().getAt(0).field("_routing").getValue().toString(), equalTo("1")); + assertResponse(prepareSearch("my-index").addStoredField("field1").addStoredField("_routing"), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getAt(0).field("field1"), nullValue()); + assertThat(response.getHits().getAt(0).field("_routing").getValue().toString(), equalTo("1")); + }); } public void testGetFieldsComplexField() throws Exception { @@ -745,11 +758,12 @@ public void testGetFieldsComplexField() throws Exception { String field = "field1.field2.field3.field4"; - SearchResponse searchResponse = prepareSearch("my-index").addStoredField(field).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getAt(0).field(field).getValues().size(), equalTo(2)); - assertThat(searchResponse.getHits().getAt(0).field(field).getValues().get(0).toString(), equalTo("value1")); - assertThat(searchResponse.getHits().getAt(0).field(field).getValues().get(1).toString(), equalTo("value2")); + assertResponse(prepareSearch("my-index").addStoredField(field), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getAt(0).field(field).getValues().size(), equalTo(2)); + assertThat(response.getHits().getAt(0).field(field).getValues().get(0).toString(), equalTo("value1")); + assertThat(response.getHits().getAt(0).field(field).getValues().get(1).toString(), equalTo("value2")); + }); } // see #8203 @@ -757,12 +771,14 @@ public void testSingleValueFieldDatatField() throws ExecutionException, Interrup assertAcked(indicesAdmin().prepareCreate("test").setMapping("test_field", "type=keyword").get()); indexRandom(true, client().prepareIndex("test").setId("1").setSource("test_field", "foobar")); refresh(); - SearchResponse searchResponse = prepareSearch("test").setSource( - new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).docValueField("test_field") - ).get(); - assertHitCount(searchResponse, 1); - Map fields = searchResponse.getHits().getHits()[0].getFields(); - assertThat(fields.get("test_field").getValue(), equalTo("foobar")); + assertResponse( + prepareSearch("test").setSource(new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).docValueField("test_field")), + response -> { + assertHitCount(response, 1); + Map fields = response.getHits().getHits()[0].getFields(); + assertThat(fields.get("test_field").getValue(), equalTo("foobar")); + } + ); } public void 
testDocValueFields() throws Exception { @@ -860,116 +876,116 @@ public void testDocValueFields() throws Exception { if (randomBoolean()) { builder.addDocValueField("*_field"); } - SearchResponse searchResponse = builder.get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - Set fields = new HashSet<>(searchResponse.getHits().getAt(0).getFields().keySet()); - assertThat( - fields, - equalTo( - newHashSet( - "byte_field", - "short_field", - "integer_field", - "long_field", - "float_field", - "double_field", - "date_field", - "boolean_field", - "text_field", - "keyword_field", - "binary_field", - "ip_field" + assertResponse(builder, response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + Set fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); + assertThat( + fields, + equalTo( + newHashSet( + "byte_field", + "short_field", + "integer_field", + "long_field", + "float_field", + "double_field", + "date_field", + "boolean_field", + "text_field", + "keyword_field", + "binary_field", + "ip_field" + ) ) - ) - ); + ); - assertThat(searchResponse.getHits().getAt(0).getFields().get("byte_field").getValues(), equalTo(List.of(1L))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("short_field").getValues(), equalTo(List.of(2L))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("integer_field").getValues(), equalTo(List.of(3L))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("long_field").getValues(), equalTo(List.of(4L))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("float_field").getValues(), equalTo(List.of(5.0))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("double_field").getValues(), equalTo(List.of(6.0d))); - assertThat( - searchResponse.getHits().getAt(0).getFields().get("date_field").getValue(), - equalTo(DateFormatter.forPattern("date_optional_time").format(date)) - ); - assertThat(searchResponse.getHits().getAt(0).getFields().get("boolean_field").getValues(), equalTo(List.of(true))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("text_field").getValues(), equalTo(List.of("foo"))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValues(), equalTo(List.of("foo"))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValues(), equalTo(List.of("KmQ="))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValues(), equalTo(List.of("::1"))); - - builder = prepareSearch().setQuery(matchAllQuery()).addDocValueField("*field"); - searchResponse = builder.get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - fields = new HashSet<>(searchResponse.getHits().getAt(0).getFields().keySet()); - assertThat( - fields, - equalTo( - newHashSet( - "byte_field", - "short_field", - "integer_field", - "long_field", - "float_field", - "double_field", - "date_field", - "boolean_field", - "text_field", - "keyword_field", - "binary_field", - "ip_field" + assertThat(response.getHits().getAt(0).getFields().get("byte_field").getValues(), equalTo(List.of(1L))); + assertThat(response.getHits().getAt(0).getFields().get("short_field").getValues(), equalTo(List.of(2L))); + 
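Note on the pattern driving every hunk in this file: a materialized "SearchResponse searchResponse = builder.get()" followed by assertions becomes a single call into the consumer-style helpers from org.elasticsearch.test.hamcrest.ElasticsearchAssertions. Below is a minimal sketch of the shape this migration assumes; the real helpers also have request-builder and checked-consumer overloads (assertCheckedResponse, used earlier in this file), and their exact release semantics may differ.

    import java.util.function.Consumer;

    import org.elasticsearch.action.ActionFuture;
    import org.elasticsearch.action.search.SearchResponse;

    public final class ResponseAssertionSketch {

        private ResponseAssertionSketch() {}

        // Runs the caller's assertions against the response, then releases it even
        // when an assertion fails. Treating decRef() as the release mechanism is an
        // assumption here; the production helper owns that detail.
        public static void assertResponse(ActionFuture<SearchResponse> future, Consumer<SearchResponse> consumer) {
            SearchResponse response = future.actionGet();
            try {
                consumer.accept(response);
            } finally {
                response.decRef();
            }
        }
    }

The builder overload invoked as assertResponse(prepareSearch(...), response -> ...) throughout these hunks presumably wraps builder.get() the same way.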
assertThat(response.getHits().getAt(0).getFields().get("integer_field").getValues(), equalTo(List.of(3L))); + assertThat(response.getHits().getAt(0).getFields().get("long_field").getValues(), equalTo(List.of(4L))); + assertThat(response.getHits().getAt(0).getFields().get("float_field").getValues(), equalTo(List.of(5.0))); + assertThat(response.getHits().getAt(0).getFields().get("double_field").getValues(), equalTo(List.of(6.0d))); + assertThat( + response.getHits().getAt(0).getFields().get("date_field").getValue(), + equalTo(DateFormatter.forPattern("date_optional_time").format(date)) + ); + assertThat(response.getHits().getAt(0).getFields().get("boolean_field").getValues(), equalTo(List.of(true))); + assertThat(response.getHits().getAt(0).getFields().get("text_field").getValues(), equalTo(List.of("foo"))); + assertThat(response.getHits().getAt(0).getFields().get("keyword_field").getValues(), equalTo(List.of("foo"))); + assertThat(response.getHits().getAt(0).getFields().get("binary_field").getValues(), equalTo(List.of("KmQ="))); + assertThat(response.getHits().getAt(0).getFields().get("ip_field").getValues(), equalTo(List.of("::1"))); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).addDocValueField("*field"), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + Set fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); + assertThat( + fields, + equalTo( + newHashSet( + "byte_field", + "short_field", + "integer_field", + "long_field", + "float_field", + "double_field", + "date_field", + "boolean_field", + "text_field", + "keyword_field", + "binary_field", + "ip_field" + ) ) - ) - ); - - assertThat(searchResponse.getHits().getAt(0).getFields().get("byte_field").getValues(), equalTo(List.of(1L))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("short_field").getValues(), equalTo(List.of(2L))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("integer_field").getValues(), equalTo(List.of(3L))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("long_field").getValues(), equalTo(List.of(4L))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("float_field").getValues(), equalTo(List.of(5.0))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("double_field").getValues(), equalTo(List.of(6.0d))); - assertThat( - searchResponse.getHits().getAt(0).getFields().get("date_field").getValue(), - equalTo(DateFormatter.forPattern("date_optional_time").format(date)) - ); - assertThat(searchResponse.getHits().getAt(0).getFields().get("boolean_field").getValues(), equalTo(List.of(true))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("text_field").getValues(), equalTo(List.of("foo"))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValues(), equalTo(List.of("foo"))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValues(), equalTo(List.of("KmQ="))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValues(), equalTo(List.of("::1"))); - - builder = prepareSearch().setQuery(matchAllQuery()) - .addDocValueField("byte_field", "#.0") - .addDocValueField("short_field", "#.0") - .addDocValueField("integer_field", "#.0") - .addDocValueField("long_field", "#.0") - .addDocValueField("float_field", "#.0") - .addDocValueField("double_field", "#.0") - .addDocValueField("date_field", "epoch_millis"); - 
searchResponse = builder.get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - fields = new HashSet<>(searchResponse.getHits().getAt(0).getFields().keySet()); - assertThat( - fields, - equalTo(newHashSet("byte_field", "short_field", "integer_field", "long_field", "float_field", "double_field", "date_field")) - ); + ); - assertThat(searchResponse.getHits().getAt(0).getFields().get("byte_field").getValues(), equalTo(List.of("1.0"))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("short_field").getValues(), equalTo(List.of("2.0"))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("integer_field").getValues(), equalTo(List.of("3.0"))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("long_field").getValues(), equalTo(List.of("4.0"))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("float_field").getValues(), equalTo(List.of("5.0"))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("double_field").getValues(), equalTo(List.of("6.0"))); - assertThat( - searchResponse.getHits().getAt(0).getFields().get("date_field").getValue(), - equalTo(DateFormatter.forPattern("epoch_millis").format(date)) + assertThat(response.getHits().getAt(0).getFields().get("byte_field").getValues(), equalTo(List.of(1L))); + assertThat(response.getHits().getAt(0).getFields().get("short_field").getValues(), equalTo(List.of(2L))); + assertThat(response.getHits().getAt(0).getFields().get("integer_field").getValues(), equalTo(List.of(3L))); + assertThat(response.getHits().getAt(0).getFields().get("long_field").getValues(), equalTo(List.of(4L))); + assertThat(response.getHits().getAt(0).getFields().get("float_field").getValues(), equalTo(List.of(5.0))); + assertThat(response.getHits().getAt(0).getFields().get("double_field").getValues(), equalTo(List.of(6.0d))); + assertThat( + response.getHits().getAt(0).getFields().get("date_field").getValue(), + equalTo(DateFormatter.forPattern("date_optional_time").format(date)) + ); + assertThat(response.getHits().getAt(0).getFields().get("boolean_field").getValues(), equalTo(List.of(true))); + assertThat(response.getHits().getAt(0).getFields().get("text_field").getValues(), equalTo(List.of("foo"))); + assertThat(response.getHits().getAt(0).getFields().get("keyword_field").getValues(), equalTo(List.of("foo"))); + assertThat(response.getHits().getAt(0).getFields().get("binary_field").getValues(), equalTo(List.of("KmQ="))); + assertThat(response.getHits().getAt(0).getFields().get("ip_field").getValues(), equalTo(List.of("::1"))); + }); + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addDocValueField("byte_field", "#.0") + .addDocValueField("short_field", "#.0") + .addDocValueField("integer_field", "#.0") + .addDocValueField("long_field", "#.0") + .addDocValueField("float_field", "#.0") + .addDocValueField("double_field", "#.0") + .addDocValueField("date_field", "epoch_millis"), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + Set fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); + assertThat( + fields, + equalTo( + newHashSet("byte_field", "short_field", "integer_field", "long_field", "float_field", "double_field", "date_field") + ) + ); + assertThat(response.getHits().getAt(0).getFields().get("byte_field").getValues(), equalTo(List.of("1.0"))); + 
assertThat(response.getHits().getAt(0).getFields().get("short_field").getValues(), equalTo(List.of("2.0"))); + assertThat(response.getHits().getAt(0).getFields().get("integer_field").getValues(), equalTo(List.of("3.0"))); + assertThat(response.getHits().getAt(0).getFields().get("long_field").getValues(), equalTo(List.of("4.0"))); + assertThat(response.getHits().getAt(0).getFields().get("float_field").getValues(), equalTo(List.of("5.0"))); + assertThat(response.getHits().getAt(0).getFields().get("double_field").getValues(), equalTo(List.of("6.0"))); + assertThat( + response.getHits().getAt(0).getFields().get("date_field").getValue(), + equalTo(DateFormatter.forPattern("epoch_millis").format(date)) + ); + } ); } @@ -1021,18 +1037,18 @@ public void testScriptFields() throws Exception { new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['" + field + "']", Collections.emptyMap()) ); } - SearchResponse resp = req.get(); - assertNoFailures(resp); - for (SearchHit hit : resp.getHits().getHits()) { - final int id = Integer.parseInt(hit.getId()); - Map fields = hit.getFields(); - assertThat(fields.get("s").getValues(), equalTo(Collections.singletonList(Integer.toString(id)))); - assertThat(fields.get("l").getValues(), equalTo(Collections.singletonList((long) id))); - assertThat(fields.get("d").getValues(), equalTo(Collections.singletonList((double) id))); - assertThat(fields.get("ms").getValues(), equalTo(Arrays.asList(Integer.toString(id), Integer.toString(id + 1)))); - assertThat(fields.get("ml").getValues(), equalTo(Arrays.asList((long) id, id + 1L))); - assertThat(fields.get("md").getValues(), equalTo(Arrays.asList((double) id, id + 1d))); - } + assertNoFailuresAndResponse(req, response -> { + for (SearchHit hit : response.getHits().getHits()) { + final int id = Integer.parseInt(hit.getId()); + Map fields = hit.getFields(); + assertThat(fields.get("s").getValues(), equalTo(Collections.singletonList(Integer.toString(id)))); + assertThat(fields.get("l").getValues(), equalTo(Collections.singletonList((long) id))); + assertThat(fields.get("d").getValues(), equalTo(Collections.singletonList((double) id))); + assertThat(fields.get("ms").getValues(), equalTo(Arrays.asList(Integer.toString(id), Integer.toString(id + 1)))); + assertThat(fields.get("ml").getValues(), equalTo(Arrays.asList((long) id, id + 1L))); + assertThat(fields.get("md").getValues(), equalTo(Arrays.asList((double) id, id + 1d))); + } + }); } public void testDocValueFieldsWithFieldAlias() throws Exception { @@ -1071,30 +1087,31 @@ public void testDocValueFieldsWithFieldAlias() throws Exception { indexDoc("test", "1", "text_field", "foo", "date_field", formatter.format(date)); refresh("test"); - SearchRequestBuilder builder = prepareSearch().setQuery(matchAllQuery()) - .addDocValueField("text_field_alias") - .addDocValueField("date_field_alias") - .addDocValueField("date_field"); - SearchResponse searchResponse = builder.get(); - - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 1); - SearchHit hit = searchResponse.getHits().getAt(0); - - Map fields = hit.getFields(); - assertThat(fields.keySet(), equalTo(newHashSet("text_field_alias", "date_field_alias", "date_field"))); - - DocumentField textFieldAlias = fields.get("text_field_alias"); - assertThat(textFieldAlias.getName(), equalTo("text_field_alias")); - assertThat(textFieldAlias.getValue(), equalTo("foo")); - - DocumentField dateFieldAlias = fields.get("date_field_alias"); - assertThat(dateFieldAlias.getName(), equalTo("date_field_alias")); - 
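For reference on the format arguments exercised just above: the two-argument addDocValueField(field, format) asks the shard to render the doc value as a pre-formatted string, so the DecimalFormat pattern "#.0" turns the long 4 into "4.0", and "epoch_millis" renders the date as a milliseconds-since-epoch string. A hypothetical fragment restating just that API; it assumes the surrounding ESIntegTestCase and the static imports of the file above, and the index and field names are invented:

    assertResponse(
        prepareSearch("test").setQuery(matchAllQuery())
            .addDocValueField("long_field", "#.0")            // 4 -> "4.0"
            .addDocValueField("date_field", "epoch_millis"),  // date -> millis-since-epoch string
        response -> assertEquals(1, response.getHits().getHits().length)
    );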
assertThat(dateFieldAlias.getValue(), equalTo("1990-12-29")); - - DocumentField dateField = fields.get("date_field"); - assertThat(dateField.getName(), equalTo("date_field")); - assertThat(dateField.getValue(), equalTo("1990-12-29")); + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()) + .addDocValueField("text_field_alias") + .addDocValueField("date_field_alias") + .addDocValueField("date_field"), + response -> { + assertHitCount(response, 1); + SearchHit hit = response.getHits().getAt(0); + + Map fields = hit.getFields(); + assertThat(fields.keySet(), equalTo(newHashSet("text_field_alias", "date_field_alias", "date_field"))); + + DocumentField textFieldAlias = fields.get("text_field_alias"); + assertThat(textFieldAlias.getName(), equalTo("text_field_alias")); + assertThat(textFieldAlias.getValue(), equalTo("foo")); + + DocumentField dateFieldAlias = fields.get("date_field_alias"); + assertThat(dateFieldAlias.getName(), equalTo("date_field_alias")); + assertThat(dateFieldAlias.getValue(), equalTo("1990-12-29")); + + DocumentField dateField = fields.get("date_field"); + assertThat(dateField.getName(), equalTo("date_field")); + assertThat(dateField.getValue(), equalTo("1990-12-29")); + } + ); } public void testWildcardDocValueFieldsWithFieldAlias() throws Exception { @@ -1133,27 +1150,28 @@ public void testWildcardDocValueFieldsWithFieldAlias() throws Exception { indexDoc("test", "1", "text_field", "foo", "date_field", formatter.format(date)); refresh("test"); - SearchRequestBuilder builder = prepareSearch().setQuery(matchAllQuery()).addDocValueField("*alias").addDocValueField("date_field"); - SearchResponse searchResponse = builder.get(); + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()).addDocValueField("*alias").addDocValueField("date_field"), + response -> { + assertHitCount(response, 1); + SearchHit hit = response.getHits().getAt(0); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 1); - SearchHit hit = searchResponse.getHits().getAt(0); + Map fields = hit.getFields(); + assertThat(fields.keySet(), equalTo(newHashSet("text_field_alias", "date_field_alias", "date_field"))); - Map fields = hit.getFields(); - assertThat(fields.keySet(), equalTo(newHashSet("text_field_alias", "date_field_alias", "date_field"))); + DocumentField textFieldAlias = fields.get("text_field_alias"); + assertThat(textFieldAlias.getName(), equalTo("text_field_alias")); + assertThat(textFieldAlias.getValue(), equalTo("foo")); - DocumentField textFieldAlias = fields.get("text_field_alias"); - assertThat(textFieldAlias.getName(), equalTo("text_field_alias")); - assertThat(textFieldAlias.getValue(), equalTo("foo")); + DocumentField dateFieldAlias = fields.get("date_field_alias"); + assertThat(dateFieldAlias.getName(), equalTo("date_field_alias")); + assertThat(dateFieldAlias.getValue(), equalTo("1990-12-29")); - DocumentField dateFieldAlias = fields.get("date_field_alias"); - assertThat(dateFieldAlias.getName(), equalTo("date_field_alias")); - assertThat(dateFieldAlias.getValue(), equalTo("1990-12-29")); - - DocumentField dateField = fields.get("date_field"); - assertThat(dateField.getName(), equalTo("date_field")); - assertThat(dateField.getValue(), equalTo("1990-12-29")); + DocumentField dateField = fields.get("date_field"); + assertThat(dateField.getName(), equalTo("date_field")); + assertThat(dateField.getValue(), equalTo("1990-12-29")); + } + ); } public void testStoredFieldsWithFieldAlias() throws Exception { @@ -1185,18 +1203,19 @@ public void 
testStoredFieldsWithFieldAlias() throws Exception { indexDoc("test", "1", "field1", "value1", "field2", "value2"); refresh("test"); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addStoredField("field1-alias") - .addStoredField("field2-alias") - .get(); - assertHitCount(searchResponse, 1L); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addStoredField("field1-alias").addStoredField("field2-alias"), + response -> { + assertHitCount(response, 1L); - SearchHit hit = searchResponse.getHits().getAt(0); - assertEquals(1, hit.getFields().size()); - assertTrue(hit.getFields().containsKey("field1-alias")); + SearchHit hit = response.getHits().getAt(0); + assertEquals(1, hit.getFields().size()); + assertTrue(hit.getFields().containsKey("field1-alias")); - DocumentField field = hit.getFields().get("field1-alias"); - assertThat(field.getValue().toString(), equalTo("value1")); + DocumentField field = hit.getFields().get("field1-alias"); + assertThat(field.getValue().toString(), equalTo("value1")); + } + ); } public void testWildcardStoredFieldsWithFieldAlias() throws Exception { @@ -1228,19 +1247,20 @@ public void testWildcardStoredFieldsWithFieldAlias() throws Exception { indexDoc("test", "1", "field1", "value1", "field2", "value2"); refresh("test"); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()).addStoredField("field*").get(); - assertHitCount(searchResponse, 1L); + assertResponse(prepareSearch().setQuery(matchAllQuery()).addStoredField("field*"), response -> { + assertHitCount(response, 1L); - SearchHit hit = searchResponse.getHits().getAt(0); - assertEquals(2, hit.getFields().size()); - assertTrue(hit.getFields().containsKey("field1")); - assertTrue(hit.getFields().containsKey("field1-alias")); + SearchHit hit = response.getHits().getAt(0); + assertEquals(2, hit.getFields().size()); + assertTrue(hit.getFields().containsKey("field1")); + assertTrue(hit.getFields().containsKey("field1-alias")); - DocumentField field = hit.getFields().get("field1"); - assertThat(field.getValue().toString(), equalTo("value1")); + DocumentField field = hit.getFields().get("field1"); + assertThat(field.getValue().toString(), equalTo("value1")); - DocumentField fieldAlias = hit.getFields().get("field1-alias"); - assertThat(fieldAlias.getValue().toString(), equalTo("value1")); + DocumentField fieldAlias = hit.getFields().get("field1-alias"); + assertThat(fieldAlias.getValue().toString(), equalTo("value1")); + }); } public void testLoadMetadata() throws Exception { @@ -1254,14 +1274,14 @@ public void testLoadMetadata() throws Exception { .setSource(jsonBuilder().startObject().field("field1", "value").endObject()) ); - SearchResponse response = prepareSearch("test").addStoredField("field1").get(); - assertNoFailures(response); - assertHitCount(response, 1); + assertNoFailuresAndResponse(prepareSearch("test").addStoredField("field1"), response -> { + assertHitCount(response, 1); - Map fields = response.getHits().getAt(0).getMetadataFields(); + Map fields = response.getHits().getAt(0).getMetadataFields(); - assertThat(fields.get("field1"), nullValue()); - assertThat(fields.get("_routing").getValue().toString(), equalTo("1")); - assertThat(response.getHits().getAt(0).getDocumentFields().size(), equalTo(0)); + assertThat(fields.get("field1"), nullValue()); + assertThat(fields.get("_routing").getValue().toString(), equalTo("1")); + assertThat(response.getHits().getAt(0).getDocumentFields().size(), equalTo(0)); + }); } } diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java index faefeea0cb04e..ef5eafa5153ce 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java @@ -8,12 +8,10 @@ package org.elasticsearch.search.functionscore; -import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.geo.GeoPoint; @@ -48,8 +46,10 @@ import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.linearDecayFunction; import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.anyOf; @@ -140,61 +140,65 @@ public void testDistanceScoreGeoLinGaussExp() throws Exception { lonlat.add(20f); lonlat.add(11f); - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH).source(searchSource().query(baseQuery)) + assertHitCount( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH).source(searchSource().query(baseQuery)) + ), + (numDummyDocs + 2) ); - SearchResponse sr = response.actionGet(); - SearchHits sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source(searchSource().query(functionScoreQuery(baseQuery, gaussDecayFunction("loc", lonlat, "1000km")))) + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source(searchSource().query(functionScoreQuery(baseQuery, gaussDecayFunction("loc", lonlat, "1000km")))) + ), + response -> { + assertHitCount(response, (numDummyDocs + 2)); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); - - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat(sh.getAt(1).getId(), equalTo("2")); // Test Exp - response = client().search( - new SearchRequest(new String[] 
{}).searchType(SearchType.QUERY_THEN_FETCH).source(searchSource().query(baseQuery)) + assertHitCount( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH).source(searchSource().query(baseQuery)) + ), + (numDummyDocs + 2) ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source(searchSource().query(functionScoreQuery(baseQuery, linearDecayFunction("loc", lonlat, "1000km")))) + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source(searchSource().query(functionScoreQuery(baseQuery, linearDecayFunction("loc", lonlat, "1000km")))) + ), + response -> { + assertHitCount(response, (numDummyDocs + 2)); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat(sh.getAt(1).getId(), equalTo("2")); // Test Lin - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH).source(searchSource().query(baseQuery)) + assertHitCount( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH).source(searchSource().query(baseQuery)) + ), + (numDummyDocs + 2) ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source(searchSource().query(functionScoreQuery(baseQuery, exponentialDecayFunction("loc", lonlat, "1000km")))) + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source(searchSource().query(functionScoreQuery(baseQuery, exponentialDecayFunction("loc", lonlat, "1000km")))) + ), + response -> { + assertHitCount(response, (numDummyDocs + 2)); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); - - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat(sh.getAt(1).getId(), equalTo("2")); } public void testDistanceScoreGeoLinGaussExpWithOffset() throws Exception { @@ -245,67 +249,76 @@ public void testDistanceScoreGeoLinGaussExpWithOffset() throws Exception { // Test Gauss - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().size(numDummyDocs + 2) - .query( - functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 1.0, 5.0, 1.0)).boostMode( - CombineFunction.REPLACE + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().size(numDummyDocs + 2) + .query( + functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 1.0, 5.0, 1.0)).boostMode( + CombineFunction.REPLACE + ) ) - ) - ) + ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 
2))); + assertThat(sh.getAt(0).getId(), anyOf(equalTo("1"), equalTo("2"))); + assertThat(sh.getAt(1).getId(), anyOf(equalTo("1"), equalTo("2"))); + assertThat(sh.getAt(1).getScore(), equalTo(sh.getAt(0).getScore())); + for (int i = 0; i < numDummyDocs; i++) { + assertThat(sh.getAt(i + 2).getId(), equalTo(Integer.toString(i + 3))); + } + } ); - SearchResponse sr = response.actionGet(); - SearchHits sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); - assertThat(sh.getAt(0).getId(), anyOf(equalTo("1"), equalTo("2"))); - assertThat(sh.getAt(1).getId(), anyOf(equalTo("1"), equalTo("2"))); - assertThat(sh.getAt(1).getScore(), equalTo(sh.getAt(0).getScore())); - for (int i = 0; i < numDummyDocs; i++) { - assertThat(sh.getAt(i + 2).getId(), equalTo(Integer.toString(i + 3))); - } // Test Exp - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().size(numDummyDocs + 2) - .query( - functionScoreQuery(termQuery("test", "value"), exponentialDecayFunction("num", 1.0, 5.0, 1.0)).boostMode( - CombineFunction.REPLACE + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().size(numDummyDocs + 2) + .query( + functionScoreQuery(termQuery("test", "value"), exponentialDecayFunction("num", 1.0, 5.0, 1.0)).boostMode( + CombineFunction.REPLACE + ) ) - ) - ) + ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); + assertThat(sh.getAt(0).getId(), anyOf(equalTo("1"), equalTo("2"))); + assertThat(sh.getAt(1).getId(), anyOf(equalTo("1"), equalTo("2"))); + assertThat(sh.getAt(1).getScore(), equalTo(sh.getAt(0).getScore())); + for (int i = 0; i < numDummyDocs; i++) { + assertThat(sh.getAt(i + 2).getId(), equalTo(Integer.toString(i + 3))); + } + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); - assertThat(sh.getAt(0).getId(), anyOf(equalTo("1"), equalTo("2"))); - assertThat(sh.getAt(1).getId(), anyOf(equalTo("1"), equalTo("2"))); - assertThat(sh.getAt(1).getScore(), equalTo(sh.getAt(0).getScore())); - for (int i = 0; i < numDummyDocs; i++) { - assertThat(sh.getAt(i + 2).getId(), equalTo(Integer.toString(i + 3))); - } // Test Lin - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().size(numDummyDocs + 2) - .query( - functionScoreQuery(termQuery("test", "value"), linearDecayFunction("num", 1.0, 20.0, 1.0)).boostMode( - CombineFunction.REPLACE + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().size(numDummyDocs + 2) + .query( + functionScoreQuery(termQuery("test", "value"), linearDecayFunction("num", 1.0, 20.0, 1.0)).boostMode( + CombineFunction.REPLACE + ) ) - ) - ) + ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); + assertThat(sh.getAt(0).getId(), anyOf(equalTo("1"), equalTo("2"))); + assertThat(sh.getAt(1).getId(), anyOf(equalTo("1"), equalTo("2"))); + assertThat(sh.getAt(1).getScore(), equalTo(sh.getAt(0).getScore())); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); - assertThat(sh.getAt(0).getId(), 
anyOf(equalTo("1"), equalTo("2"))); - assertThat(sh.getAt(1).getId(), anyOf(equalTo("1"), equalTo("2"))); - assertThat(sh.getAt(1).getScore(), equalTo(sh.getAt(0).getScore())); } public void testBoostModeSettingWorks() throws Exception { @@ -364,48 +377,56 @@ public void testBoostModeSettingWorks() throws Exception { lonlat.add(20f); lonlat.add(11f); - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("loc", lonlat, "1000km")).boostMode( - CombineFunction.MULTIPLY + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("loc", lonlat, "1000km")).boostMode( + CombineFunction.MULTIPLY + ) ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (2))); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat(sh.getAt(1).getId(), equalTo("2")); + } ); - SearchResponse sr = response.actionGet(); - SearchHits sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (2))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat(sh.getAt(1).getId(), equalTo("2")); - // Test Exp - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source(searchSource().query(termQuery("test", "value"))) + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source(searchSource().query(termQuery("test", "value"))) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (2))); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat(sh.getAt(1).getId(), equalTo("2")); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (2))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat(sh.getAt(1).getId(), equalTo("2")); - - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("loc", lonlat, "1000km")).boostMode( - CombineFunction.REPLACE + + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("loc", lonlat, "1000km")).boostMode( + CombineFunction.REPLACE + ) ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (2))); + assertThat(sh.getAt(0).getId(), equalTo("2")); + assertThat(sh.getAt(1).getId(), equalTo("1")); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (2))); - assertThat(sh.getAt(0).getId(), equalTo("2")); - assertThat(sh.getAt(1).getId(), equalTo("1")); } @@ -446,34 +467,44 @@ public void testParseGeoPoint() throws Exception { ScoreFunctionBuilders.weightFactorFunction(randomIntBetween(1, 10)) ); GeoPoint point = new GeoPoint(20, 11); - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - 
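Aside on the coordinate handling in testParseGeoPoint (this hunk, continuing below): new GeoPoint(20, 11) takes (lat, lon), while the float[] { 11, 20 } shorthand that follows is [lon, lat], so both literals name the same location and the test rightly expects the same score from each. In isolation:

    GeoPoint point = new GeoPoint(20, 11);   // constructor order is (lat, lon)
    float[] coords = { 11, 20 };             // array shorthand is [lon, lat]: same point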
functionScoreQuery(baseQueryBuilder, gaussDecayFunction("loc", point, "1000km")).boostMode(CombineFunction.REPLACE) + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("loc", point, "1000km")).boostMode( + CombineFunction.REPLACE + ) + ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (1))); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat((double) sh.getAt(0).getScore(), closeTo(1.0, 1.e-5)); + } ); - SearchResponse sr = response.actionGet(); - SearchHits sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (1))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat((double) sh.getAt(0).getScore(), closeTo(1.0, 1.e-5)); // this is equivalent to new GeoPoint(20, 11); just flipped so scores must be same float[] coords = { 11, 20 }; - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(baseQueryBuilder, gaussDecayFunction("loc", coords, "1000km")).boostMode(CombineFunction.REPLACE) + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("loc", coords, "1000km")).boostMode( + CombineFunction.REPLACE + ) + ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (1))); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat((double) sh.getAt(0).getScore(), closeTo(1.0f, 1.e-5)); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (1))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat((double) sh.getAt(0).getScore(), closeTo(1.0f, 1.e-5)); } public void testCombineModes() throws Exception { @@ -505,95 +536,120 @@ public void testCombineModes() throws Exception { ScoreFunctionBuilders.weightFactorFunction(2) ); // decay score should return 0.5 for this function and baseQuery should return 2.0f as it's score - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode( - CombineFunction.MULTIPLY + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode( + CombineFunction.MULTIPLY + ) ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (1))); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat((double) sh.getAt(0).getScore(), closeTo(1.0, 1.e-5)); + } ); - SearchResponse sr = response.actionGet(); - SearchHits sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (1))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat((double) sh.getAt(0).getScore(), closeTo(1.0, 1.e-5)); - - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(baseQueryBuilder, 
gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode( - CombineFunction.REPLACE + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode( + CombineFunction.REPLACE + ) ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (1))); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat((double) sh.getAt(0).getScore(), closeTo(0.5, 1.e-5)); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (1))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat((double) sh.getAt(0).getScore(), closeTo(0.5, 1.e-5)); - - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode(CombineFunction.SUM) + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode( + CombineFunction.SUM + ) + ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (1))); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat((double) sh.getAt(0).getScore(), closeTo(2.0 + 0.5, 1.e-5)); + logger.info( + "--> Hit[0] {} Explanation:\n {}", + response.getHits().getAt(0).getId(), + response.getHits().getAt(0).getExplanation() + ); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (1))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat((double) sh.getAt(0).getScore(), closeTo(2.0 + 0.5, 1.e-5)); - logger.info("--> Hit[0] {} Explanation:\n {}", sr.getHits().getAt(0).getId(), sr.getHits().getAt(0).getExplanation()); - - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode(CombineFunction.AVG) + + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode( + CombineFunction.AVG + ) + ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (1))); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat((double) sh.getAt(0).getScore(), closeTo((2.0 + 0.5) / 2, 1.e-5)); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (1))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat((double) sh.getAt(0).getScore(), closeTo((2.0 + 0.5) / 2, 1.e-5)); - - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode(CombineFunction.MIN) + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + 
.source( + searchSource().query( + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode( + CombineFunction.MIN + ) + ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (1))); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat((double) sh.getAt(0).getScore(), closeTo(0.5, 1.e-5)); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (1))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat((double) sh.getAt(0).getScore(), closeTo(0.5, 1.e-5)); - - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode(CombineFunction.MAX) + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode( + CombineFunction.MAX + ) + ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (1))); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat((double) sh.getAt(0).getScore(), closeTo(2.0, 1.e-5)); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (1))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat((double) sh.getAt(0).getScore(), closeTo(2.0, 1.e-5)); - } public void testExceptionThrownIfScaleLE0() throws Exception { @@ -623,18 +679,18 @@ public void testExceptionThrownIfScaleLE0() throws Exception { ).actionGet(); refresh(); - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query(functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num1", "2013-05-28", "-1d"))) - ) + SearchPhaseExecutionException e = expectThrows( + SearchPhaseExecutionException.class, + () -> client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num1", "2013-05-28", "-1d")) + ) + ) + ).actionGet() ); - try { - response.actionGet(); - fail("Expected SearchPhaseExecutionException"); - } catch (SearchPhaseExecutionException e) { - assertThat(e.getMessage(), is("all shards failed")); - } + assertThat(e.getMessage(), is("all shards failed")); } public void testParseDateMath() throws Exception { @@ -670,24 +726,23 @@ public void testParseDateMath() throws Exception { ).actionGet(); refresh(); - SearchResponse sr = client().search( - new SearchRequest(new String[] {}).source( - searchSource().query(functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num1", "now", "2d"))) - ) - ).get(); - - assertNoFailures(sr); - assertOrderedSearchHits(sr, "1", "2"); - - sr = client().search( - new SearchRequest(new String[] {}).source( - searchSource().query(functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num1", "now-1d", "2d"))) - ) - ).get(); - - assertNoFailures(sr); - assertOrderedSearchHits(sr, "2", "1"); + assertNoFailuresAndResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().query(functionScoreQuery(termQuery("test", "value"), 
gaussDecayFunction("num1", "now", "2d"))) + ) + ), + response -> assertOrderedSearchHits(response, "1", "2") + ); + assertNoFailuresAndResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().query(functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num1", "now-1d", "2d"))) + ) + ), + response -> assertOrderedSearchHits(response, "2", "1") + ); } public void testValueMissingLin() throws Exception { @@ -729,32 +784,31 @@ public void testValueMissingLin() throws Exception { refresh(); - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery( - baseQuery, - new FilterFunctionBuilder[] { - new FilterFunctionBuilder(linearDecayFunction("num1", "2013-05-28", "+3d")), - new FilterFunctionBuilder(linearDecayFunction("num2", "0.0", "1")) } - ).scoreMode(FunctionScoreQuery.ScoreMode.MULTIPLY) + assertNoFailuresAndResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery( + baseQuery, + new FilterFunctionBuilder[] { + new FilterFunctionBuilder(linearDecayFunction("num1", "2013-05-28", "+3d")), + new FilterFunctionBuilder(linearDecayFunction("num2", "0.0", "1")) } + ).scoreMode(FunctionScoreQuery.ScoreMode.MULTIPLY) + ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getHits().length, equalTo(4)); + double[] scores = new double[4]; + for (int i = 0; i < sh.getHits().length; i++) { + scores[Integer.parseInt(sh.getAt(i).getId()) - 1] = sh.getAt(i).getScore(); + } + assertThat(scores[0], lessThan(scores[1])); + assertThat(scores[2], lessThan(scores[3])); + } ); - - SearchResponse sr = response.actionGet(); - - assertNoFailures(sr); - SearchHits sh = sr.getHits(); - assertThat(sh.getHits().length, equalTo(4)); - double[] scores = new double[4]; - for (int i = 0; i < sh.getHits().length; i++) { - scores[Integer.parseInt(sh.getAt(i).getId()) - 1] = sh.getAt(i).getScore(); - } - assertThat(scores[0], lessThan(scores[1])); - assertThat(scores[2], lessThan(scores[3])); - } public void testDateWithoutOrigin() throws Exception { @@ -810,32 +864,32 @@ public void testDateWithoutOrigin() throws Exception { refresh(); - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery( - QueryBuilders.matchAllQuery(), - new FilterFunctionBuilder[] { - new FilterFunctionBuilder(linearDecayFunction("num1", null, "7000d")), - new FilterFunctionBuilder(gaussDecayFunction("num1", null, "1d")), - new FilterFunctionBuilder(exponentialDecayFunction("num1", null, "7000d")) } - ).scoreMode(FunctionScoreQuery.ScoreMode.MULTIPLY) + assertNoFailuresAndResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery( + QueryBuilders.matchAllQuery(), + new FilterFunctionBuilder[] { + new FilterFunctionBuilder(linearDecayFunction("num1", null, "7000d")), + new FilterFunctionBuilder(gaussDecayFunction("num1", null, "1d")), + new FilterFunctionBuilder(exponentialDecayFunction("num1", null, "7000d")) } + ).scoreMode(FunctionScoreQuery.ScoreMode.MULTIPLY) + ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getHits().length, equalTo(3)); + double[] scores = new double[4]; + for (int i = 0; i < 
sh.getHits().length; i++) { + scores[Integer.parseInt(sh.getAt(i).getId()) - 1] = sh.getAt(i).getScore(); + } + assertThat(scores[1], lessThan(scores[0])); + assertThat(scores[2], lessThan(scores[1])); + } ); - - SearchResponse sr = response.actionGet(); - assertNoFailures(sr); - SearchHits sh = sr.getHits(); - assertThat(sh.getHits().length, equalTo(3)); - double[] scores = new double[4]; - for (int i = 0; i < sh.getHits().length; i++) { - scores[Integer.parseInt(sh.getAt(i).getId()) - 1] = sh.getAt(i).getScore(); - } - assertThat(scores[1], lessThan(scores[0])); - assertThat(scores[2], lessThan(scores[1])); - } public void testManyDocsLin() throws Exception { @@ -891,33 +945,34 @@ public void testManyDocsLin() throws Exception { List lonlat = new ArrayList<>(); lonlat.add(100f); lonlat.add(110f); - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().size(numDocs) - .query( - functionScoreQuery( - termQuery("test", "value"), - new FilterFunctionBuilder[] { - new FilterFunctionBuilder(linearDecayFunction("date", "2013-05-30", "+15d")), - new FilterFunctionBuilder(linearDecayFunction("geo", lonlat, "1000km")), - new FilterFunctionBuilder(linearDecayFunction("num", numDocs, numDocs / 2.0)) } - ).scoreMode(ScoreMode.MULTIPLY).boostMode(CombineFunction.REPLACE) - ) - ) + assertNoFailuresAndResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().size(numDocs) + .query( + functionScoreQuery( + termQuery("test", "value"), + new FilterFunctionBuilder[] { + new FilterFunctionBuilder(linearDecayFunction("date", "2013-05-30", "+15d")), + new FilterFunctionBuilder(linearDecayFunction("geo", lonlat, "1000km")), + new FilterFunctionBuilder(linearDecayFunction("num", numDocs, numDocs / 2.0)) } + ).scoreMode(ScoreMode.MULTIPLY).boostMode(CombineFunction.REPLACE) + ) + ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getHits().length, equalTo(numDocs)); + double[] scores = new double[numDocs]; + for (int i = 0; i < numDocs; i++) { + scores[Integer.parseInt(sh.getAt(i).getId())] = sh.getAt(i).getScore(); + } + for (int i = 0; i < numDocs - 1; i++) { + assertThat(scores[i], lessThan(scores[i + 1])); + } + } ); - - SearchResponse sr = response.actionGet(); - assertNoFailures(sr); - SearchHits sh = sr.getHits(); - assertThat(sh.getHits().length, equalTo(numDocs)); - double[] scores = new double[numDocs]; - for (int i = 0; i < numDocs; i++) { - scores[Integer.parseInt(sh.getAt(i).getId())] = sh.getAt(i).getScore(); - } - for (int i = 0; i < numDocs - 1; i++) { - assertThat(scores[i], lessThan(scores[i + 1])); - } } public void testParsingExceptionIfFieldDoesNotExist() throws Exception { @@ -953,23 +1008,22 @@ public void testParsingExceptionIfFieldDoesNotExist() throws Exception { List lonlat = new ArrayList<>(); lonlat.add(100f); lonlat.add(110f); - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().size(numDocs) - .query( - functionScoreQuery(termQuery("test", "value"), linearDecayFunction("type.geo", lonlat, "1000km")).scoreMode( - FunctionScoreQuery.ScoreMode.MULTIPLY + + SearchPhaseExecutionException e = expectThrows( + SearchPhaseExecutionException.class, + () -> client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().size(numDocs) + .query( + 
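The failure-path tests here switch from try { response.actionGet(); fail(...); } catch (SearchPhaseExecutionException e) blocks to expectThrows, which is shorter and cannot pass silently when no exception is thrown (the conversion continues in the hunks below). The bare pattern, with searchRequestThatMustFail standing in as a placeholder for the real request:

    SearchPhaseExecutionException e = expectThrows(
        SearchPhaseExecutionException.class,
        () -> client().search(searchRequestThatMustFail).actionGet()  // hypothetical failing request
    );
    assertThat(e.getMessage(), is("all shards failed"));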
functionScoreQuery(termQuery("test", "value"), linearDecayFunction("type.geo", lonlat, "1000km")).scoreMode( + FunctionScoreQuery.ScoreMode.MULTIPLY + ) ) - ) - ) + ) + ).actionGet() ); - try { - response.actionGet(); - fail("Expected SearchPhaseExecutionException"); - } catch (SearchPhaseExecutionException e) { - assertThat(e.getMessage(), is("all shards failed")); - } + assertThat(e.getMessage(), is("all shards failed")); } public void testParsingExceptionIfFieldTypeDoesNotMatch() throws Exception { @@ -996,20 +1050,20 @@ public void testParsingExceptionIfFieldTypeDoesNotMatch() throws Exception { ).actionGet(); refresh(); // so, we indexed a string field, but now we try to score a num field - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(termQuery("test", "value"), linearDecayFunction("num", 1.0, 0.5)).scoreMode(ScoreMode.MULTIPLY) + SearchPhaseExecutionException e = expectThrows( + SearchPhaseExecutionException.class, + () -> client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(termQuery("test", "value"), linearDecayFunction("num", 1.0, 0.5)).scoreMode( + ScoreMode.MULTIPLY + ) + ) ) - ) + ).actionGet() ); - try { - response.actionGet(); - fail("Expected SearchPhaseExecutionException"); - } catch (SearchPhaseExecutionException e) { - assertThat(e.getMessage(), is("all shards failed")); - } + assertThat(e.getMessage(), is("all shards failed")); } public void testNoQueryGiven() throws Exception { @@ -1033,15 +1087,17 @@ public void testNoQueryGiven() throws Exception { .actionGet(); refresh(); // so, we indexed a string field, but now we try to score a num field - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(linearDecayFunction("num", 1, 0.5)).scoreMode(FunctionScoreQuery.ScoreMode.MULTIPLY) + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(linearDecayFunction("num", 1, 0.5)).scoreMode(FunctionScoreQuery.ScoreMode.MULTIPLY) + ) ) - ) + ), + response -> {} ); - response.actionGet(); } public void testMultiFieldOptions() throws Exception { @@ -1099,41 +1155,47 @@ public void testMultiFieldOptions() throws Exception { indexRandom(true, doc1, doc2); - ActionFuture response = client().search(new SearchRequest(new String[] {}).source(searchSource().query(baseQuery))); - SearchResponse sr = response.actionGet(); - assertSearchHits(sr, "1", "2"); - SearchHits sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (2))); + assertResponse(client().search(new SearchRequest(new String[] {}).source(searchSource().query(baseQuery))), response -> { + assertSearchHits(response, "1", "2"); + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (2))); + }); List lonlat = new ArrayList<>(); lonlat.add(20f); lonlat.add(10f); - response = client().search( - new SearchRequest(new String[] {}).source( - searchSource().query( - functionScoreQuery(baseQuery, gaussDecayFunction("loc", lonlat, "1000km").setMultiValueMode(MultiValueMode.MIN)) + assertResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().query( + functionScoreQuery(baseQuery, 
gaussDecayFunction("loc", lonlat, "1000km").setMultiValueMode(MultiValueMode.MIN)) + ) ) - ) + ), + response -> { + assertSearchHits(response, "1", "2"); + SearchHits sh = response.getHits(); + + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat(sh.getAt(1).getId(), equalTo("2")); + } ); - sr = response.actionGet(); - assertSearchHits(sr, "1", "2"); - sh = sr.getHits(); - - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat(sh.getAt(1).getId(), equalTo("2")); - response = client().search( - new SearchRequest(new String[] {}).source( - searchSource().query( - functionScoreQuery(baseQuery, gaussDecayFunction("loc", lonlat, "1000km").setMultiValueMode(MultiValueMode.MAX)) + assertResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().query( + functionScoreQuery(baseQuery, gaussDecayFunction("loc", lonlat, "1000km").setMultiValueMode(MultiValueMode.MAX)) + ) ) - ) + ), + response -> { + assertSearchHits(response, "1", "2"); + SearchHits sh = response.getHits(); + + assertThat(sh.getAt(0).getId(), equalTo("2")); + assertThat(sh.getAt(1).getId(), equalTo("1")); + } ); - sr = response.actionGet(); - assertSearchHits(sr, "1", "2"); - sh = sr.getHits(); - - assertThat(sh.getAt(0).getId(), equalTo("2")); - assertThat(sh.getAt(1).getId(), equalTo("1")); // Now test AVG and SUM @@ -1149,30 +1211,36 @@ public void testMultiFieldOptions() throws Exception { .setSource(jsonBuilder().startObject().field("test", "value").field("num", 1.0).endObject()); indexRandom(true, doc1, doc2); - response = client().search( - new SearchRequest(new String[] {}).source( - searchSource().query( - functionScoreQuery(baseQuery, linearDecayFunction("num", "0", "10").setMultiValueMode(MultiValueMode.SUM)) + assertResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().query( + functionScoreQuery(baseQuery, linearDecayFunction("num", "0", "10").setMultiValueMode(MultiValueMode.SUM)) + ) ) - ) + ), + response -> { + assertSearchHits(response, "1", "2"); + SearchHits sh = response.getHits(); + + assertThat(sh.getAt(0).getId(), equalTo("2")); + assertThat(sh.getAt(1).getId(), equalTo("1")); + assertThat(1.0 - sh.getAt(0).getScore(), closeTo((1.0 - sh.getAt(1).getScore()) / 3.0, 1.e-6d)); + } ); - sr = response.actionGet(); - assertSearchHits(sr, "1", "2"); - sh = sr.getHits(); - - assertThat(sh.getAt(0).getId(), equalTo("2")); - assertThat(sh.getAt(1).getId(), equalTo("1")); - assertThat(1.0 - sh.getAt(0).getScore(), closeTo((1.0 - sh.getAt(1).getScore()) / 3.0, 1.e-6d)); - response = client().search( - new SearchRequest(new String[] {}).source( - searchSource().query( - functionScoreQuery(baseQuery, linearDecayFunction("num", "0", "10").setMultiValueMode(MultiValueMode.AVG)) + assertResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().query( + functionScoreQuery(baseQuery, linearDecayFunction("num", "0", "10").setMultiValueMode(MultiValueMode.AVG)) + ) ) - ) + ), + response -> { + assertSearchHits(response, "1", "2"); + SearchHits sh = response.getHits(); + assertThat((double) (sh.getAt(0).getScore()), closeTo((sh.getAt(1).getScore()), 1.e-6d)); + } ); - sr = response.actionGet(); - assertSearchHits(sr, "1", "2"); - sh = sr.getHits(); - assertThat((double) (sh.getAt(0).getScore()), closeTo((sh.getAt(1).getScore()), 1.e-6d)); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java index e9ce09f7455a2..6353c34491326 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java @@ -12,7 +12,6 @@ import org.apache.lucene.search.Explanation; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.lucene.search.function.CombineFunction; import org.elasticsearch.common.settings.Settings; @@ -41,12 +40,13 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.ExecutionException; import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction; import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -112,7 +112,7 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(ExplainableScriptPlugin.class); } - public void testExplainScript() throws InterruptedException, IOException { + public void testExplainScript() throws InterruptedException, IOException, ExecutionException { List<IndexRequestBuilder> indexRequests = new ArrayList<>(); for (int i = 0; i < 20; i++) { indexRequests.add( @@ -124,28 +124,30 @@ public void testExplainScript() throws InterruptedException, IOException { indexRandom(true, true, indexRequests); client().admin().indices().prepareRefresh().get(); ensureYellow(); - SearchResponse response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().explain(true) - .query( - functionScoreQuery( - termQuery("text", "text"), - scriptFunction(new Script(ScriptType.INLINE, "test", "explainable_script", Collections.emptyMap())) - ).boostMode(CombineFunction.REPLACE) - ) - ) - ).actionGet(); - - assertNoFailures(response); - SearchHits hits = response.getHits(); - assertThat(hits.getTotalHits().value, equalTo(20L)); - int idCounter = 19; - for (SearchHit hit : hits.getHits()) { - assertThat(hit.getId(), equalTo(Integer.toString(idCounter))); - assertThat(hit.getExplanation().toString(), containsString(Double.toString(idCounter))); - assertThat(hit.getExplanation().getDetails().length, equalTo(2)); - idCounter--; - } + assertNoFailuresAndResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().explain(true) + .query( + functionScoreQuery( + termQuery("text", "text"), + scriptFunction(new Script(ScriptType.INLINE, "test", "explainable_script", Collections.emptyMap())) + ).boostMode(CombineFunction.REPLACE) + ) + ) + ), + response -> { + SearchHits hits = response.getHits(); + assertThat(hits.getTotalHits().value, equalTo(20L)); + int idCounter = 19; + for (SearchHit hit : hits.getHits()) { +
assertThat(hit.getId(), equalTo(Integer.toString(idCounter))); + assertThat(hit.getExplanation().toString(), containsString(Double.toString(idCounter))); + assertThat(hit.getExplanation().getDetails().length, equalTo(2)); + idCounter--; + } + } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreFieldValueIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreFieldValueIT.java index 61cccfdf114b1..238f5b873a8dd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreFieldValueIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreFieldValueIT.java @@ -9,9 +9,9 @@ package org.elasticsearch.search.functionscore; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.lucene.search.function.FieldValueFactorFunction; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import java.io.IOException; @@ -20,8 +20,8 @@ import static org.elasticsearch.index.query.QueryBuilders.simpleQueryStringQuery; import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.fieldValueFactorFunction; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; /** @@ -88,10 +88,11 @@ public void testFieldValueFactor() throws IOException { // doc 3 doesn't have a "test" field, so an exception will be thrown try { - SearchResponse response = prepareSearch("test").setExplain(randomBoolean()) - .setQuery(functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("test"))) - .get(); - assertFailures(response); + assertResponse( + prepareSearch("test").setExplain(randomBoolean()) + .setQuery(functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("test"))), + ElasticsearchAssertions::assertFailures + ); } catch (SearchPhaseExecutionException e) { // We are expecting an exception, because 3 has no field } @@ -111,30 +112,32 @@ public void testFieldValueFactor() throws IOException { ); // field is not mapped but we're defaulting it to 100 so all documents should have the same score - SearchResponse response = prepareSearch("test").setExplain(randomBoolean()) - .setQuery( - functionScoreQuery( - matchAllQuery(), - fieldValueFactorFunction("notmapped").modifier(FieldValueFactorFunction.Modifier.RECIPROCAL).missing(100) - ) - ) - .get(); - assertEquals(response.getHits().getAt(0).getScore(), response.getHits().getAt(2).getScore(), 0); + assertResponse( + prepareSearch("test").setExplain(randomBoolean()) + .setQuery( + functionScoreQuery( + matchAllQuery(), + fieldValueFactorFunction("notmapped").modifier(FieldValueFactorFunction.Modifier.RECIPROCAL).missing(100) + ) + ), + response -> assertEquals(response.getHits().getAt(0).getScore(), response.getHits().getAt(2).getScore(), 0) + ); client().prepareIndex("test").setId("2").setSource("test", -1, "body", "foo").get(); refresh(); // -1 divided by 0 is infinity, which should provoke an exception. 
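(The try/catch in the hunk below stays in place because, as its own comment notes, the failure can surface either as shard-level failures in the response or as a thrown SearchPhaseExecutionException.) More generally, every hunk in this patch performs the same mechanical migration: instead of materializing a SearchResponse via get()/actionGet(), the test hands a callback to an ElasticsearchAssertions helper. As a rough mental model only, not the real helper, such a method would run the request, apply the caller's assertions, and release the response afterwards, which would also explain why migrated tests widen their throws clauses (testExplainScript above gains ExecutionException):

import java.util.function.Consumer;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;

final class ResponseAssertSketch {
    // Hypothetical sketch, not the real ElasticsearchAssertions.assertResponse:
    // execute the request, run the caller's assertions, then release the response
    // so the test never holds (and potentially leaks) a SearchResponse local.
    static void assertResponseSketch(SearchRequestBuilder request, Consumer<SearchResponse> consumer) {
        SearchResponse response = request.get();
        try {
            consumer.accept(response); // the caller's assertions
        } finally {
            response.decRef();         // assumed release hook for ref-counted responses
        }
    }
}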
try { - response = prepareSearch("test").setExplain(randomBoolean()) - .setQuery( - functionScoreQuery( - simpleQueryStringQuery("foo"), - fieldValueFactorFunction("test").modifier(FieldValueFactorFunction.Modifier.RECIPROCAL).factor(0) - ) - ) - .get(); - assertFailures(response); + assertResponse( + prepareSearch("test").setExplain(randomBoolean()) + .setQuery( + functionScoreQuery( + simpleQueryStringQuery("foo"), + fieldValueFactorFunction("test").modifier(FieldValueFactorFunction.Modifier.RECIPROCAL).factor(0) + ) + ), + ElasticsearchAssertions::assertFailures + ); } catch (SearchPhaseExecutionException e) { // This is fine, the query will throw an exception if executed // locally, instead of just having failures diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java index e32abeb481a2a..d2f68d8dd1909 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java @@ -41,6 +41,8 @@ import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -76,7 +78,7 @@ protected Map, Object>> pluginScripts() { } } - public void testScriptScoresNested() throws IOException { + public void testScriptScoresNested() throws Exception { createIndex(INDEX); index(INDEX, "1", jsonBuilder().startObject().field("dummy_field", 1).endObject()); refresh(); @@ -84,39 +86,46 @@ public void testScriptScoresNested() throws IOException { Script scriptOne = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "1", Collections.emptyMap()); Script scriptTwo = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "get score value", Collections.emptyMap()); - SearchResponse response = client().search( - new SearchRequest(new String[] {}).source( - searchSource().query( - functionScoreQuery( - functionScoreQuery(functionScoreQuery(scriptFunction(scriptOne)), scriptFunction(scriptTwo)), - scriptFunction(scriptTwo) + assertNoFailuresAndResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().query( + functionScoreQuery( + functionScoreQuery(functionScoreQuery(scriptFunction(scriptOne)), scriptFunction(scriptTwo)), + scriptFunction(scriptTwo) + ) ) ) - ) - ).actionGet(); - assertNoFailures(response); - assertThat(response.getHits().getAt(0).getScore(), equalTo(1.0f)); + ), + response -> assertThat(response.getHits().getAt(0).getScore(), equalTo(1.0f)) + ); } - public void testScriptScoresWithAgg() throws IOException { + public void testScriptScoresWithAgg() throws Exception { createIndex(INDEX); index(INDEX, "1", jsonBuilder().startObject().field("dummy_field", 1).endObject()); refresh(); Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "get score value", Collections.emptyMap()); - SearchResponse response = client().search( - new SearchRequest(new 
String[] {}).source( - searchSource().query(functionScoreQuery(scriptFunction(script))).aggregation(terms("score_agg").script(script)) - ) - ).actionGet(); - assertNoFailures(response); - assertThat(response.getHits().getAt(0).getScore(), equalTo(1.0f)); - assertThat(((Terms) response.getAggregations().asMap().get("score_agg")).getBuckets().get(0).getKeyAsString(), equalTo("1.0")); - assertThat(((Terms) response.getAggregations().asMap().get("score_agg")).getBuckets().get(0).getDocCount(), is(1L)); + assertNoFailuresAndResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().query(functionScoreQuery(scriptFunction(script))).aggregation(terms("score_agg").script(script)) + ) + ), + response -> { + assertThat(response.getHits().getAt(0).getScore(), equalTo(1.0f)); + assertThat( + ((Terms) response.getAggregations().asMap().get("score_agg")).getBuckets().get(0).getKeyAsString(), + equalTo("1.0") + ); + assertThat(((Terms) response.getAggregations().asMap().get("score_agg")).getBuckets().get(0).getDocCount(), is(1L)); + } + ); } - public void testMinScoreFunctionScoreBasic() throws IOException { + public void testMinScoreFunctionScoreBasic() throws Exception { float score = randomValueOtherThanMany((f) -> Float.compare(f, 0) < 0, ESTestCase::randomFloat); float minScore = randomValueOtherThanMany((f) -> Float.compare(f, 0) < 0, ESTestCase::randomFloat); index( @@ -130,34 +139,42 @@ public void testMinScoreFunctionScoreBasic() throws IOException { ensureYellow(); Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['random_score']", Collections.emptyMap()); - SearchResponse searchResponse = client().search( - new SearchRequest(new String[] {}).source( - searchSource().query(functionScoreQuery(scriptFunction(script)).setMinScore(minScore)) - ) - ).actionGet(); - if (score < minScore) { - assertThat(searchResponse.getHits().getTotalHits().value, is(0L)); - } else { - assertThat(searchResponse.getHits().getTotalHits().value, is(1L)); - } + assertResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().query(functionScoreQuery(scriptFunction(script)).setMinScore(minScore)) + ) + ), + response -> { + if (score < minScore) { + assertThat(response.getHits().getTotalHits().value, is(0L)); + } else { + assertThat(response.getHits().getTotalHits().value, is(1L)); + } + } + ); - searchResponse = client().search( - new SearchRequest(new String[] {}).source( - searchSource().query( - functionScoreQuery( - new MatchAllQueryBuilder(), - new FilterFunctionBuilder[] { - new FilterFunctionBuilder(scriptFunction(script)), - new FilterFunctionBuilder(scriptFunction(script)) } - ).scoreMode(FunctionScoreQuery.ScoreMode.AVG).setMinScore(minScore) + assertResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().query( + functionScoreQuery( + new MatchAllQueryBuilder(), + new FilterFunctionBuilder[] { + new FilterFunctionBuilder(scriptFunction(script)), + new FilterFunctionBuilder(scriptFunction(script)) } + ).scoreMode(FunctionScoreQuery.ScoreMode.AVG).setMinScore(minScore) + ) ) - ) - ).actionGet(); - if (score < minScore) { - assertThat(searchResponse.getHits().getTotalHits().value, is(0L)); - } else { - assertThat(searchResponse.getHits().getTotalHits().value, is(1L)); - } + ), + response -> { + if (score < minScore) { + assertThat(response.getHits().getTotalHits().value, is(0L)); + } else { + assertThat(response.getHits().getTotalHits().value, is(1L)); + } + } + ); } public void 
testMinScoreFunctionScoreManyDocsAndRandomMinScore() throws IOException, ExecutionException, InterruptedException { @@ -178,26 +195,33 @@ public void testMinScoreFunctionScoreManyDocsAndRandomMinScore() throws IOExcept numMatchingDocs = numDocs; } - SearchResponse searchResponse = client().search( - new SearchRequest(new String[] {}).source( - searchSource().query(functionScoreQuery(scriptFunction(script)).setMinScore(minScore)).size(numDocs) - ) - ).actionGet(); - assertMinScoreSearchResponses(numDocs, searchResponse, numMatchingDocs); - - searchResponse = client().search( - new SearchRequest(new String[] {}).source( - searchSource().query( - functionScoreQuery( - new MatchAllQueryBuilder(), - new FilterFunctionBuilder[] { - new FilterFunctionBuilder(scriptFunction(script)), - new FilterFunctionBuilder(scriptFunction(script)) } - ).scoreMode(FunctionScoreQuery.ScoreMode.AVG).setMinScore(minScore) - ).size(numDocs) - ) - ).actionGet(); - assertMinScoreSearchResponses(numDocs, searchResponse, numMatchingDocs); + final int finalNumMatchingDocs = numMatchingDocs; + + assertResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().query(functionScoreQuery(scriptFunction(script)).setMinScore(minScore)).size(numDocs) + ) + ), + response -> assertMinScoreSearchResponses(numDocs, response, finalNumMatchingDocs) + ); + + assertResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().query( + functionScoreQuery( + new MatchAllQueryBuilder(), + new FilterFunctionBuilder[] { + new FilterFunctionBuilder(scriptFunction(script)), + new FilterFunctionBuilder(scriptFunction(script)) } + ).scoreMode(FunctionScoreQuery.ScoreMode.AVG).setMinScore(minScore) + ).size(numDocs) + ) + ), + response -> assertMinScoreSearchResponses(numDocs, response, finalNumMatchingDocs) + ); + } protected void assertMinScoreSearchResponses(int numDocs, SearchResponse searchResponse, int numMatchingDocs) { @@ -216,35 +240,38 @@ public void testWithEmptyFunctions() throws IOException, ExecutionException, Int index("test", "1", jsonBuilder().startObject().field("text", "test text").endObject()); refresh(); - SearchResponse termQuery = client().search( - new SearchRequest(new String[] {}).source(searchSource().explain(true).query(termQuery("text", "text"))) - ).get(); - assertNoFailures(termQuery); - assertThat(termQuery.getHits().getTotalHits().value, equalTo(1L)); - float termQueryScore = termQuery.getHits().getAt(0).getScore(); - + float[] termQueryScore = new float[1]; + assertNoFailuresAndResponse( + client().search(new SearchRequest(new String[] {}).source(searchSource().explain(true).query(termQuery("text", "text")))), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + termQueryScore[0] = response.getHits().getAt(0).getScore(); + } + ); for (CombineFunction combineFunction : CombineFunction.values()) { - testMinScoreApplied(combineFunction, termQueryScore); + testMinScoreApplied(combineFunction, termQueryScore[0]); } } protected void testMinScoreApplied(CombineFunction boostMode, float expectedScore) throws InterruptedException, ExecutionException { - SearchResponse response = client().search( - new SearchRequest(new String[] {}).source( - searchSource().explain(true).query(functionScoreQuery(termQuery("text", "text")).boostMode(boostMode).setMinScore(0.1f)) - ) - ).get(); - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, equalTo(1L)); - assertThat(response.getHits().getAt(0).getScore(), 
equalTo(expectedScore)); - - response = client().search( - new SearchRequest(new String[] {}).source( - searchSource().explain(true).query(functionScoreQuery(termQuery("text", "text")).boostMode(boostMode).setMinScore(2f)) - ) - ).get(); - - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, equalTo(0L)); + assertNoFailuresAndResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().explain(true).query(functionScoreQuery(termQuery("text", "text")).boostMode(boostMode).setMinScore(0.1f)) + ) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getAt(0).getScore(), equalTo(expectedScore)); + } + ); + assertNoFailuresAndResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().explain(true).query(functionScoreQuery(termQuery("text", "text")).boostMode(boostMode).setMinScore(2f)) + ) + ), + response -> assertThat(response.getHits().getTotalHits().value, equalTo(0L)) + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java index 5c9c54a0d3b19..396af7e8501cf 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java @@ -11,10 +11,8 @@ import org.apache.lucene.search.Explanation; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; -import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.Priority; import org.elasticsearch.common.bytes.BytesReference; @@ -29,7 +27,6 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import java.io.IOException; import java.util.Arrays; @@ -40,6 +37,7 @@ import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; @@ -81,19 +79,19 @@ public void testPlugin() throws Exception { client().admin().indices().prepareRefresh().get(); DecayFunctionBuilder gfb = new CustomDistanceScoreBuilder("num1", "2013-05-28", "+1d"); - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source(searchSource().explain(false).query(functionScoreQuery(termQuery("test", "value"), gfb))) + assertNoFailuresAndResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source(searchSource().explain(false).query(functionScoreQuery(termQuery("test", "value"), gfb))) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getHits().length, equalTo(2)); + 
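assertNoFailuresAndResponse, used in the surrounding hunks, presumably folds the old assertNoFailures(response) check into the same callback style; a sketch in the same hypothetical spirit as the one above:

import java.util.Arrays;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;

// Hypothetical composition; the real helper may report failures differently:
// fail fast if any shard failed, then run the caller's assertions.
static void assertNoFailuresAndResponseSketch(SearchRequestBuilder request, Consumer<SearchResponse> consumer) {
    ResponseAssertSketch.assertResponseSketch(request, response -> {
        assertThat("shard failures: " + Arrays.toString(response.getShardFailures()), response.getFailedShards(), equalTo(0));
        consumer.accept(response); // runs only when every shard succeeded
    });
}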
assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat(sh.getAt(1).getId(), equalTo("2")); + } ); - SearchResponse sr = response.actionGet(); - ElasticsearchAssertions.assertNoFailures(sr); - SearchHits sh = sr.getHits(); - - assertThat(sh.getHits().length, equalTo(2)); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat(sh.getAt(1).getId(), equalTo("2")); - } public static class CustomDistanceScorePlugin extends Plugin implements SearchPlugin { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java index 14df03bb86e8d..f191f627dcd7f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java @@ -49,6 +49,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFourthHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThirdHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; @@ -72,29 +74,31 @@ public void testEnforceWindowSize() { int numShards = getNumShards("test").numPrimaries; for (int j = 0; j < iters; j++) { - SearchResponse searchResponse = prepareSearch().setQuery(QueryBuilders.matchAllQuery()) - .setRescorer( - new QueryRescorerBuilder( - functionScoreQuery(matchAllQuery(), ScoreFunctionBuilders.weightFactorFunction(100)).boostMode( - CombineFunction.REPLACE - ).queryName("hello world") - ).setQueryWeight(0.0f).setRescoreQueryWeight(1.0f), - 1 - ) - .setSize(randomIntBetween(2, 10)) - .get(); - assertNoFailures(searchResponse); - assertFirstHit(searchResponse, hasScore(100.f)); - int numDocsWith100AsAScore = 0; - for (int i = 0; i < searchResponse.getHits().getHits().length; i++) { - float score = searchResponse.getHits().getHits()[i].getScore(); - if (score == 100f) { - numDocsWith100AsAScore += 1; + assertNoFailuresAndResponse( + prepareSearch().setQuery(QueryBuilders.matchAllQuery()) + .setRescorer( + new QueryRescorerBuilder( + functionScoreQuery(matchAllQuery(), ScoreFunctionBuilders.weightFactorFunction(100)).boostMode( + CombineFunction.REPLACE + ).queryName("hello world") + ).setQueryWeight(0.0f).setRescoreQueryWeight(1.0f), + 1 + ) + .setSize(randomIntBetween(2, 10)), + response -> { + assertFirstHit(response, hasScore(100.f)); + int numDocsWith100AsAScore = 0; + for (int i = 0; i < response.getHits().getHits().length; i++) { + float score = response.getHits().getHits()[i].getScore(); + if (score == 100f) { + numDocsWith100AsAScore += 1; + } + } + assertThat(response.getHits().getMaxScore(), equalTo(response.getHits().getHits()[0].getScore())); + // we cannot assert that they are equal since some shards might not have docs at all + assertThat(numDocsWith100AsAScore, lessThanOrEqualTo(numShards)); } - } - assertThat(searchResponse.getHits().getMaxScore(), equalTo(searchResponse.getHits().getHits()[0].getScore())); - // we cannot assert that 
they are equal since some shards might not have docs at all - assertThat(numDocsWith100AsAScore, lessThanOrEqualTo(numShards)); + ); } } @@ -121,39 +125,41 @@ public void testRescorePhrase() throws Exception { .setSource("field1", "quick huge brown", "field2", "the quick lazy huge brown fox jumps over the tree") .get(); refresh(); - SearchResponse searchResponse = prepareSearch().setQuery( - QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR) - ) - .setRescorer( - new QueryRescorerBuilder(matchPhraseQuery("field1", "quick brown").slop(2).boost(4.0f)).setRescoreQueryWeight(2), - 5 - ) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getMaxScore(), equalTo(searchResponse.getHits().getHits()[0].getScore())); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("2")); - - searchResponse = prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR)) - .setRescorer(new QueryRescorerBuilder(matchPhraseQuery("field1", "the quick brown").slop(3)), 5) - .get(); - - assertHitCount(searchResponse, 3); - assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("2")); - assertThirdHit(searchResponse, hasId("3")); - - searchResponse = prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR)) - .setRescorer(new QueryRescorerBuilder(matchPhraseQuery("field1", "the quick brown")), 5) - .get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getMaxScore(), equalTo(searchResponse.getHits().getHits()[0].getScore())); - assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("2")); - assertThirdHit(searchResponse, hasId("3")); + assertResponse( + prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR)) + .setRescorer( + new QueryRescorerBuilder(matchPhraseQuery("field1", "quick brown").slop(2).boost(4.0f)).setRescoreQueryWeight(2), + 5 + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getMaxScore(), equalTo(response.getHits().getHits()[0].getScore())); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("2")); + } + ); + assertResponse( + prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR)) + .setRescorer(new QueryRescorerBuilder(matchPhraseQuery("field1", "the quick brown").slop(3)), 5), + response -> { + assertHitCount(response, 3); + assertFirstHit(response, hasId("1")); + assertSecondHit(response, hasId("2")); + assertThirdHit(response, hasId("3")); + } + ); + assertResponse( + prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR)) + .setRescorer(new QueryRescorerBuilder(matchPhraseQuery("field1", "the quick brown")), 5), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getMaxScore(), equalTo(response.getHits().getHits()[0].getScore())); + assertFirstHit(response, hasId("1")); + assertSecondHit(response, hasId("2")); + assertThirdHit(response, hasId("3")); + } + ); } public void testMoreDocs() throws 
Exception { @@ -189,62 +195,61 @@ public void testMoreDocs() throws Exception { client().prepareIndex("test").setId("11").setSource("field1", "2st street boston massachusetts").get(); client().prepareIndex("test").setId("12").setSource("field1", "3st street boston massachusetts").get(); indicesAdmin().prepareRefresh("test").get(); - SearchResponse searchResponse = prepareSearch().setQuery( - QueryBuilders.matchQuery("field1", "lexington avenue massachusetts").operator(Operator.OR) - ) - .setFrom(0) - .setSize(5) - .setRescorer( - new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(0.6f) - .setRescoreQueryWeight(2.0f), - 20 - ) - .get(); - - assertThat(searchResponse.getHits().getHits().length, equalTo(5)); - assertHitCount(searchResponse, 9); - assertFirstHit(searchResponse, hasId("2")); - assertSecondHit(searchResponse, hasId("6")); - assertThirdHit(searchResponse, hasId("3")); - - searchResponse = prepareSearch().setQuery( - QueryBuilders.matchQuery("field1", "lexington avenue massachusetts").operator(Operator.OR) - ) - .setFrom(0) - .setSize(5) - .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) - .setRescorer( - new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(0.6f) - .setRescoreQueryWeight(2.0f), - 20 - ) - .get(); - - assertThat(searchResponse.getHits().getHits().length, equalTo(5)); - assertHitCount(searchResponse, 9); - assertThat(searchResponse.getHits().getMaxScore(), equalTo(searchResponse.getHits().getHits()[0].getScore())); - assertFirstHit(searchResponse, hasId("2")); - assertSecondHit(searchResponse, hasId("6")); - assertThirdHit(searchResponse, hasId("3")); + assertResponse( + prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "lexington avenue massachusetts").operator(Operator.OR)) + .setFrom(0) + .setSize(5) + .setRescorer( + new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(0.6f) + .setRescoreQueryWeight(2.0f), + 20 + ), + response -> { + assertThat(response.getHits().getHits().length, equalTo(5)); + assertHitCount(response, 9); + assertFirstHit(response, hasId("2")); + assertSecondHit(response, hasId("6")); + assertThirdHit(response, hasId("3")); + } + ); + assertResponse( + prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "lexington avenue massachusetts").operator(Operator.OR)) + .setFrom(0) + .setSize(5) + .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) + .setRescorer( + new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(0.6f) + .setRescoreQueryWeight(2.0f), + 20 + ), + response -> { + assertThat(response.getHits().getHits().length, equalTo(5)); + assertHitCount(response, 9); + assertThat(response.getHits().getMaxScore(), equalTo(response.getHits().getHits()[0].getScore())); + assertFirstHit(response, hasId("2")); + assertSecondHit(response, hasId("6")); + assertThirdHit(response, hasId("3")); + } + ); // Make sure non-zero from works: - searchResponse = prepareSearch().setQuery( - QueryBuilders.matchQuery("field1", "lexington avenue massachusetts").operator(Operator.OR) - ) - .setFrom(2) - .setSize(5) - .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) - .setRescorer( - new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(0.6f) - .setRescoreQueryWeight(2.0f), - 20 - ) - .get(); - - assertThat(searchResponse.getHits().getHits().length, equalTo(5)); - 
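The negative tests earlier in this diff (testParsingExceptionIfFieldTypeDoesNotMatch and its geo counterpart) get the matching treatment for expected failures: the try { ...; fail(...) } catch block becomes expectThrows, which returns the captured exception for further assertions. The resulting shape, copied from those hunks:

// expectThrows (from the ESTestCase/LuceneTestCase base) replaces the
// try/fail/catch boilerplate and hands back the exception it caught.
SearchPhaseExecutionException e = expectThrows(
    SearchPhaseExecutionException.class,
    () -> client().search(
        new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH)
            .source(searchSource().query(functionScoreQuery(termQuery("test", "value"), linearDecayFunction("num", 1.0, 0.5))))
    ).actionGet()
);
assertThat(e.getMessage(), is("all shards failed"));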
assertHitCount(searchResponse, 9); - assertThat(searchResponse.getHits().getMaxScore(), greaterThan(searchResponse.getHits().getHits()[0].getScore())); - assertFirstHit(searchResponse, hasId("3")); + assertResponse( + prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "lexington avenue massachusetts").operator(Operator.OR)) + .setFrom(2) + .setSize(5) + .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) + .setRescorer( + new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(0.6f) + .setRescoreQueryWeight(2.0f), + 20 + ), + response -> { + assertThat(response.getHits().getHits().length, equalTo(5)); + assertHitCount(response, 9); + assertThat(response.getHits().getMaxScore(), greaterThan(response.getHits().getHits()[0].getScore())); + assertFirstHit(response, hasId("3")); + } + ); } // Tests a rescore window smaller than number of hits: @@ -272,56 +277,59 @@ public void testSmallRescoreWindow() throws Exception { client().prepareIndex("test").setId("2").setSource("field1", "lexington avenue boston massachusetts road").get(); indicesAdmin().prepareRefresh("test").get(); - SearchResponse searchResponse = prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "massachusetts")) - .setFrom(0) - .setSize(5) - .get(); - assertThat(searchResponse.getHits().getHits().length, equalTo(4)); - assertHitCount(searchResponse, 4); - assertThat(searchResponse.getHits().getMaxScore(), equalTo(searchResponse.getHits().getHits()[0].getScore())); - assertFirstHit(searchResponse, hasId("3")); - assertSecondHit(searchResponse, hasId("6")); - assertThirdHit(searchResponse, hasId("1")); - assertFourthHit(searchResponse, hasId("2")); + assertResponse(prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "massachusetts")).setFrom(0).setSize(5), response -> { + assertThat(response.getHits().getHits().length, equalTo(4)); + assertHitCount(response, 4); + assertThat(response.getHits().getMaxScore(), equalTo(response.getHits().getHits()[0].getScore())); + assertFirstHit(response, hasId("3")); + assertSecondHit(response, hasId("6")); + assertThirdHit(response, hasId("1")); + assertFourthHit(response, hasId("2")); + }); // Now, rescore only top 2 hits w/ proximity: - searchResponse = prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "massachusetts")) - .setFrom(0) - .setSize(5) - .setRescorer( - new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(0.6f) - .setRescoreQueryWeight(2.0f), - 2 - ) - .get(); - // Only top 2 hits were re-ordered: - assertThat(searchResponse.getHits().getHits().length, equalTo(4)); - assertHitCount(searchResponse, 4); - assertThat(searchResponse.getHits().getMaxScore(), equalTo(searchResponse.getHits().getHits()[0].getScore())); - assertFirstHit(searchResponse, hasId("6")); - assertSecondHit(searchResponse, hasId("3")); - assertThirdHit(searchResponse, hasId("1")); - assertFourthHit(searchResponse, hasId("2")); + assertResponse( + prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "massachusetts")) + .setFrom(0) + .setSize(5) + .setRescorer( + new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(0.6f) + .setRescoreQueryWeight(2.0f), + 2 + ), + response -> { + // Only top 2 hits were re-ordered: + assertThat(response.getHits().getHits().length, equalTo(4)); + assertHitCount(response, 4); + assertThat(response.getHits().getMaxScore(), equalTo(response.getHits().getHits()[0].getScore())); + 
assertFirstHit(response, hasId("6")); + assertSecondHit(response, hasId("3")); + assertThirdHit(response, hasId("1")); + assertFourthHit(response, hasId("2")); + } + ); // Now, rescore only top 3 hits w/ proximity: - searchResponse = prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "massachusetts")) - .setFrom(0) - .setSize(5) - .setRescorer( - new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(0.6f) - .setRescoreQueryWeight(2.0f), - 3 - ) - .get(); - - // Only top 3 hits were re-ordered: - assertThat(searchResponse.getHits().getHits().length, equalTo(4)); - assertHitCount(searchResponse, 4); - assertThat(searchResponse.getHits().getMaxScore(), equalTo(searchResponse.getHits().getHits()[0].getScore())); - assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("6")); - assertThirdHit(searchResponse, hasId("3")); - assertFourthHit(searchResponse, hasId("2")); + assertResponse( + prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "massachusetts")) + .setFrom(0) + .setSize(5) + .setRescorer( + new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(0.6f) + .setRescoreQueryWeight(2.0f), + 3 + ), + response -> { + // Only top 3 hits were re-ordered: + assertThat(response.getHits().getHits().length, equalTo(4)); + assertHitCount(response, 4); + assertThat(response.getHits().getMaxScore(), equalTo(response.getHits().getHits()[0].getScore())); + assertFirstHit(response, hasId("1")); + assertSecondHit(response, hasId("6")); + assertThirdHit(response, hasId("3")); + assertFourthHit(response, hasId("2")); + } + ); } // Tests a rescorer that penalizes the scores: @@ -349,35 +357,37 @@ public void testRescorerMadeScoresWorse() throws Exception { client().prepareIndex("test").setId("2").setSource("field1", "lexington avenue boston massachusetts road").get(); indicesAdmin().prepareRefresh("test").get(); - SearchResponse searchResponse = prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "massachusetts").operator(Operator.OR)) - .setFrom(0) - .setSize(5) - .get(); - assertThat(searchResponse.getHits().getHits().length, equalTo(4)); - assertHitCount(searchResponse, 4); - assertThat(searchResponse.getHits().getMaxScore(), equalTo(searchResponse.getHits().getHits()[0].getScore())); - assertFirstHit(searchResponse, hasId("3")); - assertSecondHit(searchResponse, hasId("6")); - assertThirdHit(searchResponse, hasId("1")); - assertFourthHit(searchResponse, hasId("2")); - + assertResponse( + prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "massachusetts").operator(Operator.OR)).setFrom(0).setSize(5), + response -> { + assertThat(response.getHits().getHits().length, equalTo(4)); + assertHitCount(response, 4); + assertThat(response.getHits().getMaxScore(), equalTo(response.getHits().getHits()[0].getScore())); + assertFirstHit(response, hasId("3")); + assertSecondHit(response, hasId("6")); + assertThirdHit(response, hasId("1")); + assertFourthHit(response, hasId("2")); + } + ); // Now, penalizing rescore (nothing matches the rescore query): - searchResponse = prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "massachusetts").operator(Operator.OR)) - .setFrom(0) - .setSize(5) - .setRescorer( - new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(1.0f) - .setRescoreQueryWeight(-1f), - 3 - ) - .get(); - - // 6 and 1 got worse, and then the hit (2) outside the rescore window 
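The integer passed next to the rescorer in these hunks is the rescore window: only that many of the top query hits are reconsidered, and, as the comments here note, hits outside the window appear to keep their original scores and relative order. Usage shape, with values taken from this test:

// Rescore only the top 2 hits; lower-ranked hits are left untouched.
assertResponse(
    prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "massachusetts"))
        .setFrom(0)
        .setSize(5)
        .setRescorer(
            new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(0.6f)
                .setRescoreQueryWeight(2.0f),
            2 // rescore window size
        ),
    response -> assertThat(response.getHits().getHits().length, equalTo(4))
);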
were sorted ahead: - assertThat(searchResponse.getHits().getMaxScore(), equalTo(searchResponse.getHits().getHits()[0].getScore())); - assertFirstHit(searchResponse, hasId("3")); - assertSecondHit(searchResponse, hasId("2")); - assertThirdHit(searchResponse, hasId("6")); - assertFourthHit(searchResponse, hasId("1")); + assertResponse( + prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "massachusetts").operator(Operator.OR)) + .setFrom(0) + .setSize(5) + .setRescorer( + new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(1.0f) + .setRescoreQueryWeight(-1f), + 3 + ), + response -> { + // 6 and 1 got worse, and then the hit (2) outside the rescore window were sorted ahead: + assertThat(response.getHits().getMaxScore(), equalTo(response.getHits().getHits()[0].getScore())); + assertFirstHit(response, hasId("3")); + assertSecondHit(response, hasId("2")); + assertThirdHit(response, hasId("6")); + assertFourthHit(response, hasId("1")); + } + ); } // Comparator that sorts hits and rescored hits in the same way. @@ -430,43 +440,46 @@ public void testEquivalence() throws Exception { int rescoreWindow = between(1, 3) * resultSize; String intToEnglish = English.intToEnglish(between(0, numDocs - 1)); String query = intToEnglish.split(" ")[0]; - SearchResponse rescored = prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH) - .setPreference("test") // ensure we hit the same shards for tie-breaking - .setQuery(QueryBuilders.matchQuery("field1", query).operator(Operator.OR)) - .setFrom(0) - .setSize(resultSize) - .setRescorer( - new QueryRescorerBuilder(constantScoreQuery(matchPhraseQuery("field1", intToEnglish).slop(3))).setQueryWeight(1.0f) - // no weight - so we basically use the same score as the actual query - .setRescoreQueryWeight(0.0f), - rescoreWindow - ) - .get(); - - SearchResponse plain = prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH) - .setPreference("test") // ensure we hit the same shards for tie-breaking - .setQuery(QueryBuilders.matchQuery("field1", query).operator(Operator.OR)) - .setFrom(0) - .setSize(resultSize) - .get(); - - // check equivalence - assertEquivalent(query, plain, rescored); - rescored = prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH) - .setPreference("test") // ensure we hit the same shards for tie-breaking - .setQuery(QueryBuilders.matchQuery("field1", query).operator(Operator.OR)) - .setFrom(0) - .setSize(resultSize) - .setRescorer( - new QueryRescorerBuilder(constantScoreQuery(matchPhraseQuery("field1", "not in the index").slop(3))).setQueryWeight( - 1.0f - ).setRescoreQueryWeight(1.0f), - rescoreWindow - ) - .get(); - // check equivalence - assertEquivalent(query, plain, rescored); + assertResponse( + prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH) + .setPreference("test") // ensure we hit the same shards for tie-breaking + .setQuery(QueryBuilders.matchQuery("field1", query).operator(Operator.OR)) + .setFrom(0) + .setSize(resultSize), + plain -> { + assertResponse( + prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH) + .setPreference("test") // ensure we hit the same shards for tie-breaking + .setQuery(QueryBuilders.matchQuery("field1", query).operator(Operator.OR)) + .setFrom(0) + .setSize(resultSize) + .setRescorer( + new QueryRescorerBuilder(constantScoreQuery(matchPhraseQuery("field1", intToEnglish).slop(3))) + .setQueryWeight(1.0f) + // no weight - so we basically use the same score as the actual query + .setRescoreQueryWeight(0.0f), + rescoreWindow 
+ ), + rescored -> assertEquivalent(query, plain, rescored) + ); // check equivalence + + assertResponse( + prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH) + .setPreference("test") // ensure we hit the same shards for tie-breaking + .setQuery(QueryBuilders.matchQuery("field1", query).operator(Operator.OR)) + .setFrom(0) + .setSize(resultSize) + .setRescorer( + new QueryRescorerBuilder(constantScoreQuery(matchPhraseQuery("field1", "not in the index").slop(3))) + .setQueryWeight(1.0f) + .setRescoreQueryWeight(1.0f), + rescoreWindow + ), + rescored -> assertEquivalent(query, plain, rescored) + ); // check equivalence + } + ); } } @@ -495,39 +508,42 @@ public void testExplain() throws Exception { refresh(); { - SearchResponse searchResponse = prepareSearch().setSearchType(SearchType.DFS_QUERY_THEN_FETCH) - .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR)) - .setRescorer( - new QueryRescorerBuilder(matchPhraseQuery("field1", "the quick brown").slop(2).boost(4.0f)).setQueryWeight(0.5f) - .setRescoreQueryWeight(0.4f), - 5 - ) - .setExplain(true) - .get(); - assertHitCount(searchResponse, 3); - assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("2")); - assertThirdHit(searchResponse, hasId("3")); - - for (int i = 0; i < 3; i++) { - assertThat(searchResponse.getHits().getAt(i).getExplanation(), notNullValue()); - assertThat(searchResponse.getHits().getAt(i).getExplanation().isMatch(), equalTo(true)); - assertThat(searchResponse.getHits().getAt(i).getExplanation().getDetails().length, equalTo(2)); - assertThat(searchResponse.getHits().getAt(i).getExplanation().getDetails()[0].isMatch(), equalTo(true)); - if (i == 2) { - assertThat(searchResponse.getHits().getAt(i).getExplanation().getDetails()[1].getValue(), equalTo(0.5f)); - } else { - assertThat(searchResponse.getHits().getAt(i).getExplanation().getDescription(), equalTo("sum of:")); - assertThat( - searchResponse.getHits().getAt(i).getExplanation().getDetails()[0].getDetails()[1].getValue(), - equalTo(0.5f) - ); - assertThat( - searchResponse.getHits().getAt(i).getExplanation().getDetails()[1].getDetails()[1].getValue(), - equalTo(0.4f) - ); + assertResponse( + prepareSearch().setSearchType(SearchType.DFS_QUERY_THEN_FETCH) + .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR)) + .setRescorer( + new QueryRescorerBuilder(matchPhraseQuery("field1", "the quick brown").slop(2).boost(4.0f)).setQueryWeight(0.5f) + .setRescoreQueryWeight(0.4f), + 5 + ) + .setExplain(true), + response -> { + assertHitCount(response, 3); + assertFirstHit(response, hasId("1")); + assertSecondHit(response, hasId("2")); + assertThirdHit(response, hasId("3")); + + for (int i = 0; i < 3; i++) { + assertThat(response.getHits().getAt(i).getExplanation(), notNullValue()); + assertThat(response.getHits().getAt(i).getExplanation().isMatch(), equalTo(true)); + assertThat(response.getHits().getAt(i).getExplanation().getDetails().length, equalTo(2)); + assertThat(response.getHits().getAt(i).getExplanation().getDetails()[0].isMatch(), equalTo(true)); + if (i == 2) { + assertThat(response.getHits().getAt(i).getExplanation().getDetails()[1].getValue(), equalTo(0.5f)); + } else { + assertThat(response.getHits().getAt(i).getExplanation().getDescription(), equalTo("sum of:")); + assertThat( + response.getHits().getAt(i).getExplanation().getDetails()[0].getDetails()[1].getValue(), + equalTo(0.5f) + ); + assertThat( + 
response.getHits().getAt(i).getExplanation().getDetails()[1].getDetails()[1].getValue(), + equalTo(0.4f) + ); + } + } } - } + ); } String[] scoreModes = new String[] { "max", "min", "avg", "total", "multiply", "" }; @@ -540,21 +556,26 @@ public void testExplain() throws Exception { if ("".equals(scoreModes[innerMode]) == false) { innerRescoreQuery.setScoreMode(QueryRescoreMode.fromString(scoreModes[innerMode])); } - - SearchResponse searchResponse = prepareSearch().setSearchType(SearchType.DFS_QUERY_THEN_FETCH) - .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR)) - .setRescorer(innerRescoreQuery, 5) - .setExplain(true) - .get(); - assertHitCount(searchResponse, 3); - assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("2")); - assertThirdHit(searchResponse, hasId("3")); - - for (int j = 0; j < 3; j++) { - assertThat(searchResponse.getHits().getAt(j).getExplanation().getDescription(), equalTo(descriptionModes[innerMode])); - } - + final int finalInnerMode = innerMode; + assertResponse( + prepareSearch().setSearchType(SearchType.DFS_QUERY_THEN_FETCH) + .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR)) + .setRescorer(innerRescoreQuery, 5) + .setExplain(true), + response -> { + assertHitCount(response, 3); + assertFirstHit(response, hasId("1")); + assertSecondHit(response, hasId("2")); + assertThirdHit(response, hasId("3")); + + for (int j = 0; j < 3; j++) { + assertThat( + response.getHits().getAt(j).getExplanation().getDescription(), + equalTo(descriptionModes[finalInnerMode]) + ); + } + } + ); for (int outerMode = 0; outerMode < scoreModes.length; outerMode++) { QueryRescorerBuilder outerRescoreQuery = new QueryRescorerBuilder(matchQuery("field1", "the quick brown").boost(4.0f)) .setQueryWeight(0.5f) @@ -563,23 +584,29 @@ public void testExplain() throws Exception { if ("".equals(scoreModes[outerMode]) == false) { outerRescoreQuery.setScoreMode(QueryRescoreMode.fromString(scoreModes[outerMode])); } - - searchResponse = prepareSearch().setSearchType(SearchType.DFS_QUERY_THEN_FETCH) - .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR)) - .addRescorer(innerRescoreQuery, 5) - .addRescorer(outerRescoreQuery.windowSize(10)) - .setExplain(true) - .get(); - assertHitCount(searchResponse, 3); - assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("2")); - assertThirdHit(searchResponse, hasId("3")); - - for (int j = 0; j < 3; j++) { - Explanation explanation = searchResponse.getHits().getAt(j).getExplanation(); - assertThat(explanation.getDescription(), equalTo(descriptionModes[outerMode])); - assertThat(explanation.getDetails()[0].getDetails()[0].getDescription(), equalTo(descriptionModes[innerMode])); - } + final int finalOuterMode = outerMode; + assertResponse( + prepareSearch().setSearchType(SearchType.DFS_QUERY_THEN_FETCH) + .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR)) + .addRescorer(innerRescoreQuery, 5) + .addRescorer(outerRescoreQuery.windowSize(10)) + .setExplain(true), + response -> { + assertHitCount(response, 3); + assertFirstHit(response, hasId("1")); + assertSecondHit(response, hasId("2")); + assertThirdHit(response, hasId("3")); + + for (int j = 0; j < 3; j++) { + Explanation explanation = response.getHits().getAt(j).getExplanation(); + assertThat(explanation.getDescription(), equalTo(descriptionModes[finalOuterMode])); + assertThat( + 
explanation.getDetails()[0].getDetails()[0].getDescription(), + equalTo(descriptionModes[finalInnerMode]) + ); + } + } + ); } } } @@ -617,58 +644,66 @@ public void testScoring() throws Exception { if ("".equals(scoreMode) == false) { rescoreQuery.setScoreMode(QueryRescoreMode.fromString(scoreMode)); } - - SearchResponse rescored = prepareSearch().setPreference("test") // ensure we hit the same shards for tie-breaking - .setFrom(0) - .setSize(10) - .setQuery(query) - .setRescorer(rescoreQuery, 50) - .get(); - - assertHitCount(rescored, 4); - - assertThat(rescored.getHits().getMaxScore(), equalTo(rescored.getHits().getHits()[0].getScore())); - if ("total".equals(scoreMode) || "".equals(scoreMode)) { - assertFirstHit(rescored, hasId(String.valueOf(i + 1))); - assertSecondHit(rescored, hasId(String.valueOf(i))); - assertThirdHit(rescored, hasId(String.valueOf(i + 2))); - assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(3.0f * primaryWeight + 7.0f * secondaryWeight)); - assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(2.0f * primaryWeight + 5.0f * secondaryWeight)); - assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(5.0f * primaryWeight)); - assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.2f * primaryWeight + 0.0f * secondaryWeight)); - } else if ("max".equals(scoreMode)) { - assertFirstHit(rescored, hasId(String.valueOf(i + 1))); - assertSecondHit(rescored, hasId(String.valueOf(i))); - assertThirdHit(rescored, hasId(String.valueOf(i + 2))); - assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(7.0f * secondaryWeight)); - assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(5.0f * secondaryWeight)); - assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(5.0f * primaryWeight)); - assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.2f * primaryWeight)); - } else if ("min".equals(scoreMode)) { - assertFirstHit(rescored, hasId(String.valueOf(i + 2))); - assertSecondHit(rescored, hasId(String.valueOf(i + 1))); - assertThirdHit(rescored, hasId(String.valueOf(i))); - assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(5.0f * primaryWeight)); - assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(3.0f * primaryWeight)); - assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(2.0f * primaryWeight)); - assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.0f * secondaryWeight)); - } else if ("avg".equals(scoreMode)) { - assertFirstHit(rescored, hasId(String.valueOf(i + 1))); - assertSecondHit(rescored, hasId(String.valueOf(i + 2))); - assertThirdHit(rescored, hasId(String.valueOf(i))); - assertThat(rescored.getHits().getHits()[0].getScore(), equalTo((3.0f * primaryWeight + 7.0f * secondaryWeight) / 2.0f)); - assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(5.0f * primaryWeight)); - assertThat(rescored.getHits().getHits()[2].getScore(), equalTo((2.0f * primaryWeight + 5.0f * secondaryWeight) / 2.0f)); - assertThat(rescored.getHits().getHits()[3].getScore(), equalTo((0.2f * primaryWeight) / 2.0f)); - } else if ("multiply".equals(scoreMode)) { - assertFirstHit(rescored, hasId(String.valueOf(i + 1))); - assertSecondHit(rescored, hasId(String.valueOf(i))); - assertThirdHit(rescored, hasId(String.valueOf(i + 2))); - assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(3.0f * primaryWeight * 7.0f * secondaryWeight)); - assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(2.0f * primaryWeight * 5.0f * secondaryWeight)); - 
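The assertions being rewritten in this hunk encode the combination rule for each QueryRescoreMode. Spelled out with illustrative numbers (the weights and raw scores below are hypothetical, chosen only to make the arithmetic visible):

// A hit scoring 3.0 in the query and 7.0 in the rescore query, with
// primaryWeight = 2f (query weight) and secondaryWeight = 3f (rescore weight):
float primaryWeight = 2f;
float secondaryWeight = 3f;
float q = 3.0f * primaryWeight;    // weighted query score   -> 6.0
float r = 7.0f * secondaryWeight;  // weighted rescore score -> 21.0
float total = q + r;               // "total" (the default)  -> 27.0
float max = Math.max(q, r);        // "max"                  -> 21.0
float min = Math.min(q, r);        // "min"                  -> 6.0
float avg = (q + r) / 2.0f;        // "avg"                  -> 13.5
float multiply = q * r;            // "multiply"             -> 126.0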
assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(5.0f * primaryWeight)); - assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.2f * primaryWeight * 0.0f * secondaryWeight)); - } + final int finalI = i; + assertResponse( + prepareSearch().setPreference("test") // ensure we hit the same shards for tie-breaking + .setFrom(0) + .setSize(10) + .setQuery(query) + .setRescorer(rescoreQuery, 50), + rescored -> { + assertHitCount(rescored, 4); + + assertThat(rescored.getHits().getMaxScore(), equalTo(rescored.getHits().getHits()[0].getScore())); + if ("total".equals(scoreMode) || "".equals(scoreMode)) { + assertFirstHit(rescored, hasId(String.valueOf(finalI + 1))); + assertSecondHit(rescored, hasId(String.valueOf(finalI))); + assertThirdHit(rescored, hasId(String.valueOf(finalI + 2))); + assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(3.0f * primaryWeight + 7.0f * secondaryWeight)); + assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(2.0f * primaryWeight + 5.0f * secondaryWeight)); + assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(5.0f * primaryWeight)); + assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.2f * primaryWeight + 0.0f * secondaryWeight)); + } else if ("max".equals(scoreMode)) { + assertFirstHit(rescored, hasId(String.valueOf(finalI + 1))); + assertSecondHit(rescored, hasId(String.valueOf(finalI))); + assertThirdHit(rescored, hasId(String.valueOf(finalI + 2))); + assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(7.0f * secondaryWeight)); + assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(5.0f * secondaryWeight)); + assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(5.0f * primaryWeight)); + assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.2f * primaryWeight)); + } else if ("min".equals(scoreMode)) { + assertFirstHit(rescored, hasId(String.valueOf(finalI + 2))); + assertSecondHit(rescored, hasId(String.valueOf(finalI + 1))); + assertThirdHit(rescored, hasId(String.valueOf(finalI))); + assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(5.0f * primaryWeight)); + assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(3.0f * primaryWeight)); + assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(2.0f * primaryWeight)); + assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.0f * secondaryWeight)); + } else if ("avg".equals(scoreMode)) { + assertFirstHit(rescored, hasId(String.valueOf(finalI + 1))); + assertSecondHit(rescored, hasId(String.valueOf(finalI + 2))); + assertThirdHit(rescored, hasId(String.valueOf(finalI))); + assertThat( + rescored.getHits().getHits()[0].getScore(), + equalTo((3.0f * primaryWeight + 7.0f * secondaryWeight) / 2.0f) + ); + assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(5.0f * primaryWeight)); + assertThat( + rescored.getHits().getHits()[2].getScore(), + equalTo((2.0f * primaryWeight + 5.0f * secondaryWeight) / 2.0f) + ); + assertThat(rescored.getHits().getHits()[3].getScore(), equalTo((0.2f * primaryWeight) / 2.0f)); + } else if ("multiply".equals(scoreMode)) { + assertFirstHit(rescored, hasId(String.valueOf(finalI + 1))); + assertSecondHit(rescored, hasId(String.valueOf(finalI))); + assertThirdHit(rescored, hasId(String.valueOf(finalI + 2))); + assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(3.0f * primaryWeight * 7.0f * secondaryWeight)); + assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(2.0f * primaryWeight * 5.0f * secondaryWeight)); + 
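A side effect of moving assertions into lambdas shows up as the final int finalI = i copy here (and finalInnerMode, finalNumMatchingDocs in earlier hunks): lambdas can only capture effectively-final locals. Where a callback must write a value out, these tests switch to single-element arrays (termQueryScore[0], hits[0]). In miniature:

// Read-only loop state is snapshotted into a final local; mutable state the
// callback must update goes through a one-element-array escape hatch.
for (int i = 0; i < 3; i++) {
    final int finalI = i;            // read-only snapshot for the lambda
    float[] topScore = new float[1]; // mutable slot the lambda can write
    assertResponse(prepareSearch("test").setQuery(matchAllQuery()), response -> {
        assertFirstHit(response, hasId(String.valueOf(finalI)));
        topScore[0] = response.getHits().getAt(0).getScore();
    });
}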
assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(5.0f * primaryWeight)); + assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.2f * primaryWeight * 0.0f * secondaryWeight)); + } + } + ); } } } @@ -688,13 +723,16 @@ public void testMultipleRescores() throws Exception { // First set the rescore window large enough that both rescores take effect SearchRequestBuilder request = prepareSearch(); request.addRescorer(eightIsGreat, numDocs).addRescorer(sevenIsBetter, numDocs); - SearchResponse response = request.get(); - assertFirstHit(response, hasId("7")); - assertSecondHit(response, hasId("8")); + assertResponse(request, response -> { + assertFirstHit(response, hasId("7")); + assertSecondHit(response, hasId("8")); + }); // Now squash the second rescore window so it never gets to see a seven - response = request.setSize(1).clearRescorers().addRescorer(eightIsGreat, numDocs).addRescorer(sevenIsBetter, 1).get(); - assertFirstHit(response, hasId("8")); + assertResponse( + request.setSize(1).clearRescorers().addRescorer(eightIsGreat, numDocs).addRescorer(sevenIsBetter, 1), + response -> assertFirstHit(response, hasId("8")) + ); // We have no idea what the second hit will be because we didn't get a chance to look for seven // Now use one rescore to drag the number we're looking for into the window of another @@ -709,11 +747,12 @@ public void testMultipleRescores() throws Exception { ) ).setScoreMode(QueryRescoreMode.Total); request.clearRescorers().addRescorer(ninetyIsGood, numDocs).addRescorer(oneToo, 10); - response = request.setSize(2).get(); - assertThat(response.getHits().getMaxScore(), equalTo(response.getHits().getHits()[0].getScore())); - assertFirstHit(response, hasId("91")); - assertFirstHit(response, hasScore(2001.0f)); - assertSecondHit(response, hasScore(1001.0f)); // Not sure which one it is but it is ninety something + assertResponse(request.setSize(2), response -> { + assertThat(response.getHits().getMaxScore(), equalTo(response.getHits().getHits()[0].getScore())); + assertFirstHit(response, hasId("91")); + assertFirstHit(response, hasScore(2001.0f)); + assertSecondHit(response, hasScore(1001.0f)); // Not sure which one it is but it is ninety something + }); } private int indexRandomNumbers(String analyzer) throws Exception { @@ -797,14 +836,17 @@ public void testRescorePhaseWithInvalidSort() throws Exception { assertNotNull(exc.getCause()); assertThat(exc.getCause().getMessage(), containsString("Cannot use [sort] option in conjunction with [rescore].")); - SearchResponse resp = prepareSearch().addSort(SortBuilders.scoreSort()) - .setTrackScores(true) - .addRescorer(new QueryRescorerBuilder(matchAllQuery()).setRescoreQueryWeight(100.0f), 50) - .get(); - assertThat(resp.getHits().getTotalHits().value, equalTo(5L)); - assertThat(resp.getHits().getHits().length, equalTo(5)); - for (SearchHit hit : resp.getHits().getHits()) { - assertThat(hit.getScore(), equalTo(101f)); - } + assertResponse( + prepareSearch().addSort(SortBuilders.scoreSort()) + .setTrackScores(true) + .addRescorer(new QueryRescorerBuilder(matchAllQuery()).setRescoreQueryWeight(100.0f), 50), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(5L)); + assertThat(response.getHits().getHits().length, equalTo(5)); + for (SearchHit hit : response.getHits().getHits()) { + assertThat(hit.getScore(), equalTo(101f)); + } + } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/RandomScoreFunctionIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/RandomScoreFunctionIT.java index ef8ffcf0d806a..5109491c5faca 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/RandomScoreFunctionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/RandomScoreFunctionIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.functionscore; import org.apache.lucene.util.ArrayUtil; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder; @@ -37,6 +36,8 @@ import static org.elasticsearch.script.MockScriptPlugin.NAME; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; @@ -97,35 +98,39 @@ public void testConsistentHitsWithSameSeed() throws Exception { preference = randomRealisticUnicodeOfLengthBetween(1, 10); } int innerIters = scaledRandomIntBetween(2, 5); - SearchHit[] hits = null; + final SearchHit[][] hits = new SearchHit[1][]; for (int i = 0; i < innerIters; i++) { - SearchResponse searchResponse = prepareSearch().setSize(docCount) // get all docs otherwise we are prone to tie-breaking - .setPreference(preference) - .setQuery(functionScoreQuery(matchAllQuery(), randomFunction().seed(seed).setField("foo"))) - .get(); - assertThat( - "Failures " + Arrays.toString(searchResponse.getShardFailures()), - searchResponse.getShardFailures().length, - CoreMatchers.equalTo(0) - ); - final int hitCount = searchResponse.getHits().getHits().length; - final SearchHit[] currentHits = searchResponse.getHits().getHits(); - ArrayUtil.timSort(currentHits, (o1, o2) -> { - // for tie-breaking we have to resort here since if the score is - // identical we rely on collection order which might change. - int cmp = Float.compare(o1.getScore(), o2.getScore()); - return cmp == 0 ? o1.getId().compareTo(o2.getId()) : cmp; - }); - if (i == 0) { - assertThat(hits, nullValue()); - hits = currentHits; - } else { - assertThat(hits.length, equalTo(searchResponse.getHits().getHits().length)); - for (int j = 0; j < hitCount; j++) { - assertThat("" + j, currentHits[j].getScore(), equalTo(hits[j].getScore())); - assertThat("" + j, currentHits[j].getId(), equalTo(hits[j].getId())); + final int finalI = i; + assertResponse( + prepareSearch().setSize(docCount) // get all docs otherwise we are prone to tie-breaking + .setPreference(preference) + .setQuery(functionScoreQuery(matchAllQuery(), randomFunction().seed(seed).setField("foo"))), + response -> { + assertThat( + "Failures " + Arrays.toString(response.getShardFailures()), + response.getShardFailures().length, + CoreMatchers.equalTo(0) + ); + final int hitCount = response.getHits().getHits().length; + final SearchHit[] currentHits = response.getHits().getHits(); + ArrayUtil.timSort(currentHits, (o1, o2) -> { + // for tie-breaking we have to resort here since if the score is + // identical we rely on collection order which might change. 
+ int cmp = Float.compare(o1.getScore(), o2.getScore()); + return cmp == 0 ? o1.getId().compareTo(o2.getId()) : cmp; + }); + if (finalI == 0) { + assertThat(hits[0], nullValue()); + hits[0] = currentHits; + } else { + assertThat(hits[0].length, equalTo(response.getHits().getHits().length)); + for (int j = 0; j < hitCount; j++) { + assertThat("" + j, currentHits[j].getScore(), equalTo(hits[0][j].getScore())); + assertThat("" + j, currentHits[j].getId(), equalTo(hits[0][j].getId())); + } + } } - } + ); // randomly change some docs to get them in different segments int numDocsToChange = randomIntBetween(20, 50); @@ -165,73 +170,88 @@ public void testScoreAccessWithinScript() throws Exception { // Test for accessing _score Script script = new Script(ScriptType.INLINE, NAME, "log(doc['index'].value + (factor * _score))", params); - SearchResponse resp = prepareSearch("test").setQuery( - functionScoreQuery( - matchQuery("body", "foo"), - new FunctionScoreQueryBuilder.FilterFunctionBuilder[] { - new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)), - new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) } - ) - ).get(); - assertNoFailures(resp); - SearchHit firstHit = resp.getHits().getAt(0); - assertThat(firstHit.getScore(), greaterThan(1f)); + assertNoFailuresAndResponse( + prepareSearch("test").setQuery( + functionScoreQuery( + matchQuery("body", "foo"), + new FunctionScoreQueryBuilder.FilterFunctionBuilder[] { + new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)), + new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) } + ) + ), + response -> { + SearchHit firstHit = response.getHits().getAt(0); + assertThat(firstHit.getScore(), greaterThan(1f)); + } + ); // Test for accessing _score.intValue() script = new Script(ScriptType.INLINE, NAME, "log(doc['index'].value + (factor * _score.intValue()))", params); - resp = prepareSearch("test").setQuery( - functionScoreQuery( - matchQuery("body", "foo"), - new FunctionScoreQueryBuilder.FilterFunctionBuilder[] { - new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)), - new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) } - ) - ).get(); - assertNoFailures(resp); - firstHit = resp.getHits().getAt(0); - assertThat(firstHit.getScore(), greaterThan(1f)); + assertNoFailuresAndResponse( + prepareSearch("test").setQuery( + functionScoreQuery( + matchQuery("body", "foo"), + new FunctionScoreQueryBuilder.FilterFunctionBuilder[] { + new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)), + new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) } + ) + ), + response -> { + SearchHit firstHit = response.getHits().getAt(0); + assertThat(firstHit.getScore(), greaterThan(1f)); + } + ); // Test for accessing _score.longValue() script = new Script(ScriptType.INLINE, NAME, "log(doc['index'].value + (factor * _score.longValue()))", params); - resp = prepareSearch("test").setQuery( - functionScoreQuery( - matchQuery("body", "foo"), - new FunctionScoreQueryBuilder.FilterFunctionBuilder[] { - new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)), - new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) } - ) - ).get(); - assertNoFailures(resp); - firstHit = resp.getHits().getAt(0); - assertThat(firstHit.getScore(), greaterThan(1f)); + 
assertNoFailuresAndResponse( + prepareSearch("test").setQuery( + functionScoreQuery( + matchQuery("body", "foo"), + new FunctionScoreQueryBuilder.FilterFunctionBuilder[] { + new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)), + new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) } + ) + ), + response -> { + SearchHit firstHit = response.getHits().getAt(0); + assertThat(firstHit.getScore(), greaterThan(1f)); + } + ); // Test for accessing _score.floatValue() script = new Script(ScriptType.INLINE, NAME, "log(doc['index'].value + (factor * _score.floatValue()))", params); - resp = prepareSearch("test").setQuery( - functionScoreQuery( - matchQuery("body", "foo"), - new FunctionScoreQueryBuilder.FilterFunctionBuilder[] { - new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)), - new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) } - ) - ).get(); - assertNoFailures(resp); - firstHit = resp.getHits().getAt(0); - assertThat(firstHit.getScore(), greaterThan(1f)); + assertNoFailuresAndResponse( + prepareSearch("test").setQuery( + functionScoreQuery( + matchQuery("body", "foo"), + new FunctionScoreQueryBuilder.FilterFunctionBuilder[] { + new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)), + new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) } + ) + ), + response -> { + SearchHit firstHit = response.getHits().getAt(0); + assertThat(firstHit.getScore(), greaterThan(1f)); + } + ); // Test for accessing _score.doubleValue() script = new Script(ScriptType.INLINE, NAME, "log(doc['index'].value + (factor * _score.doubleValue()))", params); - resp = prepareSearch("test").setQuery( - functionScoreQuery( - matchQuery("body", "foo"), - new FunctionScoreQueryBuilder.FilterFunctionBuilder[] { - new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)), - new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) } - ) - ).get(); - assertNoFailures(resp); - firstHit = resp.getHits().getAt(0); - assertThat(firstHit.getScore(), greaterThan(1f)); + assertNoFailuresAndResponse( + prepareSearch("test").setQuery( + functionScoreQuery( + matchQuery("body", "foo"), + new FunctionScoreQueryBuilder.FilterFunctionBuilder[] { + new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)), + new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) } + ) + ), + response -> { + SearchHit firstHit = response.getHits().getAt(0); + assertThat(firstHit.getScore(), greaterThan(1f)); + } + ); } public void testSeedReportedInExplain() throws Exception { @@ -243,28 +263,33 @@ public void testSeedReportedInExplain() throws Exception { int seed = 12345678; - SearchResponse resp = prepareSearch("test").setQuery( - functionScoreQuery(matchAllQuery(), randomFunction().seed(seed).setField(SeqNoFieldMapper.NAME)) - ).setExplain(true).get(); - assertNoFailures(resp); - assertEquals(1, resp.getHits().getTotalHits().value); - SearchHit firstHit = resp.getHits().getAt(0); - assertThat(firstHit.getExplanation().toString(), containsString("" + seed)); + assertNoFailuresAndResponse( + prepareSearch("test").setQuery(functionScoreQuery(matchAllQuery(), randomFunction().seed(seed).setField(SeqNoFieldMapper.NAME))) + .setExplain(true), + response -> { + assertNoFailures(response); + assertEquals(1, response.getHits().getTotalHits().value); 
+ SearchHit firstHit = response.getHits().getAt(0); + assertThat(firstHit.getExplanation().toString(), containsString("" + seed)); + } + ); } public void testNoDocs() throws Exception { createIndex("test"); ensureGreen(); - SearchResponse resp = prepareSearch("test").setQuery( - functionScoreQuery(matchAllQuery(), randomFunction().seed(1234).setField(SeqNoFieldMapper.NAME)) - ).get(); - assertNoFailures(resp); - assertEquals(0, resp.getHits().getTotalHits().value); + assertNoFailuresAndResponse( + prepareSearch("test").setQuery( + functionScoreQuery(matchAllQuery(), randomFunction().seed(1234).setField(SeqNoFieldMapper.NAME)) + ), + response -> assertEquals(0, response.getHits().getTotalHits().value) + ); - resp = prepareSearch("test").setQuery(functionScoreQuery(matchAllQuery(), randomFunction())).get(); - assertNoFailures(resp); - assertEquals(0, resp.getHits().getTotalHits().value); + assertNoFailuresAndResponse( + prepareSearch("test").setQuery(functionScoreQuery(matchAllQuery(), randomFunction())), + response -> assertEquals(0, response.getHits().getTotalHits().value) + ); } public void testScoreRange() throws Exception { @@ -280,14 +305,14 @@ public void testScoreRange() throws Exception { refresh(); int iters = scaledRandomIntBetween(10, 20); for (int i = 0; i < iters; ++i) { - SearchResponse searchResponse = prepareSearch().setQuery(functionScoreQuery(matchAllQuery(), randomFunction())) - .setSize(docCount) - .get(); - - assertNoFailures(searchResponse); - for (SearchHit hit : searchResponse.getHits().getHits()) { - assertThat(hit.getScore(), allOf(greaterThanOrEqualTo(0.0f), lessThanOrEqualTo(1.0f))); - } + assertNoFailuresAndResponse( + prepareSearch().setQuery(functionScoreQuery(matchAllQuery(), randomFunction())).setSize(docCount), + response -> { + for (SearchHit hit : response.getHits().getHits()) { + assertThat(hit.getScore(), allOf(greaterThanOrEqualTo(0.0f), lessThanOrEqualTo(1.0f))); + } + } + ); } } @@ -338,10 +363,10 @@ public void checkDistribution() throws Exception { for (int i = 0; i < count; i++) { - SearchResponse searchResponse = prepareSearch().setQuery(functionScoreQuery(matchAllQuery(), new RandomScoreFunctionBuilder())) - .get(); - - matrix[Integer.valueOf(searchResponse.getHits().getAt(0).getId())]++; + assertResponse( + prepareSearch().setQuery(functionScoreQuery(matchAllQuery(), new RandomScoreFunctionBuilder())), + response -> matrix[Integer.valueOf(response.getHits().getAt(0).getId())]++ + ); } int filled = 0; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java index 415de06030938..93891a12dd861 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java @@ -14,7 +14,6 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.MoreLikeThisQueryBuilder; @@ -41,8 +40,10 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; @@ -225,26 +226,36 @@ public void testMoreLikeThisWithAliases() throws Exception { ); logger.info("Running moreLikeThis on beta shard"); - SearchResponse response = prepareSearch("beta").setQuery( - new MoreLikeThisQueryBuilder(null, new Item[] { new Item("test", "1") }).minTermFreq(1).minDocFreq(1) - ).get(); - assertHitCount(response, 1L); - assertThat(response.getHits().getAt(0).getId(), equalTo("3")); - + assertResponse( + prepareSearch("beta").setQuery( + new MoreLikeThisQueryBuilder(null, new Item[] { new Item("test", "1") }).minTermFreq(1).minDocFreq(1) + ), + response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), equalTo("3")); + } + ); logger.info("Running moreLikeThis on release shard"); - response = prepareSearch("release").setQuery( - new MoreLikeThisQueryBuilder(null, new Item[] { new Item("test", "1") }).minTermFreq(1).minDocFreq(1) - ).get(); - assertHitCount(response, 1L); - assertThat(response.getHits().getAt(0).getId(), equalTo("2")); + assertResponse( + prepareSearch("release").setQuery( + new MoreLikeThisQueryBuilder(null, new Item[] { new Item("test", "1") }).minTermFreq(1).minDocFreq(1) + ), + response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), equalTo("2")); + } + ); logger.info("Running moreLikeThis on alias with node client"); - response = internalCluster().coordOnlyNodeClient() - .prepareSearch("beta") - .setQuery(new MoreLikeThisQueryBuilder(null, new Item[] { new Item("test", "1") }).minTermFreq(1).minDocFreq(1)) - .get(); - assertHitCount(response, 1L); - assertThat(response.getHits().getAt(0).getId(), equalTo("3")); + assertResponse( + internalCluster().coordOnlyNodeClient() + .prepareSearch("beta") + .setQuery(new MoreLikeThisQueryBuilder(null, new Item[] { new Item("test", "1") }).minTermFreq(1).minDocFreq(1)), + response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), equalTo("3")); + } + ); } // Issue #14944 @@ -267,11 +278,15 @@ public void testMoreLikeThisWithAliasesInLikeDocuments() throws Exception { ).actionGet(); refresh(indexName); - SearchResponse response = prepareSearch().setQuery( - new MoreLikeThisQueryBuilder(null, new Item[] { new Item(aliasName, "1") }).minTermFreq(1).minDocFreq(1) - ).get(); - assertHitCount(response, 2L); - assertThat(response.getHits().getAt(0).getId(), equalTo("3")); + assertResponse( + prepareSearch().setQuery( + new MoreLikeThisQueryBuilder(null, new Item[] { new Item(aliasName, "1") }).minTermFreq(1).minDocFreq(1) + ), + response -> { + assertHitCount(response, 2L); + assertThat(response.getHits().getAt(0).getId(), equalTo("3")); + } + ); } public void testMoreLikeThisIssue2197() throws Exception { @@ -620,13 +635,14 @@ public void 
testMinimumShouldMatch() throws ExecutionException, InterruptedExcep .minDocFreq(1) .minimumShouldMatch(minimumShouldMatch); logger.info("Testing with minimum_should_match = {}", minimumShouldMatch); - SearchResponse response = prepareSearch("test").setQuery(mltQuery).get(); - assertNoFailures(response); - if (minimumShouldMatch.equals("0%")) { - assertHitCount(response, 10); - } else { - assertHitCount(response, 11 - i); - } + final int finalI = i; + assertNoFailuresAndResponse(prepareSearch("test").setQuery(mltQuery), response -> { + if (minimumShouldMatch.equals("0%")) { + assertHitCount(response, 10); + } else { + assertHitCount(response, 11 - finalI); + } + }); } } @@ -773,8 +789,7 @@ public void testWithRouting() throws IOException { ); moreLikeThisQueryBuilder.minTermFreq(1); moreLikeThisQueryBuilder.minDocFreq(1); - SearchResponse searchResponse = prepareSearch("index").setQuery(moreLikeThisQueryBuilder).get(); - assertEquals(2, searchResponse.getHits().getTotalHits().value); + assertHitCount(prepareSearch("index").setQuery(moreLikeThisQueryBuilder), 2L); } // Issue #29678 diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/nested/SimpleNestedIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/nested/SimpleNestedIT.java index 736796d73f164..f5f672c1fed9a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/nested/SimpleNestedIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/nested/SimpleNestedIT.java @@ -16,7 +16,6 @@ import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.settings.Settings; @@ -37,7 +36,10 @@ import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -51,10 +53,8 @@ public void testSimpleNested() throws Exception { ensureGreen(); // check on no data, see it works - SearchResponse searchResponse = prepareSearch("test").get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); - searchResponse = prepareSearch("test").setQuery(termQuery("n_field1", "n_value1_1")).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); + assertHitCount(prepareSearch("test"), 0L); + assertHitCount(prepareSearch("test").setQuery(termQuery("n_field1", "n_value1_1")), 0L); client().prepareIndex("test") .setId("1") @@ -83,26 +83,22 @@ public void testSimpleNested() throws Exception { // check the numDocs assertDocumentCount("test", 3); - searchResponse = prepareSearch("test").setQuery(termQuery("n_field1", "n_value1_1")).get(); - 
assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); + assertHitCount(prepareSearch("test").setQuery(termQuery("n_field1", "n_value1_1")), 0L); // search for something that matches the nested doc, and see that we don't find the nested doc - searchResponse = prepareSearch("test").setQuery(matchAllQuery()).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - searchResponse = prepareSearch("test").setQuery(termQuery("n_field1", "n_value1_1")).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); + assertHitCount(prepareSearch("test"), 1L); + assertHitCount(prepareSearch("test").setQuery(termQuery("n_field1", "n_value1_1")), 0L); // now, do a nested query - searchResponse = prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"), ScoreMode.Avg)) - .get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - - searchResponse = prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"), ScoreMode.Avg)) - .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) - .get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); + assertHitCountAndNoFailures( + prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"), ScoreMode.Avg)), + 1L + ); + assertHitCountAndNoFailures( + prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"), ScoreMode.Avg)) + .setSearchType(SearchType.DFS_QUERY_THEN_FETCH), + 1L + ); // add another doc, one that would match if it was not nested... @@ -128,40 +124,44 @@ public void testSimpleNested() throws Exception { refresh(); assertDocumentCount("test", 6); - searchResponse = prepareSearch("test").setQuery( - nestedQuery( - "nested1", - boolQuery().must(termQuery("nested1.n_field1", "n_value1_1")).must(termQuery("nested1.n_field2", "n_value2_1")), - ScoreMode.Avg - ) - ).get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); + assertHitCountAndNoFailures( + prepareSearch("test").setQuery( + nestedQuery( + "nested1", + boolQuery().must(termQuery("nested1.n_field1", "n_value1_1")).must(termQuery("nested1.n_field2", "n_value2_1")), + ScoreMode.Avg + ) + ), + 1L + ); // filter - searchResponse = prepareSearch("test").setQuery( - boolQuery().must(matchAllQuery()) - .mustNot( - nestedQuery( - "nested1", - boolQuery().must(termQuery("nested1.n_field1", "n_value1_1")).must(termQuery("nested1.n_field2", "n_value2_1")), - ScoreMode.Avg + assertHitCountAndNoFailures( + prepareSearch("test").setQuery( + boolQuery().must(matchAllQuery()) + .mustNot( + nestedQuery( + "nested1", + boolQuery().must(termQuery("nested1.n_field1", "n_value1_1")).must(termQuery("nested1.n_field2", "n_value2_1")), + ScoreMode.Avg + ) + ) - ) - ).get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); + ), + 1L + ); // check with type prefix - searchResponse = prepareSearch("test").setQuery( - nestedQuery( - "nested1", - boolQuery().must(termQuery("nested1.n_field1", "n_value1_1")).must(termQuery("nested1.n_field2", "n_value2_1")), - ScoreMode.Avg - ) - ).get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); + assertHitCountAndNoFailures( + prepareSearch("test").setQuery( + nestedQuery( + 
"nested1", + boolQuery().must(termQuery("nested1.n_field1", "n_value1_1")).must(termQuery("nested1.n_field2", "n_value2_1")), + ScoreMode.Avg + ) + ), + 1L + ); // check delete, so all is gone... DeleteResponse deleteResponse = client().prepareDelete("test", "2").get(); @@ -170,10 +170,10 @@ public void testSimpleNested() throws Exception { refresh(); assertDocumentCount("test", 3); - searchResponse = prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"), ScoreMode.Avg)) - .get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); + assertHitCountAndNoFailures( + prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"), ScoreMode.Avg)), + 1L + ); } public void testMultiNested() throws Exception { @@ -238,83 +238,87 @@ public void testMultiNested() throws Exception { assertDocumentCount("test", 7); // do some multi nested queries - SearchResponse searchResponse = prepareSearch("test").setQuery( - nestedQuery("nested1", termQuery("nested1.field1", "1"), ScoreMode.Avg) - ).get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - - searchResponse = prepareSearch("test").setQuery( - nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "2"), ScoreMode.Avg) - ).get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - - searchResponse = prepareSearch("test").setQuery( - nestedQuery( - "nested1", - boolQuery().must(termQuery("nested1.field1", "1")) - .must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "2"), ScoreMode.Avg)), - ScoreMode.Avg - ) - ).get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - - searchResponse = prepareSearch("test").setQuery( - nestedQuery( - "nested1", - boolQuery().must(termQuery("nested1.field1", "1")) - .must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "3"), ScoreMode.Avg)), - ScoreMode.Avg - ) - ).get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - - searchResponse = prepareSearch("test").setQuery( - nestedQuery( - "nested1", - boolQuery().must(termQuery("nested1.field1", "1")) - .must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "4"), ScoreMode.Avg)), - ScoreMode.Avg - ) - ).get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); - - searchResponse = prepareSearch("test").setQuery( - nestedQuery( - "nested1", - boolQuery().must(termQuery("nested1.field1", "1")) - .must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "5"), ScoreMode.Avg)), - ScoreMode.Avg - ) - ).get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); - - searchResponse = prepareSearch("test").setQuery( - nestedQuery( - "nested1", - boolQuery().must(termQuery("nested1.field1", "4")) - .must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "5"), ScoreMode.Avg)), - ScoreMode.Avg - ) - ).get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - - searchResponse = prepareSearch("test").setQuery( - nestedQuery( - "nested1", - boolQuery().must(termQuery("nested1.field1", "4")) - .must(nestedQuery("nested1.nested2", 
termQuery("nested1.nested2.field2", "2"), ScoreMode.Avg)), - ScoreMode.Avg - ) - ).get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); + assertHitCountAndNoFailures( + prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.field1", "1"), ScoreMode.Avg)), + 1L + ); + + assertHitCountAndNoFailures( + prepareSearch("test").setQuery(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "2"), ScoreMode.Avg)), + 1L + ); + + assertHitCountAndNoFailures( + prepareSearch("test").setQuery( + nestedQuery( + "nested1", + boolQuery().must(termQuery("nested1.field1", "1")) + .must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "2"), ScoreMode.Avg)), + ScoreMode.Avg + ) + ), + 1L + ); + + assertHitCountAndNoFailures( + prepareSearch("test").setQuery( + nestedQuery( + "nested1", + boolQuery().must(termQuery("nested1.field1", "1")) + .must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "3"), ScoreMode.Avg)), + ScoreMode.Avg + ) + ), + 1L + ); + + assertHitCountAndNoFailures( + prepareSearch("test").setQuery( + nestedQuery( + "nested1", + boolQuery().must(termQuery("nested1.field1", "1")) + .must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "4"), ScoreMode.Avg)), + ScoreMode.Avg + ) + ), + 0L + ); + + assertHitCountAndNoFailures( + prepareSearch("test").setQuery( + nestedQuery( + "nested1", + boolQuery().must(termQuery("nested1.field1", "1")) + .must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "5"), ScoreMode.Avg)), + ScoreMode.Avg + ) + ), + 0L + ); + + assertHitCountAndNoFailures( + prepareSearch("test").setQuery( + nestedQuery( + "nested1", + boolQuery().must(termQuery("nested1.field1", "4")) + .must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "5"), ScoreMode.Avg)), + ScoreMode.Avg + ) + ), + 1L + ); + + assertHitCountAndNoFailures( + prepareSearch("test").setQuery( + nestedQuery( + "nested1", + boolQuery().must(termQuery("nested1.field1", "4")) + .must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "2"), ScoreMode.Avg)), + ScoreMode.Avg + ) + ), + 0L + ); } // When IncludeNestedDocsQuery is wrapped in a FilteredQuery then a in-finite loop occurs b/c of a bug in @@ -421,14 +425,17 @@ public void testExplain() throws Exception { .setRefreshPolicy(IMMEDIATE) .get(); - SearchResponse searchResponse = prepareSearch("test").setQuery( - nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1"), ScoreMode.Total) - ).setExplain(true).get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - Explanation explanation = searchResponse.getHits().getHits()[0].getExplanation(); - assertThat(explanation.getValue(), equalTo(searchResponse.getHits().getHits()[0].getScore())); - assertThat(explanation.toString(), startsWith("0.36464313 = Score based on 2 child docs in range from 0 to 1")); + assertNoFailuresAndResponse( + prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1"), ScoreMode.Total)) + .setExplain(true), + response -> { + assertNoFailures(response); + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + Explanation explanation = response.getHits().getHits()[0].getExplanation(); + assertThat(explanation.getValue(), equalTo(response.getHits().getHits()[0].getScore())); + assertThat(explanation.toString(), startsWith("0.36464313 = Score based on 2 child docs in range from 0 
to 1")); + } + ); } public void testSimpleNestedSorting() throws Exception { @@ -504,33 +511,32 @@ public void testSimpleNestedSorting() throws Exception { .get(); refresh(); - SearchResponse searchResponse = prepareSearch("test") - - .setQuery(QueryBuilders.matchAllQuery()) - .addSort(SortBuilders.fieldSort("nested1.field1").order(SortOrder.ASC).setNestedSort(new NestedSortBuilder("nested1"))) - .get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("4")); - - searchResponse = prepareSearch("test") - - .setQuery(QueryBuilders.matchAllQuery()) - .addSort(SortBuilders.fieldSort("nested1.field1").order(SortOrder.DESC).setNestedSort(new NestedSortBuilder("nested1"))) - .get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("5")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("4")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("2")); + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()) + .addSort(SortBuilders.fieldSort("nested1.field1").order(SortOrder.ASC).setNestedSort(new NestedSortBuilder("nested1"))), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits()[0].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("3")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("4")); + } + ); + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()) + .addSort(SortBuilders.fieldSort("nested1.field1").order(SortOrder.DESC).setNestedSort(new NestedSortBuilder("nested1"))), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("5")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("4")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("2")); + } + ); } public void testSimpleNestedSortingWithNestedFilterMissing() throws Exception { @@ -628,16 +634,15 @@ public void testSimpleNestedSortingWithNestedFilterMissing() throws Exception { searchRequestBuilder.setScroll("10m"); } - SearchResponse searchResponse = searchRequestBuilder.get(); - - assertHitCount(searchResponse, 3); - 
assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("4")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("10")); - + assertResponse(searchRequestBuilder, response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits()[0].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("4")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("10")); + }); searchRequestBuilder = prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()) .addSort( SortBuilders.fieldSort("nested1.field1") @@ -650,16 +655,16 @@ public void testSimpleNestedSortingWithNestedFilterMissing() throws Exception { searchRequestBuilder.setScroll("10m"); } - searchResponse = searchRequestBuilder.get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("10")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("5")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("2")); - client().prepareClearScroll().addScrollId("_all").get(); + assertResponse(searchRequestBuilder, response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits()[0].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("10")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("5")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("2")); + client().prepareClearScroll().addScrollId("_all").get(); + }); } public void testNestedSortWithMultiLevelFiltering() throws Exception { @@ -788,101 +793,106 @@ public void testNestedSortWithMultiLevelFiltering() throws Exception { refresh(); // access id = 1, read, max value, asc, should use matt and shay - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("acl.operation.user.username") - .setNestedSort( - new NestedSortBuilder("acl").setFilter(QueryBuilders.termQuery("acl.access_id", "1")) - .setNestedSort( - new NestedSortBuilder("acl.operation").setFilter(QueryBuilders.termQuery("acl.operation.name", "read")) - .setNestedSort(new NestedSortBuilder("acl.operation.user")) - ) - ) - .sortMode(SortMode.MAX) - .order(SortOrder.ASC) - ) - .get(); - - assertHitCount(searchResponse, 2); - assertThat(searchResponse.getHits().getHits().length, equalTo(2)); - 
assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("matt")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("shay")); - + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("acl.operation.user.username") + .setNestedSort( + new NestedSortBuilder("acl").setFilter(QueryBuilders.termQuery("acl.access_id", "1")) + .setNestedSort( + new NestedSortBuilder("acl.operation").setFilter(QueryBuilders.termQuery("acl.operation.name", "read")) + .setNestedSort(new NestedSortBuilder("acl.operation.user")) + ) + ) + .sortMode(SortMode.MAX) + .order(SortOrder.ASC) + ), + response -> { + assertHitCount(response, 2); + assertThat(response.getHits().getHits().length, equalTo(2)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("matt")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("shay")); + } + ); // access id = 1, read, min value, asc, should now use adrien and luca - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("acl.operation.user.username") - .setNestedSort( - new NestedSortBuilder("acl").setFilter(QueryBuilders.termQuery("acl.access_id", "1")) - .setNestedSort( - new NestedSortBuilder("acl.operation").setFilter(QueryBuilders.termQuery("acl.operation.name", "read")) - .setNestedSort(new NestedSortBuilder("acl.operation.user")) - ) - ) - .sortMode(SortMode.MIN) - .order(SortOrder.ASC) - ) - .get(); - - assertHitCount(searchResponse, 2); - assertThat(searchResponse.getHits().getHits().length, equalTo(2)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("adrien")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("luca")); - - // execute, by matt or luca, by user id, sort missing first - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("acl.operation.user.id") - .setNestedSort( - new NestedSortBuilder("acl").setNestedSort( - new NestedSortBuilder("acl.operation").setFilter(QueryBuilders.termQuery("acl.operation.name", "execute")) + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("acl.operation.user.username") + .setNestedSort( + new NestedSortBuilder("acl").setFilter(QueryBuilders.termQuery("acl.access_id", "1")) .setNestedSort( - new NestedSortBuilder("acl.operation.user").setFilter( - QueryBuilders.termsQuery("acl.operation.user.username", "matt", "luca") - ) + new NestedSortBuilder("acl.operation").setFilter(QueryBuilders.termQuery("acl.operation.name", "read")) + .setNestedSort(new NestedSortBuilder("acl.operation.user")) ) ) - ) - .missing("_first") - .sortMode(SortMode.MIN) - .order(SortOrder.DESC) - ) - .get(); - - assertHitCount(searchResponse, 2); - assertThat(searchResponse.getHits().getHits().length, equalTo(2)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); // missing first - 
assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("1")); - + .sortMode(SortMode.MIN) + .order(SortOrder.ASC) + ), + response -> { + assertHitCount(response, 2); + assertThat(response.getHits().getHits().length, equalTo(2)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("adrien")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("luca")); + } + ); + // execute, by matt or luca, by user id, sort missing first + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("acl.operation.user.id") + .setNestedSort( + new NestedSortBuilder("acl").setNestedSort( + new NestedSortBuilder("acl.operation").setFilter(QueryBuilders.termQuery("acl.operation.name", "execute")) + .setNestedSort( + new NestedSortBuilder("acl.operation.user").setFilter( + QueryBuilders.termsQuery("acl.operation.user.username", "matt", "luca") + ) + ) + ) + ) + .missing("_first") + .sortMode(SortMode.MIN) + .order(SortOrder.DESC) + ), + response -> { + assertHitCount(response, 2); + assertThat(response.getHits().getHits().length, equalTo(2)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); // missing first + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("1")); + } + ); // execute, by matt or luca, by username, sort missing last (default) - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("acl.operation.user.username") - .setNestedSort( - new NestedSortBuilder("acl").setNestedSort( - new NestedSortBuilder("acl.operation").setFilter(QueryBuilders.termQuery("acl.operation.name", "execute")) - .setNestedSort( - new NestedSortBuilder("acl.operation.user").setFilter( - QueryBuilders.termsQuery("acl.operation.user.username", "matt", "luca") + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("acl.operation.user.username") + .setNestedSort( + new NestedSortBuilder("acl").setNestedSort( + new NestedSortBuilder("acl.operation").setFilter(QueryBuilders.termQuery("acl.operation.name", "execute")) + .setNestedSort( + new NestedSortBuilder("acl.operation.user").setFilter( + QueryBuilders.termsQuery("acl.operation.user.username", "matt", "luca") + ) ) - ) + ) ) - ) - .sortMode(SortMode.MIN) - .order(SortOrder.DESC) - ) - .get(); - - assertHitCount(searchResponse, 2); - assertThat(searchResponse.getHits().getHits().length, equalTo(2)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("luca")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("1")); // missing last + .sortMode(SortMode.MIN) + .order(SortOrder.DESC) + ), + response -> { + assertHitCount(response, 2); + assertThat(response.getHits().getHits().length, equalTo(2)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("luca")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("1")); // missing last + } + ); } // https://github.com/elastic/elasticsearch/issues/31554 
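For context on the pattern applied throughout these hunks: every `SearchResponse response = request.get(); assert...` sequence is rewritten as `assertResponse(request, response -> ...)` (or the `assertNoFailuresAndResponse` / `assertHitCountAndNoFailures` variants imported from `org.elasticsearch.test.hamcrest.ElasticsearchAssertions`), so the response is only ever touched inside a callback that can release it afterwards. The sketch below is a hypothetical reconstruction of the mechanics, not the actual ElasticsearchAssertions source; `Releasable` and `decRef()` are stand-ins for Elasticsearch's reference-counted response types.

    import java.util.function.Consumer;
    import java.util.function.Supplier;

    // Minimal sketch (assumed shape, for illustration only) of a callback-style
    // response assertion helper like the one these tests migrate to.
    final class AssertResponseSketch {

        /** Stand-in for a reference-counted response that must be released. */
        interface Releasable {
            void decRef();
        }

        /**
         * Obtain the response, hand it to the caller's assertions, and always
         * release it afterwards, so a failing assertion cannot leak it.
         */
        static <T extends Releasable> void assertResponse(Supplier<T> request, Consumer<T> assertions) {
            T response = request.get();
            try {
                assertions.accept(response);
            } finally {
                response.decRef();
            }
        }
    }

The design consequence, visible in every converted test above and below, is that the assertions now run inside a lambda, so any locals they read must be effectively final.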
@@ -944,22 +954,25 @@ public void testLeakingSortValues() throws Exception { refresh(); - SearchResponse searchResponse = prepareSearch().setQuery(termQuery("_id", 2)) - .addSort( - SortBuilders.fieldSort("nested1.nested2.sortVal") - .setNestedSort( - new NestedSortBuilder("nested1").setNestedSort( - new NestedSortBuilder("nested1.nested2").setFilter(termQuery("nested1.nested2.nested2_keyword", "nested2_bar")) + assertResponse( + prepareSearch().setQuery(termQuery("_id", 2)) + .addSort( + SortBuilders.fieldSort("nested1.nested2.sortVal") + .setNestedSort( + new NestedSortBuilder("nested1").setNestedSort( + new NestedSortBuilder("nested1.nested2").setFilter( + termQuery("nested1.nested2.nested2_keyword", "nested2_bar") + ) + ) ) - ) - ) - .get(); - - assertHitCount(searchResponse, 1); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("2")); - + ), + response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getHits().length, equalTo(1)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("2")); + } + ); } public void testSortNestedWithNestedFilter() throws Exception { @@ -1126,215 +1139,236 @@ public void testSortNestedWithNestedFilter() throws Exception { refresh(); // Without nested filter - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("parent.child.child_values") - .setNestedSort(new NestedSortBuilder("parent.child")) - .order(SortOrder.ASC) - ) - .get(); - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("-3")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("-2")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("-1")); - + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("parent.child.child_values") + .setNestedSort(new NestedSortBuilder("parent.child")) + .order(SortOrder.ASC) + ), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("-3")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("-2")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("-1")); + } + ); // With nested filter NestedSortBuilder nestedSort = new NestedSortBuilder("parent.child"); nestedSort.setFilter(QueryBuilders.termQuery("parent.child.filter", true)); - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("parent.child.child_values").setNestedSort(nestedSort).order(SortOrder.ASC)) - .get(); - 
assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); - + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("parent.child.child_values").setNestedSort(nestedSort).order(SortOrder.ASC)), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); + } + ); // Nested path should be automatically detected, expect same results as above search request - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("parent.child.child_values").setNestedSort(nestedSort).order(SortOrder.ASC)) - .get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); - + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("parent.child.child_values").setNestedSort(nestedSort).order(SortOrder.ASC)), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); + } + ); nestedSort.setFilter(QueryBuilders.termQuery("parent.filter", false)); - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("parent.parent_values").setNestedSort(nestedSort).order(SortOrder.ASC)) - .get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); - 
assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); - - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("parent.child.child_values") - .setNestedSort( - new NestedSortBuilder("parent").setFilter(QueryBuilders.termQuery("parent.filter", false)) - .setNestedSort(new NestedSortBuilder("parent.child")) - ) - .sortMode(SortMode.MAX) - .order(SortOrder.ASC) - ) - .get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("4")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("6")); - + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("parent.parent_values").setNestedSort(nestedSort).order(SortOrder.ASC)), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); + } + ); + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("parent.child.child_values") + .setNestedSort( + new NestedSortBuilder("parent").setFilter(QueryBuilders.termQuery("parent.filter", false)) + .setNestedSort(new NestedSortBuilder("parent.child")) + ) + .sortMode(SortMode.MAX) + .order(SortOrder.ASC) + ), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("3")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("4")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("6")); + } + ); // Check if closest nested type is resolved - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("parent.child.child_obj.value") - .setNestedSort(new NestedSortBuilder("parent.child").setFilter(QueryBuilders.termQuery("parent.child.filter", true))) - .order(SortOrder.ASC) - ) - .get(); - - assertHitCount(searchResponse, 3); - 
assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); - + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("parent.child.child_obj.value") + .setNestedSort( + new NestedSortBuilder("parent.child").setFilter(QueryBuilders.termQuery("parent.child.filter", true)) + ) + .order(SortOrder.ASC) + ), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); + } + ); // Sort mode: sum - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("parent.child.child_values") - .setNestedSort(new NestedSortBuilder("parent.child")) - .sortMode(SortMode.SUM) - .order(SortOrder.ASC) - ) - .get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("7")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("11")); - - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("parent.child.child_values") - .setNestedSort(new NestedSortBuilder("parent.child")) - .sortMode(SortMode.SUM) - .order(SortOrder.DESC) - ) - .get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("11")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("7")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("2")); - + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("parent.child.child_values") + .setNestedSort(new NestedSortBuilder("parent.child")) + .sortMode(SortMode.SUM) + .order(SortOrder.ASC) + ), + response -> { + assertHitCount(response, 3); + 
assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("7")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("11")); + } + ); + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("parent.child.child_values") + .setNestedSort(new NestedSortBuilder("parent.child")) + .sortMode(SortMode.SUM) + .order(SortOrder.DESC) + ), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("11")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("7")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("2")); + } + ); // Sort mode: sum with filter - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("parent.child.child_values") - .setNestedSort(new NestedSortBuilder("parent.child").setFilter(QueryBuilders.termQuery("parent.child.filter", true))) - .sortMode(SortMode.SUM) - .order(SortOrder.ASC) - ) - .get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); - + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("parent.child.child_values") + .setNestedSort( + new NestedSortBuilder("parent.child").setFilter(QueryBuilders.termQuery("parent.child.filter", true)) + ) + .sortMode(SortMode.SUM) + .order(SortOrder.ASC) + ), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); + } + ); // Sort mode: avg - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("parent.child.child_values") - .setNestedSort(new NestedSortBuilder("parent.child")) - .sortMode(SortMode.AVG) - .order(SortOrder.ASC) - ) - .get(); - - 
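// Under sortMode(SUM) each root document gets a single comparison key: the sum of
// parent.child.child_values across its matching nested documents, which is where the expected
// sort keys 2, 7 and 11 above come from; DESC simply reverses that order. A standalone sketch
// with hypothetical per-document values (the test's real documents are indexed earlier in this file):
import java.util.List;

final class SumSortKeySketch {
    // one sort key per root document under SortMode.SUM
    static long sumKey(List<Long> nestedChildValues) {
        return nestedChildValues.stream().mapToLong(Long::longValue).sum();
    }

    public static void main(String[] args) {
        System.out.println(sumKey(List.of(1L, 1L))); // 2  -> first under ASC
        System.out.println(sumKey(List.of(3L, 4L))); // 7
        System.out.println(sumKey(List.of(3L, 8L))); // 11 -> last under ASC
    }
}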
assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); - - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("parent.child.child_values") - .setNestedSort(new NestedSortBuilder("parent.child")) - .sortMode(SortMode.AVG) - .order(SortOrder.DESC) - ) - .get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("1")); - + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("parent.child.child_values") + .setNestedSort(new NestedSortBuilder("parent.child")) + .sortMode(SortMode.AVG) + .order(SortOrder.ASC) + ), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); + } + ); + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("parent.child.child_values") + .setNestedSort(new NestedSortBuilder("parent.child")) + .sortMode(SortMode.AVG) + .order(SortOrder.DESC) + ), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("3")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("1")); + } + ); // Sort mode: avg with filter - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("parent.child.child_values") - .setNestedSort(new NestedSortBuilder("parent.child").setFilter(QueryBuilders.termQuery("parent.child.filter", true))) - .sortMode(SortMode.AVG) - .order(SortOrder.ASC) - ) - .get(); - - assertHitCount(searchResponse, 3); - 
assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("parent.child.child_values") + .setNestedSort( + new NestedSortBuilder("parent.child").setFilter(QueryBuilders.termQuery("parent.child.filter", true)) + ) + .sortMode(SortMode.AVG) + .order(SortOrder.ASC) + ), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); + } + ); } // Issue #9305 @@ -1482,27 +1516,30 @@ public void testNestedSortingWithNestedFilterAsFilter() throws Exception { assertTrue(indexResponse2.getShardInfo().getSuccessful() > 0); refresh(); - SearchResponse searchResponse = prepareSearch("test").addSort( - SortBuilders.fieldSort("users.first").setNestedSort(new NestedSortBuilder("users")).order(SortOrder.ASC) - ) - .addSort( - SortBuilders.fieldSort("users.first") - .order(SortOrder.ASC) - .setNestedSort( - new NestedSortBuilder("users").setFilter( - nestedQuery("users.workstations", termQuery("users.workstations.stationid", "s5"), ScoreMode.Avg) - ) - ) + assertNoFailuresAndResponse( + prepareSearch("test").addSort( + SortBuilders.fieldSort("users.first").setNestedSort(new NestedSortBuilder("users")).order(SortOrder.ASC) ) - .get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 2); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("2")); - assertThat(searchResponse.getHits().getAt(0).getSortValues()[0].toString(), equalTo("fname1")); - assertThat(searchResponse.getHits().getAt(0).getSortValues()[1].toString(), equalTo("fname1")); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getSortValues()[0].toString(), equalTo("fname1")); - assertThat(searchResponse.getHits().getAt(1).getSortValues()[1].toString(), equalTo("fname3")); + .addSort( + SortBuilders.fieldSort("users.first") + .order(SortOrder.ASC) + .setNestedSort( + new NestedSortBuilder("users").setFilter( + nestedQuery("users.workstations", termQuery("users.workstations.stationid", "s5"), ScoreMode.Avg) + ) + ) + ), + response -> { + assertNoFailures(response); + assertHitCount(response, 2); + assertThat(response.getHits().getAt(0).getId(), equalTo("2")); + assertThat(response.getHits().getAt(0).getSortValues()[0].toString(), equalTo("fname1")); + assertThat(response.getHits().getAt(0).getSortValues()[1].toString(), equalTo("fname1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("1")); + 
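// assertNoFailuresAndResponse, used in the conversion here, is the same consumer pattern with a
// shard-failure check up front; assertHitCountAndNoFailures (used further down for the
// fixed-bitset test) additionally folds in the hit-count check. A hedged sketch built on the
// assertResponse sketch earlier in this file:
static void assertNoFailuresAndResponse(SearchRequestBuilder request, Consumer<SearchResponse> consumer) {
    assertResponse(request, response -> {
        assertNoFailures(response); // fail fast on shard-level failures
        consumer.accept(response);  // then run the caller's assertions
    });
}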
assertThat(response.getHits().getAt(1).getSortValues()[0].toString(), equalTo("fname1")); + assertThat(response.getHits().getAt(1).getSortValues()[1].toString(), equalTo("fname3")); + } + ); } public void testCheckFixedBitSetCache() throws Exception { @@ -1546,11 +1583,10 @@ public void testCheckFixedBitSetCache() throws Exception { assertThat(clusterStatsResponse.getIndicesStats().getSegments().getBitsetMemoryInBytes(), equalTo(0L)); // only when querying with nested the fixed bitsets are loaded - SearchResponse searchResponse = prepareSearch("test").setQuery( - nestedQuery("array1", termQuery("array1.field1", "value1"), ScoreMode.Avg) - ).get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(5L)); + assertHitCountAndNoFailures( + prepareSearch("test").setQuery(nestedQuery("array1", termQuery("array1.field1", "value1"), ScoreMode.Avg)), + 5L + ); } clusterStatsResponse = clusterAdmin().prepareClusterStats().get(); assertThat(clusterStatsResponse.getIndicesStats().getSegments().getBitsetMemoryInBytes(), greaterThan(0L)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/nested/VectorNestedIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/nested/VectorNestedIT.java index 9219641f1d3bf..4832964427540 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/nested/VectorNestedIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/nested/VectorNestedIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.search.nested; import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.search.vectors.KnnSearchBuilder; @@ -18,6 +17,7 @@ import java.util.List; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -66,10 +66,11 @@ public void testSimpleNested() throws Exception { assertThat(getResponse.getSourceAsBytes(), notNullValue()); refresh(); - SearchResponse searchResponse = prepareSearch("test").setKnnSearch( - List.of(new KnnSearchBuilder("nested.vector", new float[] { 1, 1, 1 }, 1, 1, null)) - ).setAllowPartialSearchResults(false).get(); - assertThat(searchResponse.getHits().getHits().length, greaterThan(0)); + assertResponse( + prepareSearch("test").setKnnSearch(List.of(new KnnSearchBuilder("nested.vector", new float[] { 1, 1, 1 }, 1, 1, null))) + .setAllowPartialSearchResults(false), + response -> assertThat(response.getHits().getHits().length, greaterThan(0)) + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java index 526d523bb0638..560806a68c908 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.search.profile.aggregation; import org.elasticsearch.action.index.IndexRequestBuilder; -import 
org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; @@ -42,7 +41,7 @@ import static org.elasticsearch.test.MapMatcher.assertMap; import static org.elasticsearch.test.MapMatcher.matchesMap; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -122,110 +121,113 @@ protected void setupSuiteScopeCluster() throws Exception { } public void testSimpleProfile() { - SearchResponse response = prepareSearch("idx").setProfile(true) - .addAggregation(histogram("histo").field(NUMBER_FIELD).interval(1L)) - .get(); - assertNoFailures(response); - Map profileResults = response.getProfileResults(); - assertThat(profileResults, notNullValue()); - assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries)); - for (SearchProfileShardResult profileShardResult : profileResults.values()) { - assertThat(profileShardResult, notNullValue()); - AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); - assertThat(aggProfileResults, notNullValue()); - List aggProfileResultsList = aggProfileResults.getProfileResults(); - assertThat(aggProfileResultsList, notNullValue()); - assertThat(aggProfileResultsList.size(), equalTo(1)); - ProfileResult histoAggResult = aggProfileResultsList.get(0); - assertThat(histoAggResult, notNullValue()); - assertThat(histoAggResult.getQueryName(), equalTo("NumericHistogramAggregator")); - assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); - assertThat(histoAggResult.getProfiledChildren().size(), equalTo(0)); - assertThat(histoAggResult.getTime(), greaterThan(0L)); - Map breakdown = histoAggResult.getTimeBreakdown(); - assertThat(breakdown, notNullValue()); - assertThat(breakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(breakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(breakdown.get(COLLECT), greaterThan(0L)); - assertThat(breakdown.get(BUILD_AGGREGATION).longValue(), greaterThan(0L)); - assertThat(breakdown.get(REDUCE), equalTo(0L)); - assertMap( - histoAggResult.getDebugInfo(), - matchesMap().entry(TOTAL_BUCKETS, greaterThan(0L)).entry(BUILT_BUCKETS, greaterThan(0)) - ); - } + assertNoFailuresAndResponse( + prepareSearch("idx").setProfile(true).addAggregation(histogram("histo").field(NUMBER_FIELD).interval(1L)), + response -> { + Map profileResults = response.getProfileResults(); + assertThat(profileResults, notNullValue()); + assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries)); + for (SearchProfileShardResult profileShardResult : profileResults.values()) { + assertThat(profileShardResult, notNullValue()); + AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); + assertThat(aggProfileResults, notNullValue()); + List aggProfileResultsList = aggProfileResults.getProfileResults(); + assertThat(aggProfileResultsList, notNullValue()); + assertThat(aggProfileResultsList.size(), equalTo(1)); + ProfileResult histoAggResult = aggProfileResultsList.get(0); + assertThat(histoAggResult, 
notNullValue()); + assertThat(histoAggResult.getQueryName(), equalTo("NumericHistogramAggregator")); + assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); + assertThat(histoAggResult.getProfiledChildren().size(), equalTo(0)); + assertThat(histoAggResult.getTime(), greaterThan(0L)); + Map breakdown = histoAggResult.getTimeBreakdown(); + assertThat(breakdown, notNullValue()); + assertThat(breakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(breakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(breakdown.get(COLLECT), greaterThan(0L)); + assertThat(breakdown.get(BUILD_AGGREGATION).longValue(), greaterThan(0L)); + assertThat(breakdown.get(REDUCE), equalTo(0L)); + assertMap( + histoAggResult.getDebugInfo(), + matchesMap().entry(TOTAL_BUCKETS, greaterThan(0L)).entry(BUILT_BUCKETS, greaterThan(0)) + ); + } + } + ); } public void testMultiLevelProfile() { - SearchResponse response = prepareSearch("idx").setProfile(true) - .addAggregation( - histogram("histo").field(NUMBER_FIELD) - .interval(1L) - .subAggregation( - terms("terms").field(TAG_FIELD) - .order(BucketOrder.aggregation("avg", false)) - .subAggregation(avg("avg").field(NUMBER_FIELD)) - ) - ) - .get(); - assertNoFailures(response); - Map profileResults = response.getProfileResults(); - assertThat(profileResults, notNullValue()); - assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries)); - for (SearchProfileShardResult profileShardResult : profileResults.values()) { - assertThat(profileShardResult, notNullValue()); - AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); - assertThat(aggProfileResults, notNullValue()); - List aggProfileResultsList = aggProfileResults.getProfileResults(); - assertThat(aggProfileResultsList, notNullValue()); - assertThat(aggProfileResultsList.size(), equalTo(1)); - ProfileResult histoAggResult = aggProfileResultsList.get(0); - assertThat(histoAggResult, notNullValue()); - assertThat(histoAggResult.getQueryName(), equalTo("NumericHistogramAggregator")); - assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); - assertThat(histoAggResult.getTime(), greaterThan(0L)); - Map histoBreakdown = histoAggResult.getTimeBreakdown(); - assertThat(histoBreakdown, notNullValue()); - assertThat(histoBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(histoBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(histoBreakdown.get(COLLECT), greaterThan(0L)); - assertThat(histoBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(histoBreakdown.get(REDUCE), equalTo(0L)); - assertMap( - histoAggResult.getDebugInfo(), - matchesMap().entry(TOTAL_BUCKETS, greaterThan(0L)).entry(BUILT_BUCKETS, greaterThan(0)) - ); - - ProfileResult termsAggResult = histoAggResult.getProfiledChildren().get(0); - assertThat(termsAggResult, notNullValue()); - assertThat(termsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName())); - assertThat(termsAggResult.getLuceneDescription(), equalTo("terms")); - assertThat(termsAggResult.getTime(), greaterThan(0L)); - Map termsBreakdown = termsAggResult.getTimeBreakdown(); - assertThat(termsBreakdown, notNullValue()); - assertThat(termsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(termsBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(termsBreakdown.get(COLLECT), greaterThan(0L)); - assertThat(termsBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(termsBreakdown.get(REDUCE), equalTo(0L)); - 
assertRemapTermsDebugInfo(termsAggResult); - assertThat(termsAggResult.getProfiledChildren().size(), equalTo(1)); - - ProfileResult avgAggResult = termsAggResult.getProfiledChildren().get(0); - assertThat(avgAggResult, notNullValue()); - assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator")); - assertThat(avgAggResult.getLuceneDescription(), equalTo("avg")); - assertThat(avgAggResult.getTime(), greaterThan(0L)); - Map avgBreakdown = avgAggResult.getTimeBreakdown(); - assertThat(avgBreakdown, notNullValue()); - assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(avgBreakdown.get(COLLECT), greaterThan(0L)); - assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(avgBreakdown.get(REDUCE), equalTo(0L)); - assertMap(avgAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); - assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0)); - } + assertNoFailuresAndResponse( + prepareSearch("idx").setProfile(true) + .addAggregation( + histogram("histo").field(NUMBER_FIELD) + .interval(1L) + .subAggregation( + terms("terms").field(TAG_FIELD) + .order(BucketOrder.aggregation("avg", false)) + .subAggregation(avg("avg").field(NUMBER_FIELD)) + ) + ), + response -> { + Map profileResults = response.getProfileResults(); + assertThat(profileResults, notNullValue()); + assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries)); + for (SearchProfileShardResult profileShardResult : profileResults.values()) { + assertThat(profileShardResult, notNullValue()); + AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); + assertThat(aggProfileResults, notNullValue()); + List aggProfileResultsList = aggProfileResults.getProfileResults(); + assertThat(aggProfileResultsList, notNullValue()); + assertThat(aggProfileResultsList.size(), equalTo(1)); + ProfileResult histoAggResult = aggProfileResultsList.get(0); + assertThat(histoAggResult, notNullValue()); + assertThat(histoAggResult.getQueryName(), equalTo("NumericHistogramAggregator")); + assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); + assertThat(histoAggResult.getTime(), greaterThan(0L)); + Map histoBreakdown = histoAggResult.getTimeBreakdown(); + assertThat(histoBreakdown, notNullValue()); + assertThat(histoBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(histoBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(histoBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(histoBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(histoBreakdown.get(REDUCE), equalTo(0L)); + assertMap( + histoAggResult.getDebugInfo(), + matchesMap().entry(TOTAL_BUCKETS, greaterThan(0L)).entry(BUILT_BUCKETS, greaterThan(0)) + ); + + ProfileResult termsAggResult = histoAggResult.getProfiledChildren().get(0); + assertThat(termsAggResult, notNullValue()); + assertThat(termsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName())); + assertThat(termsAggResult.getLuceneDescription(), equalTo("terms")); + assertThat(termsAggResult.getTime(), greaterThan(0L)); + Map termsBreakdown = termsAggResult.getTimeBreakdown(); + assertThat(termsBreakdown, notNullValue()); + assertThat(termsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(termsBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(termsBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(termsBreakdown.get(BUILD_AGGREGATION), 
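// Shape of the structure these profiler assertions traverse: one SearchProfileShardResult per
// shard, each holding a list of per-aggregation ProfileResults whose time breakdown maps phase
// names to timings (nanoseconds in the profile API). A sketch using the same getters as the
// surrounding assertions, printing instead of asserting:
for (SearchProfileShardResult shard : response.getProfileResults().values()) {
    for (ProfileResult agg : shard.getAggregationProfileResults().getProfileResults()) {
        // phase keys include INITIALIZE, COLLECT, BUILD_AGGREGATION and REDUCE
        Long initNanos = agg.getTimeBreakdown().get(INITIALIZE);
        System.out.println(agg.getLuceneDescription() + " initialize=" + initNanos + "ns");
    }
}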
greaterThan(0L)); + assertThat(termsBreakdown.get(REDUCE), equalTo(0L)); + assertRemapTermsDebugInfo(termsAggResult); + assertThat(termsAggResult.getProfiledChildren().size(), equalTo(1)); + + ProfileResult avgAggResult = termsAggResult.getProfiledChildren().get(0); + assertThat(avgAggResult, notNullValue()); + assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator")); + assertThat(avgAggResult.getLuceneDescription(), equalTo("avg")); + assertThat(avgAggResult.getTime(), greaterThan(0L)); + Map avgBreakdown = avgAggResult.getTimeBreakdown(); + assertThat(avgBreakdown, notNullValue()); + assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(avgBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(avgBreakdown.get(REDUCE), equalTo(0L)); + assertMap(avgAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); + assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0)); + } + } + ); } private void assertRemapTermsDebugInfo(ProfileResult termsAggResult, String... deferredAggregators) { @@ -243,375 +245,386 @@ private void assertRemapTermsDebugInfo(ProfileResult termsAggResult, String... d } public void testMultiLevelProfileBreadthFirst() { - SearchResponse response = prepareSearch("idx").setProfile(true) - .addAggregation( - histogram("histo").field(NUMBER_FIELD) - .interval(1L) - .subAggregation( - terms("terms").collectMode(SubAggCollectionMode.BREADTH_FIRST) - .field(TAG_FIELD) - .subAggregation(avg("avg").field(NUMBER_FIELD)) - ) - ) - .get(); - assertNoFailures(response); - Map profileResults = response.getProfileResults(); - assertThat(profileResults, notNullValue()); - assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries)); - for (SearchProfileShardResult profileShardResult : profileResults.values()) { - assertThat(profileShardResult, notNullValue()); - AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); - assertThat(aggProfileResults, notNullValue()); - List aggProfileResultsList = aggProfileResults.getProfileResults(); - assertThat(aggProfileResultsList, notNullValue()); - assertThat(aggProfileResultsList.size(), equalTo(1)); - ProfileResult histoAggResult = aggProfileResultsList.get(0); - assertThat(histoAggResult, notNullValue()); - assertThat(histoAggResult.getQueryName(), equalTo("NumericHistogramAggregator")); - assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); - assertThat(histoAggResult.getTime(), greaterThan(0L)); - Map histoBreakdown = histoAggResult.getTimeBreakdown(); - assertThat(histoBreakdown, notNullValue()); - assertThat(histoBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(histoBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(histoBreakdown.get(COLLECT), greaterThan(0L)); - assertThat(histoBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(histoBreakdown.get(REDUCE), equalTo(0L)); - assertMap( - histoAggResult.getDebugInfo(), - matchesMap().entry(TOTAL_BUCKETS, greaterThan(0L)).entry(BUILT_BUCKETS, greaterThan(0)) - ); - assertThat(histoAggResult.getProfiledChildren().size(), equalTo(1)); - - ProfileResult termsAggResult = histoAggResult.getProfiledChildren().get(0); - assertThat(termsAggResult, notNullValue()); - assertThat(termsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName())); - 
assertThat(termsAggResult.getLuceneDescription(), equalTo("terms")); - assertThat(termsAggResult.getTime(), greaterThan(0L)); - Map termsBreakdown = termsAggResult.getTimeBreakdown(); - assertThat(termsBreakdown, notNullValue()); - assertThat(termsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(termsBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(termsBreakdown.get(COLLECT), greaterThan(0L)); - assertThat(termsBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(termsBreakdown.get(REDUCE), equalTo(0L)); - assertRemapTermsDebugInfo(termsAggResult, "avg"); - assertThat(termsAggResult.getProfiledChildren().size(), equalTo(1)); - - ProfileResult avgAggResult = termsAggResult.getProfiledChildren().get(0); - assertThat(avgAggResult, notNullValue()); - assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator")); - assertThat(avgAggResult.getLuceneDescription(), equalTo("avg")); - assertThat(avgAggResult.getTime(), greaterThan(0L)); - Map avgBreakdown = avgAggResult.getTimeBreakdown(); - assertThat(avgBreakdown, notNullValue()); - assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(avgBreakdown.get(COLLECT), greaterThan(0L)); - assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(avgBreakdown.get(REDUCE), equalTo(0L)); - assertMap(avgAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); - assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0)); - } + assertNoFailuresAndResponse( + prepareSearch("idx").setProfile(true) + .addAggregation( + histogram("histo").field(NUMBER_FIELD) + .interval(1L) + .subAggregation( + terms("terms").collectMode(SubAggCollectionMode.BREADTH_FIRST) + .field(TAG_FIELD) + .subAggregation(avg("avg").field(NUMBER_FIELD)) + ) + ), + response -> { + Map profileResults = response.getProfileResults(); + assertThat(profileResults, notNullValue()); + assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries)); + for (SearchProfileShardResult profileShardResult : profileResults.values()) { + assertThat(profileShardResult, notNullValue()); + AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); + assertThat(aggProfileResults, notNullValue()); + List aggProfileResultsList = aggProfileResults.getProfileResults(); + assertThat(aggProfileResultsList, notNullValue()); + assertThat(aggProfileResultsList.size(), equalTo(1)); + ProfileResult histoAggResult = aggProfileResultsList.get(0); + assertThat(histoAggResult, notNullValue()); + assertThat(histoAggResult.getQueryName(), equalTo("NumericHistogramAggregator")); + assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); + assertThat(histoAggResult.getTime(), greaterThan(0L)); + Map histoBreakdown = histoAggResult.getTimeBreakdown(); + assertThat(histoBreakdown, notNullValue()); + assertThat(histoBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(histoBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(histoBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(histoBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(histoBreakdown.get(REDUCE), equalTo(0L)); + assertMap( + histoAggResult.getDebugInfo(), + matchesMap().entry(TOTAL_BUCKETS, greaterThan(0L)).entry(BUILT_BUCKETS, greaterThan(0)) + ); + assertThat(histoAggResult.getProfiledChildren().size(), equalTo(1)); + + ProfileResult termsAggResult = histoAggResult.getProfiledChildren().get(0); + 
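// With SubAggCollectionMode.BREADTH_FIRST the terms aggregation first selects its top buckets
// and only then replays the deferred sub-aggregations, which is why
// assertRemapTermsDebugInfo(termsAggResult, "avg") below expects "avg" in the deferred list.
// Roughly the same expectation written directly against the debug info, assuming MapMatcher's
// extraOk() to ignore the other debug keys:
assertMap(termsAggResult.getDebugInfo(), matchesMap().extraOk().entry(DEFERRED, List.of("avg")));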
assertThat(termsAggResult, notNullValue()); + assertThat(termsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName())); + assertThat(termsAggResult.getLuceneDescription(), equalTo("terms")); + assertThat(termsAggResult.getTime(), greaterThan(0L)); + Map termsBreakdown = termsAggResult.getTimeBreakdown(); + assertThat(termsBreakdown, notNullValue()); + assertThat(termsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(termsBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(termsBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(termsBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(termsBreakdown.get(REDUCE), equalTo(0L)); + assertRemapTermsDebugInfo(termsAggResult, "avg"); + assertThat(termsAggResult.getProfiledChildren().size(), equalTo(1)); + + ProfileResult avgAggResult = termsAggResult.getProfiledChildren().get(0); + assertThat(avgAggResult, notNullValue()); + assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator")); + assertThat(avgAggResult.getLuceneDescription(), equalTo("avg")); + assertThat(avgAggResult.getTime(), greaterThan(0L)); + Map avgBreakdown = avgAggResult.getTimeBreakdown(); + assertThat(avgBreakdown, notNullValue()); + assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(avgBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(avgBreakdown.get(REDUCE), equalTo(0L)); + assertMap(avgAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); + assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0)); + } + } + ); } public void testDiversifiedAggProfile() { - SearchResponse response = prepareSearch("idx").setProfile(true) - .addAggregation( - diversifiedSampler("diversify").shardSize(10) - .field(STRING_FIELD) - .maxDocsPerValue(2) - .subAggregation(max("max").field(NUMBER_FIELD)) - ) - .get(); - assertNoFailures(response); - Map profileResults = response.getProfileResults(); - assertThat(profileResults, notNullValue()); - assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries)); - for (SearchProfileShardResult profileShardResult : profileResults.values()) { - assertThat(profileShardResult, notNullValue()); - AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); - assertThat(aggProfileResults, notNullValue()); - List aggProfileResultsList = aggProfileResults.getProfileResults(); - assertThat(aggProfileResultsList, notNullValue()); - assertThat(aggProfileResultsList.size(), equalTo(1)); - ProfileResult diversifyAggResult = aggProfileResultsList.get(0); - assertThat(diversifyAggResult, notNullValue()); - assertThat(diversifyAggResult.getQueryName(), equalTo(DiversifiedOrdinalsSamplerAggregator.class.getSimpleName())); - assertThat(diversifyAggResult.getLuceneDescription(), equalTo("diversify")); - assertThat(diversifyAggResult.getTime(), greaterThan(0L)); - Map diversifyBreakdown = diversifyAggResult.getTimeBreakdown(); - assertThat(diversifyBreakdown, notNullValue()); - assertThat(diversifyBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(diversifyBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(diversifyBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); - assertThat(diversifyBreakdown.get(COLLECT), greaterThan(0L)); - assertThat(diversifyBreakdown.get(POST_COLLECTION), greaterThan(0L)); - 
assertThat(diversifyBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(diversifyBreakdown.get(REDUCE), equalTo(0L)); - assertMap(diversifyAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0)).entry(DEFERRED, List.of("max"))); - - ProfileResult maxAggResult = diversifyAggResult.getProfiledChildren().get(0); - assertThat(maxAggResult, notNullValue()); - assertThat(maxAggResult.getQueryName(), equalTo("MaxAggregator")); - assertThat(maxAggResult.getLuceneDescription(), equalTo("max")); - assertThat(maxAggResult.getTime(), greaterThan(0L)); - Map maxBreakdown = maxAggResult.getTimeBreakdown(); - assertThat(maxBreakdown, notNullValue()); - assertThat(maxBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(diversifyBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(diversifyBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); - assertThat(diversifyBreakdown.get(COLLECT), greaterThan(0L)); - assertThat(diversifyBreakdown.get(POST_COLLECTION), greaterThan(0L)); - assertThat(maxBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(maxBreakdown.get(REDUCE), equalTo(0L)); - assertMap(maxAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); - assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0)); - } + assertNoFailuresAndResponse( + prepareSearch("idx").setProfile(true) + .addAggregation( + diversifiedSampler("diversify").shardSize(10) + .field(STRING_FIELD) + .maxDocsPerValue(2) + .subAggregation(max("max").field(NUMBER_FIELD)) + ), + response -> { + Map profileResults = response.getProfileResults(); + assertThat(profileResults, notNullValue()); + assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries)); + for (SearchProfileShardResult profileShardResult : profileResults.values()) { + assertThat(profileShardResult, notNullValue()); + AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); + assertThat(aggProfileResults, notNullValue()); + List aggProfileResultsList = aggProfileResults.getProfileResults(); + assertThat(aggProfileResultsList, notNullValue()); + assertThat(aggProfileResultsList.size(), equalTo(1)); + ProfileResult diversifyAggResult = aggProfileResultsList.get(0); + assertThat(diversifyAggResult, notNullValue()); + assertThat(diversifyAggResult.getQueryName(), equalTo(DiversifiedOrdinalsSamplerAggregator.class.getSimpleName())); + assertThat(diversifyAggResult.getLuceneDescription(), equalTo("diversify")); + assertThat(diversifyAggResult.getTime(), greaterThan(0L)); + Map diversifyBreakdown = diversifyAggResult.getTimeBreakdown(); + assertThat(diversifyBreakdown, notNullValue()); + assertThat(diversifyBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(diversifyBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(diversifyBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); + assertThat(diversifyBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(diversifyBreakdown.get(POST_COLLECTION), greaterThan(0L)); + assertThat(diversifyBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(diversifyBreakdown.get(REDUCE), equalTo(0L)); + assertMap( + diversifyAggResult.getDebugInfo(), + matchesMap().entry(BUILT_BUCKETS, greaterThan(0)).entry(DEFERRED, List.of("max")) + ); + + ProfileResult maxAggResult = diversifyAggResult.getProfiledChildren().get(0); + assertThat(maxAggResult, notNullValue()); + assertThat(maxAggResult.getQueryName(), equalTo("MaxAggregator")); + 
assertThat(maxAggResult.getLuceneDescription(), equalTo("max")); + assertThat(maxAggResult.getTime(), greaterThan(0L)); + Map<String, Long> maxBreakdown = maxAggResult.getTimeBreakdown(); + assertThat(maxBreakdown, notNullValue()); + assertThat(maxBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(maxBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(maxBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); + assertThat(maxBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(maxBreakdown.get(POST_COLLECTION), greaterThan(0L)); + assertThat(maxBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(maxBreakdown.get(REDUCE), equalTo(0L)); + assertMap(maxAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); + assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0)); + } + } + ); } public void testComplexProfile() { - SearchResponse response = prepareSearch("idx").setProfile(true) - .addAggregation( - histogram("histo").field(NUMBER_FIELD) - .interval(1L) - .subAggregation( - terms("tags").field(TAG_FIELD) - .subAggregation(avg("avg").field(NUMBER_FIELD)) - .subAggregation(max("max").field(NUMBER_FIELD)) - ) - .subAggregation( - terms("strings").field(STRING_FIELD) - .subAggregation(avg("avg").field(NUMBER_FIELD)) - .subAggregation(max("max").field(NUMBER_FIELD)) - .subAggregation( - terms("tags").field(TAG_FIELD) - .subAggregation(avg("avg").field(NUMBER_FIELD)) - .subAggregation(max("max").field(NUMBER_FIELD)) - ) - ) - ) - .get(); - assertNoFailures(response); - Map<String, SearchProfileShardResult> profileResults = response.getProfileResults(); - assertThat(profileResults, notNullValue()); - assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries)); - for (SearchProfileShardResult profileShardResult : profileResults.values()) { - assertThat(profileShardResult, notNullValue()); - AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); - assertThat(aggProfileResults, notNullValue()); - List<ProfileResult> aggProfileResultsList = aggProfileResults.getProfileResults(); - assertThat(aggProfileResultsList, notNullValue()); - assertThat(aggProfileResultsList.size(), equalTo(1)); - ProfileResult histoAggResult = aggProfileResultsList.get(0); - assertThat(histoAggResult, notNullValue()); - assertThat(histoAggResult.getQueryName(), equalTo("NumericHistogramAggregator")); - assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); - assertThat(histoAggResult.getTime(), greaterThan(0L)); - Map<String, Long> histoBreakdown = histoAggResult.getTimeBreakdown(); - assertThat(histoBreakdown, notNullValue()); - assertThat(histoBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(histoBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(histoBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); - assertThat(histoBreakdown.get(COLLECT), greaterThan(0L)); - assertThat(histoBreakdown.get(POST_COLLECTION), greaterThan(0L)); - assertThat(histoBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(histoBreakdown.get(REDUCE), equalTo(0L)); - assertMap( - histoAggResult.getDebugInfo(), - matchesMap().entry(TOTAL_BUCKETS, greaterThan(0L)).entry(BUILT_BUCKETS, greaterThan(0)) - ); - assertThat(histoAggResult.getProfiledChildren().size(), equalTo(2)); - - Map<String, ProfileResult> histoAggResultSubAggregations = histoAggResult.getProfiledChildren() - .stream() - .collect(Collectors.toMap(ProfileResult::getLuceneDescription, s -> s)); - - ProfileResult tagsAggResult = histoAggResultSubAggregations.get("tags"); - assertThat(tagsAggResult,
notNullValue()); - assertThat(tagsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName())); - assertThat(tagsAggResult.getTime(), greaterThan(0L)); - Map tagsBreakdown = tagsAggResult.getTimeBreakdown(); - assertThat(tagsBreakdown, notNullValue()); - assertThat(tagsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(tagsBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(tagsBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); - assertThat(tagsBreakdown.get(COLLECT), greaterThan(0L)); - assertThat(tagsBreakdown.get(POST_COLLECTION), greaterThan(0L)); - assertThat(tagsBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(tagsBreakdown.get(REDUCE), equalTo(0L)); - assertRemapTermsDebugInfo(tagsAggResult); - assertThat(tagsAggResult.getProfiledChildren().size(), equalTo(2)); - - Map tagsAggResultSubAggregations = tagsAggResult.getProfiledChildren() - .stream() - .collect(Collectors.toMap(ProfileResult::getLuceneDescription, s -> s)); - - ProfileResult avgAggResult = tagsAggResultSubAggregations.get("avg"); - assertThat(avgAggResult, notNullValue()); - assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator")); - assertThat(avgAggResult.getTime(), greaterThan(0L)); - Map avgBreakdown = avgAggResult.getTimeBreakdown(); - assertThat(avgBreakdown, notNullValue()); - assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(avgBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); - assertThat(avgBreakdown.get(COLLECT), greaterThan(0L)); - assertThat(avgBreakdown.get(POST_COLLECTION), greaterThan(0L)); - assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(avgBreakdown.get(REDUCE), equalTo(0L)); - assertMap(avgAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); - assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0)); - - ProfileResult maxAggResult = tagsAggResultSubAggregations.get("max"); - assertThat(maxAggResult, notNullValue()); - assertThat(maxAggResult.getQueryName(), equalTo("MaxAggregator")); - assertThat(maxAggResult.getTime(), greaterThan(0L)); - Map maxBreakdown = maxAggResult.getTimeBreakdown(); - assertThat(maxBreakdown, notNullValue()); - assertThat(maxBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(maxBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(maxBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); - assertThat(maxBreakdown.get(COLLECT), greaterThan(0L)); - assertThat(maxBreakdown.get(POST_COLLECTION), greaterThan(0L)); - assertThat(maxBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(maxBreakdown.get(REDUCE), equalTo(0L)); - assertMap(maxAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); - assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0)); - - ProfileResult stringsAggResult = histoAggResultSubAggregations.get("strings"); - assertThat(stringsAggResult, notNullValue()); - assertThat(stringsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName())); - assertThat(stringsAggResult.getTime(), greaterThan(0L)); - Map stringsBreakdown = stringsAggResult.getTimeBreakdown(); - assertThat(stringsBreakdown, notNullValue()); - assertThat(stringsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(stringsBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(stringsBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); - assertThat(stringsBreakdown.get(COLLECT), 
greaterThan(0L)); - assertThat(stringsBreakdown.get(POST_COLLECTION), greaterThan(0L)); - assertThat(stringsBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(stringsBreakdown.get(REDUCE), equalTo(0L)); - assertRemapTermsDebugInfo(stringsAggResult); - assertThat(stringsAggResult.getProfiledChildren().size(), equalTo(3)); - - Map stringsAggResultSubAggregations = stringsAggResult.getProfiledChildren() - .stream() - .collect(Collectors.toMap(ProfileResult::getLuceneDescription, s -> s)); - - avgAggResult = stringsAggResultSubAggregations.get("avg"); - assertThat(avgAggResult, notNullValue()); - assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator")); - assertThat(avgAggResult.getTime(), greaterThan(0L)); - avgBreakdown = avgAggResult.getTimeBreakdown(); - assertThat(avgBreakdown, notNullValue()); - assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(avgBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); - assertThat(avgBreakdown.get(COLLECT), greaterThan(0L)); - assertThat(avgBreakdown.get(POST_COLLECTION), greaterThan(0L)); - assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(avgBreakdown.get(REDUCE), equalTo(0L)); - assertMap(avgAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); - assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0)); - - maxAggResult = stringsAggResultSubAggregations.get("max"); - assertThat(maxAggResult, notNullValue()); - assertThat(maxAggResult.getQueryName(), equalTo("MaxAggregator")); - assertThat(maxAggResult.getTime(), greaterThan(0L)); - maxBreakdown = maxAggResult.getTimeBreakdown(); - assertThat(maxBreakdown, notNullValue()); - assertThat(maxBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(maxBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(maxBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); - assertThat(maxBreakdown.get(COLLECT), greaterThan(0L)); - assertThat(maxBreakdown.get(POST_COLLECTION), greaterThan(0L)); - assertThat(maxBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(maxBreakdown.get(REDUCE), equalTo(0L)); - assertMap(maxAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); - assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0)); - - tagsAggResult = stringsAggResultSubAggregations.get("tags"); - assertThat(tagsAggResult, notNullValue()); - assertThat(tagsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName())); - assertThat(tagsAggResult.getLuceneDescription(), equalTo("tags")); - assertThat(tagsAggResult.getTime(), greaterThan(0L)); - tagsBreakdown = tagsAggResult.getTimeBreakdown(); - assertThat(tagsBreakdown, notNullValue()); - assertThat(tagsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(tagsBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(tagsBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); - assertThat(tagsBreakdown.get(COLLECT), greaterThan(0L)); - assertThat(tagsBreakdown.get(POST_COLLECTION), greaterThan(0L)); - assertThat(tagsBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(tagsBreakdown.get(REDUCE), equalTo(0L)); - assertRemapTermsDebugInfo(tagsAggResult); - assertThat(tagsAggResult.getProfiledChildren().size(), equalTo(2)); - - tagsAggResultSubAggregations = tagsAggResult.getProfiledChildren() - .stream() - .collect(Collectors.toMap(ProfileResult::getLuceneDescription, s -> s)); - - avgAggResult = 
tagsAggResultSubAggregations.get("avg"); - assertThat(avgAggResult, notNullValue()); - assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator")); - assertThat(avgAggResult.getTime(), greaterThan(0L)); - avgBreakdown = avgAggResult.getTimeBreakdown(); - assertThat(avgBreakdown, notNullValue()); - assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(avgBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); - assertThat(avgBreakdown.get(COLLECT), greaterThan(0L)); - assertThat(avgBreakdown.get(POST_COLLECTION), greaterThan(0L)); - assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(avgBreakdown.get(REDUCE), equalTo(0L)); - assertMap(avgAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); - assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0)); - - maxAggResult = tagsAggResultSubAggregations.get("max"); - assertThat(maxAggResult, notNullValue()); - assertThat(maxAggResult.getQueryName(), equalTo("MaxAggregator")); - assertThat(maxAggResult.getTime(), greaterThan(0L)); - maxBreakdown = maxAggResult.getTimeBreakdown(); - assertThat(maxBreakdown, notNullValue()); - assertThat(maxBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(maxBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(maxBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); - assertThat(maxBreakdown.get(COLLECT), greaterThan(0L)); - assertThat(maxBreakdown.get(POST_COLLECTION), greaterThan(0L)); - assertThat(maxBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(maxBreakdown.get(REDUCE), equalTo(0L)); - assertMap(maxAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); - assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0)); - } + assertNoFailuresAndResponse( + prepareSearch("idx").setProfile(true) + .addAggregation( + histogram("histo").field(NUMBER_FIELD) + .interval(1L) + .subAggregation( + terms("tags").field(TAG_FIELD) + .subAggregation(avg("avg").field(NUMBER_FIELD)) + .subAggregation(max("max").field(NUMBER_FIELD)) + ) + .subAggregation( + terms("strings").field(STRING_FIELD) + .subAggregation(avg("avg").field(NUMBER_FIELD)) + .subAggregation(max("max").field(NUMBER_FIELD)) + .subAggregation( + terms("tags").field(TAG_FIELD) + .subAggregation(avg("avg").field(NUMBER_FIELD)) + .subAggregation(max("max").field(NUMBER_FIELD)) + ) + ) + ), + response -> { + Map profileResults = response.getProfileResults(); + assertThat(profileResults, notNullValue()); + assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries)); + for (SearchProfileShardResult profileShardResult : profileResults.values()) { + assertThat(profileShardResult, notNullValue()); + AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); + assertThat(aggProfileResults, notNullValue()); + List aggProfileResultsList = aggProfileResults.getProfileResults(); + assertThat(aggProfileResultsList, notNullValue()); + assertThat(aggProfileResultsList.size(), equalTo(1)); + ProfileResult histoAggResult = aggProfileResultsList.get(0); + assertThat(histoAggResult, notNullValue()); + assertThat(histoAggResult.getQueryName(), equalTo("NumericHistogramAggregator")); + assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); + assertThat(histoAggResult.getTime(), greaterThan(0L)); + Map histoBreakdown = histoAggResult.getTimeBreakdown(); + assertThat(histoBreakdown, notNullValue()); + 
assertThat(histoBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(histoBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(histoBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); + assertThat(histoBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(histoBreakdown.get(POST_COLLECTION), greaterThan(0L)); + assertThat(histoBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(histoBreakdown.get(REDUCE), equalTo(0L)); + assertMap( + histoAggResult.getDebugInfo(), + matchesMap().entry(TOTAL_BUCKETS, greaterThan(0L)).entry(BUILT_BUCKETS, greaterThan(0)) + ); + assertThat(histoAggResult.getProfiledChildren().size(), equalTo(2)); + + Map<String, ProfileResult> histoAggResultSubAggregations = histoAggResult.getProfiledChildren() + .stream() + .collect(Collectors.toMap(ProfileResult::getLuceneDescription, s -> s)); + + ProfileResult tagsAggResult = histoAggResultSubAggregations.get("tags"); + assertThat(tagsAggResult, notNullValue()); + assertThat(tagsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName())); + assertThat(tagsAggResult.getTime(), greaterThan(0L)); + Map<String, Long> tagsBreakdown = tagsAggResult.getTimeBreakdown(); + assertThat(tagsBreakdown, notNullValue()); + assertThat(tagsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(tagsBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(tagsBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); + assertThat(tagsBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(tagsBreakdown.get(POST_COLLECTION), greaterThan(0L)); + assertThat(tagsBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(tagsBreakdown.get(REDUCE), equalTo(0L)); + assertRemapTermsDebugInfo(tagsAggResult); + assertThat(tagsAggResult.getProfiledChildren().size(), equalTo(2)); + + Map<String, ProfileResult> tagsAggResultSubAggregations = tagsAggResult.getProfiledChildren() + .stream() + .collect(Collectors.toMap(ProfileResult::getLuceneDescription, s -> s)); + + ProfileResult avgAggResult = tagsAggResultSubAggregations.get("avg"); + assertThat(avgAggResult, notNullValue()); + assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator")); + assertThat(avgAggResult.getTime(), greaterThan(0L)); + Map<String, Long> avgBreakdown = avgAggResult.getTimeBreakdown(); + assertThat(avgBreakdown, notNullValue()); + assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(avgBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); + assertThat(avgBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(avgBreakdown.get(POST_COLLECTION), greaterThan(0L)); + assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(avgBreakdown.get(REDUCE), equalTo(0L)); + assertMap(avgAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); + assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0)); + + ProfileResult maxAggResult = tagsAggResultSubAggregations.get("max"); + assertThat(maxAggResult, notNullValue()); + assertThat(maxAggResult.getQueryName(), equalTo("MaxAggregator")); + assertThat(maxAggResult.getTime(), greaterThan(0L)); + Map<String, Long> maxBreakdown = maxAggResult.getTimeBreakdown(); + assertThat(maxBreakdown, notNullValue()); + assertThat(maxBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(maxBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(maxBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); + assertThat(maxBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(maxBreakdown.get(POST_COLLECTION), greaterThan(0L)); +
assertThat(maxBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(maxBreakdown.get(REDUCE), equalTo(0L)); + assertMap(maxAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); + assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0)); + + ProfileResult stringsAggResult = histoAggResultSubAggregations.get("strings"); + assertThat(stringsAggResult, notNullValue()); + assertThat(stringsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName())); + assertThat(stringsAggResult.getTime(), greaterThan(0L)); + Map<String, Long> stringsBreakdown = stringsAggResult.getTimeBreakdown(); + assertThat(stringsBreakdown, notNullValue()); + assertThat(stringsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(stringsBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(stringsBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); + assertThat(stringsBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(stringsBreakdown.get(POST_COLLECTION), greaterThan(0L)); + assertThat(stringsBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(stringsBreakdown.get(REDUCE), equalTo(0L)); + assertRemapTermsDebugInfo(stringsAggResult); + assertThat(stringsAggResult.getProfiledChildren().size(), equalTo(3)); + + Map<String, ProfileResult> stringsAggResultSubAggregations = stringsAggResult.getProfiledChildren() + .stream() + .collect(Collectors.toMap(ProfileResult::getLuceneDescription, s -> s)); + + avgAggResult = stringsAggResultSubAggregations.get("avg"); + assertThat(avgAggResult, notNullValue()); + assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator")); + assertThat(avgAggResult.getTime(), greaterThan(0L)); + avgBreakdown = avgAggResult.getTimeBreakdown(); + assertThat(avgBreakdown, notNullValue()); + assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(avgBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); + assertThat(avgBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(avgBreakdown.get(POST_COLLECTION), greaterThan(0L)); + assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(avgBreakdown.get(REDUCE), equalTo(0L)); + assertMap(avgAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); + assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0)); + + maxAggResult = stringsAggResultSubAggregations.get("max"); + assertThat(maxAggResult, notNullValue()); + assertThat(maxAggResult.getQueryName(), equalTo("MaxAggregator")); + assertThat(maxAggResult.getTime(), greaterThan(0L)); + maxBreakdown = maxAggResult.getTimeBreakdown(); + assertThat(maxBreakdown, notNullValue()); + assertThat(maxBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(maxBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(maxBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); + assertThat(maxBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(maxBreakdown.get(POST_COLLECTION), greaterThan(0L)); + assertThat(maxBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(maxBreakdown.get(REDUCE), equalTo(0L)); + assertMap(maxAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); + assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0)); + + tagsAggResult = stringsAggResultSubAggregations.get("tags"); + assertThat(tagsAggResult, notNullValue()); + assertThat(tagsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName())); +
assertThat(tagsAggResult.getLuceneDescription(), equalTo("tags")); + assertThat(tagsAggResult.getTime(), greaterThan(0L)); + tagsBreakdown = tagsAggResult.getTimeBreakdown(); + assertThat(tagsBreakdown, notNullValue()); + assertThat(tagsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(tagsBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(tagsBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); + assertThat(tagsBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(tagsBreakdown.get(POST_COLLECTION), greaterThan(0L)); + assertThat(tagsBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(tagsBreakdown.get(REDUCE), equalTo(0L)); + assertRemapTermsDebugInfo(tagsAggResult); + assertThat(tagsAggResult.getProfiledChildren().size(), equalTo(2)); + + tagsAggResultSubAggregations = tagsAggResult.getProfiledChildren() + .stream() + .collect(Collectors.toMap(ProfileResult::getLuceneDescription, s -> s)); + + avgAggResult = tagsAggResultSubAggregations.get("avg"); + assertThat(avgAggResult, notNullValue()); + assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator")); + assertThat(avgAggResult.getTime(), greaterThan(0L)); + avgBreakdown = avgAggResult.getTimeBreakdown(); + assertThat(avgBreakdown, notNullValue()); + assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(avgBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); + assertThat(avgBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(avgBreakdown.get(POST_COLLECTION), greaterThan(0L)); + assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(avgBreakdown.get(REDUCE), equalTo(0L)); + assertMap(avgAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); + assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0)); + + maxAggResult = tagsAggResultSubAggregations.get("max"); + assertThat(maxAggResult, notNullValue()); + assertThat(maxAggResult.getQueryName(), equalTo("MaxAggregator")); + assertThat(maxAggResult.getTime(), greaterThan(0L)); + maxBreakdown = maxAggResult.getTimeBreakdown(); + assertThat(maxBreakdown, notNullValue()); + assertThat(maxBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(maxBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(maxBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); + assertThat(maxBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(maxBreakdown.get(POST_COLLECTION), greaterThan(0L)); + assertThat(maxBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(maxBreakdown.get(REDUCE), equalTo(0L)); + assertMap(maxAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); + assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0)); + } + } + ); } public void testNoProfile() { - SearchResponse response = prepareSearch("idx").setProfile(false) - .addAggregation( - histogram("histo").field(NUMBER_FIELD) - .interval(1L) - .subAggregation( - terms("tags").field(TAG_FIELD) - .subAggregation(avg("avg").field(NUMBER_FIELD)) - .subAggregation(max("max").field(NUMBER_FIELD)) - ) - .subAggregation( - terms("strings").field(STRING_FIELD) - .subAggregation(avg("avg").field(NUMBER_FIELD)) - .subAggregation(max("max").field(NUMBER_FIELD)) - .subAggregation( - terms("tags").field(TAG_FIELD) - .subAggregation(avg("avg").field(NUMBER_FIELD)) - .subAggregation(max("max").field(NUMBER_FIELD)) - ) - ) - ) - .get(); - assertNoFailures(response); - Map<String, SearchProfileShardResult> profileResults = response.getProfileResults(); -
assertThat(profileResults, notNullValue()); - assertThat(profileResults.size(), equalTo(0)); + assertNoFailuresAndResponse( + prepareSearch("idx").setProfile(false) + .addAggregation( + histogram("histo").field(NUMBER_FIELD) + .interval(1L) + .subAggregation( + terms("tags").field(TAG_FIELD) + .subAggregation(avg("avg").field(NUMBER_FIELD)) + .subAggregation(max("max").field(NUMBER_FIELD)) + ) + .subAggregation( + terms("strings").field(STRING_FIELD) + .subAggregation(avg("avg").field(NUMBER_FIELD)) + .subAggregation(max("max").field(NUMBER_FIELD)) + .subAggregation( + terms("tags").field(TAG_FIELD) + .subAggregation(avg("avg").field(NUMBER_FIELD)) + .subAggregation(max("max").field(NUMBER_FIELD)) + ) + ) + ), + response -> { + Map<String, SearchProfileShardResult> profileResults = response.getProfileResults(); + assertThat(profileResults, notNullValue()); + assertThat(profileResults.size(), equalTo(0)); + } + ); } /** @@ -634,62 +647,66 @@ public void testFilterByFilter() throws InterruptedException, IOException { } indexRandom(true, false, builders); - SearchResponse response = prepareSearch("dateidx").setProfile(true) - .addAggregation( - new DateHistogramAggregationBuilder("histo").field("date") - .calendarInterval(DateHistogramInterval.MONTH) - // Add a sub-agg so we don't get to use metadata. That's great and all, but it outputs less debugging info for us to - // verify. - .subAggregation(new MaxAggregationBuilder("m").field("date")) - ) - .get(); - assertNoFailures(response); - Map<String, SearchProfileShardResult> profileResults = response.getProfileResults(); - assertThat(profileResults, notNullValue()); - assertThat(profileResults.size(), equalTo(getNumShards("dateidx").numPrimaries)); - for (SearchProfileShardResult profileShardResult : profileResults.values()) { - assertThat(profileShardResult, notNullValue()); - AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); - assertThat(aggProfileResults, notNullValue()); - List<ProfileResult> aggProfileResultsList = aggProfileResults.getProfileResults(); - assertThat(aggProfileResultsList, notNullValue()); - assertThat(aggProfileResultsList.size(), equalTo(1)); - ProfileResult histoAggResult = aggProfileResultsList.get(0); - assertThat(histoAggResult, notNullValue()); - assertThat(histoAggResult.getQueryName(), equalTo("DateHistogramAggregator.FromDateRange")); - assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); - assertThat(histoAggResult.getProfiledChildren().size(), equalTo(1)); - assertThat(histoAggResult.getTime(), greaterThan(0L)); - Map<String, Long> breakdown = histoAggResult.getTimeBreakdown(); - assertThat(breakdown, notNullValue()); - assertThat(breakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(breakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(breakdown.get(COLLECT), equalTo(0L)); - assertThat(breakdown.get(BUILD_AGGREGATION).longValue(), greaterThan(0L)); - assertThat(breakdown.get(REDUCE), equalTo(0L)); - assertMap( - histoAggResult.getDebugInfo(), - matchesMap().entry(BUILT_BUCKETS, greaterThan(0)) - .entry("delegate", "RangeAggregator.FromFilters") - .entry( - "delegate_debug", - matchesMap().entry("average_docs_per_range", equalTo(RangeAggregator.DOCS_PER_RANGE_TO_USE_FILTERS * 2)) - .entry("ranges", 1) - .entry("delegate", "FilterByFilterAggregator") + assertNoFailuresAndResponse( + prepareSearch("dateidx").setProfile(true) + .addAggregation( + new DateHistogramAggregationBuilder("histo").field("date") + .calendarInterval(DateHistogramInterval.MONTH) + // Add a sub-agg so we don't get to use metadata. 
That's great and all, but it outputs less debugging info for us to + // verify. + .subAggregation(new MaxAggregationBuilder("m").field("date")) + ), + response -> { + Map<String, SearchProfileShardResult> profileResults = response.getProfileResults(); + assertThat(profileResults, notNullValue()); + assertThat(profileResults.size(), equalTo(getNumShards("dateidx").numPrimaries)); + for (SearchProfileShardResult profileShardResult : profileResults.values()) { + assertThat(profileShardResult, notNullValue()); + AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); + assertThat(aggProfileResults, notNullValue()); + List<ProfileResult> aggProfileResultsList = aggProfileResults.getProfileResults(); + assertThat(aggProfileResultsList, notNullValue()); + assertThat(aggProfileResultsList.size(), equalTo(1)); + ProfileResult histoAggResult = aggProfileResultsList.get(0); + assertThat(histoAggResult, notNullValue()); + assertThat(histoAggResult.getQueryName(), equalTo("DateHistogramAggregator.FromDateRange")); + assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); + assertThat(histoAggResult.getProfiledChildren().size(), equalTo(1)); + assertThat(histoAggResult.getTime(), greaterThan(0L)); + Map<String, Long> breakdown = histoAggResult.getTimeBreakdown(); + assertThat(breakdown, notNullValue()); + assertThat(breakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(breakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(breakdown.get(COLLECT), equalTo(0L)); + assertThat(breakdown.get(BUILD_AGGREGATION).longValue(), greaterThan(0L)); + assertThat(breakdown.get(REDUCE), equalTo(0L)); + assertMap( + histoAggResult.getDebugInfo(), + matchesMap().entry(BUILT_BUCKETS, greaterThan(0)) + .entry("delegate", "RangeAggregator.FromFilters") .entry( "delegate_debug", - matchesMap().entry("segments_with_deleted_docs", greaterThanOrEqualTo(0)) - .entry("segments_with_doc_count_field", 0) - .entry("segments_counted", 0) - .entry("segments_collected", greaterThan(0)) + matchesMap().entry("average_docs_per_range", equalTo(RangeAggregator.DOCS_PER_RANGE_TO_USE_FILTERS * 2)) + .entry("ranges", 1) + .entry("delegate", "FilterByFilterAggregator") .entry( - "filters", - matchesList().item(matchesMap().entry("query", "*:*").entry("segments_counted_in_constant_time", 0)) + "delegate_debug", + matchesMap().entry("segments_with_deleted_docs", greaterThanOrEqualTo(0)) + .entry("segments_with_doc_count_field", 0) + .entry("segments_counted", 0) + .entry("segments_collected", greaterThan(0)) + .entry( + "filters", + matchesList().item( + matchesMap().entry("query", "*:*").entry("segments_counted_in_constant_time", 0) + ) + ) ) ) - ) - ); - } + ); + } + } + ); } public void testDateHistogramFilterByFilterDisabled() throws InterruptedException, IOException { @@ -710,56 +727,60 @@ public void testDateHistogramFilterByFilterDisabled() throws InterruptedExceptio } indexRandom(true, false, builders); - SearchResponse response = prepareSearch("date_filter_by_filter_disabled").setProfile(true) - .addAggregation(new DateHistogramAggregationBuilder("histo").field("date").calendarInterval(DateHistogramInterval.MONTH)) - .get(); - assertNoFailures(response); - Map<String, SearchProfileShardResult> profileResults = response.getProfileResults(); - assertThat(profileResults, notNullValue()); - assertThat(profileResults.size(), equalTo(getNumShards("date_filter_by_filter_disabled").numPrimaries)); - for (SearchProfileShardResult profileShardResult : profileResults.values()) { - assertThat(profileShardResult, notNullValue()); - AggregationProfileShardResult aggProfileResults = 
profileShardResult.getAggregationProfileResults(); - assertThat(aggProfileResults, notNullValue()); - List<ProfileResult> aggProfileResultsList = aggProfileResults.getProfileResults(); - assertThat(aggProfileResultsList, notNullValue()); - assertThat(aggProfileResultsList.size(), equalTo(1)); - ProfileResult histoAggResult = aggProfileResultsList.get(0); - assertThat(histoAggResult, notNullValue()); - assertThat(histoAggResult.getQueryName(), equalTo("DateHistogramAggregator.FromDateRange")); - assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); - assertThat(histoAggResult.getProfiledChildren().size(), equalTo(0)); - assertThat(histoAggResult.getTime(), greaterThan(0L)); - Map<String, Long> breakdown = histoAggResult.getTimeBreakdown(); - assertMap( - breakdown, - matchesMap().entry(INITIALIZE, greaterThan(0L)) - .entry(INITIALIZE + "_count", greaterThan(0L)) - .entry(BUILD_LEAF_COLLECTOR, greaterThan(0L)) - .entry(BUILD_LEAF_COLLECTOR + "_count", greaterThan(0L)) - .entry(COLLECT, greaterThan(0L)) - .entry(COLLECT + "_count", greaterThan(0L)) - .entry(POST_COLLECTION, greaterThan(0L)) - .entry(POST_COLLECTION + "_count", 1L) - .entry(BUILD_AGGREGATION, greaterThan(0L)) - .entry(BUILD_AGGREGATION + "_count", greaterThan(0L)) - .entry(REDUCE, 0L) - .entry(REDUCE + "_count", 0L) - ); - Map<String, Object> debug = histoAggResult.getDebugInfo(); - assertMap( - debug, - matchesMap().entry("delegate", "RangeAggregator.NoOverlap") - .entry("built_buckets", 1) - .entry( - "delegate_debug", - matchesMap().entry("ranges", 1) - .entry("average_docs_per_range", 10000.0) - .entry("singletons", greaterThan(0)) - .entry("non-singletons", 0) - ) - ); - } + assertNoFailuresAndResponse( + prepareSearch("date_filter_by_filter_disabled").setProfile(true) + .addAggregation( + new DateHistogramAggregationBuilder("histo").field("date").calendarInterval(DateHistogramInterval.MONTH) + ), + response -> { + Map<String, SearchProfileShardResult> profileResults = response.getProfileResults(); + assertThat(profileResults, notNullValue()); + assertThat(profileResults.size(), equalTo(getNumShards("date_filter_by_filter_disabled").numPrimaries)); + for (SearchProfileShardResult profileShardResult : profileResults.values()) { + assertThat(profileShardResult, notNullValue()); + AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); + assertThat(aggProfileResults, notNullValue()); + List<ProfileResult> aggProfileResultsList = aggProfileResults.getProfileResults(); + assertThat(aggProfileResultsList, notNullValue()); + assertThat(aggProfileResultsList.size(), equalTo(1)); + ProfileResult histoAggResult = aggProfileResultsList.get(0); + assertThat(histoAggResult, notNullValue()); + assertThat(histoAggResult.getQueryName(), equalTo("DateHistogramAggregator.FromDateRange")); + assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); + assertThat(histoAggResult.getProfiledChildren().size(), equalTo(0)); + assertThat(histoAggResult.getTime(), greaterThan(0L)); + Map<String, Long> breakdown = histoAggResult.getTimeBreakdown(); + assertMap( + breakdown, + matchesMap().entry(INITIALIZE, greaterThan(0L)) + .entry(INITIALIZE + "_count", greaterThan(0L)) + .entry(BUILD_LEAF_COLLECTOR, greaterThan(0L)) + .entry(BUILD_LEAF_COLLECTOR + "_count", greaterThan(0L)) + .entry(COLLECT, greaterThan(0L)) + .entry(COLLECT + "_count", greaterThan(0L)) + .entry(POST_COLLECTION, greaterThan(0L)) + .entry(POST_COLLECTION + "_count", 1L) + .entry(BUILD_AGGREGATION, greaterThan(0L)) + .entry(BUILD_AGGREGATION + "_count", greaterThan(0L)) + .entry(REDUCE, 0L) + .entry(REDUCE + "_count", 
0L) + ); + Map<String, Object> debug = histoAggResult.getDebugInfo(); + assertMap( + debug, + matchesMap().entry("delegate", "RangeAggregator.NoOverlap") + .entry("built_buckets", 1) + .entry( + "delegate_debug", + matchesMap().entry("ranges", 1) + .entry("average_docs_per_range", 10000.0) + .entry("singletons", greaterThan(0)) + .entry("non-singletons", 0) + ) + ); + } + } + ); } finally { updateClusterSettings(Settings.builder().putNull(SearchService.ENABLE_REWRITE_AGGS_TO_FILTER_BY_FILTER.getKey())); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/dfs/DfsProfilerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/dfs/DfsProfilerIT.java index f7b2b0f4443d3..0bc23d9bd331b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/dfs/DfsProfilerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/dfs/DfsProfilerIT.java @@ -10,7 +10,6 @@ import org.apache.lucene.tests.util.English; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.profile.ProfileResult; @@ -28,6 +27,7 @@ import static org.elasticsearch.search.profile.query.RandomQueryGenerator.randomQueryBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.emptyOrNullString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -67,53 +67,55 @@ public void testProfileDfs() throws Exception { for (int i = 0; i < iters; i++) { QueryBuilder q = randomQueryBuilder(List.of(textField), List.of(numericField), numDocs, 3); logger.info("Query: {}", q); - SearchResponse resp = prepareSearch().setQuery(q) - .setTrackTotalHits(true) - .setProfile(true) - .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) - .setKnnSearch( - randomList( - 2, - 5, - () -> new KnnSearchBuilder( - vectorField, - new float[] { randomFloat(), randomFloat(), randomFloat() }, - randomIntBetween(5, 10), - 50, - randomBoolean() ? null : randomFloat() + assertResponse( + prepareSearch().setQuery(q) + .setTrackTotalHits(true) + .setProfile(true) + .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) + .setKnnSearch( + randomList( + 2, + 5, + () -> new KnnSearchBuilder( + vectorField, + new float[] { randomFloat(), randomFloat(), randomFloat() }, + randomIntBetween(5, 10), + 50, + randomBoolean() ? 
null : randomFloat() + ) ) - ) - ) - .get(); - - assertNotNull("Profile response element should not be null", resp.getProfileResults()); - assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); - for (Map.Entry<String, SearchProfileShardResult> shard : resp.getProfileResults().entrySet()) { - for (QueryProfileShardResult searchProfiles : shard.getValue().getQueryProfileResults()) { - for (ProfileResult result : searchProfiles.getQueryResults()) { - assertNotNull(result.getQueryName()); - assertNotNull(result.getLuceneDescription()); - assertThat(result.getTime(), greaterThan(0L)); - } - CollectorResult result = searchProfiles.getCollectorResult(); - assertThat(result.getName(), is(not(emptyOrNullString()))); - assertThat(result.getTime(), greaterThan(0L)); - } - SearchProfileDfsPhaseResult searchProfileDfsPhaseResult = shard.getValue().getSearchProfileDfsPhaseResult(); - assertThat(searchProfileDfsPhaseResult, is(notNullValue())); - for (QueryProfileShardResult queryProfileShardResult : searchProfileDfsPhaseResult.getQueryProfileShardResult()) { - for (ProfileResult result : queryProfileShardResult.getQueryResults()) { - assertNotNull(result.getQueryName()); - assertNotNull(result.getLuceneDescription()); - assertThat(result.getTime(), greaterThan(0L)); + ), + response -> { + assertNotNull("Profile response element should not be null", response.getProfileResults()); + assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0)); + for (Map.Entry<String, SearchProfileShardResult> shard : response.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shard.getValue().getQueryProfileResults()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + } + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); + assertThat(result.getTime(), greaterThan(0L)); + } + SearchProfileDfsPhaseResult searchProfileDfsPhaseResult = shard.getValue().getSearchProfileDfsPhaseResult(); + assertThat(searchProfileDfsPhaseResult, is(notNullValue())); + for (QueryProfileShardResult queryProfileShardResult : searchProfileDfsPhaseResult.getQueryProfileShardResult()) { + for (ProfileResult result : queryProfileShardResult.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + } + CollectorResult result = queryProfileShardResult.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); + assertThat(result.getTime(), greaterThan(0L)); + } + ProfileResult statsResult = searchProfileDfsPhaseResult.getDfsShardResult(); + assertThat(statsResult.getQueryName(), equalTo("statistics")); } - CollectorResult result = queryProfileShardResult.getCollectorResult(); - assertThat(result.getName(), is(not(emptyOrNullString()))); - assertThat(result.getTime(), greaterThan(0L)); } - ProfileResult statsResult = searchProfileDfsPhaseResult.getDfsShardResult(); - assertThat(statsResult.getQueryName(), equalTo("statistics")); - } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java index e7b02faede9b1..9aa5a85dba973 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java @@ -30,6 +30,7 @@ import java.util.Set; import static org.elasticsearch.search.profile.query.RandomQueryGenerator.randomQueryBuilder; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.emptyOrNullString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -62,29 +63,26 @@ public void testProfileQuery() throws Exception { for (int i = 0; i < iters; i++) { QueryBuilder q = randomQueryBuilder(stringFields, numericFields, numDocs, 3); logger.info("Query: {}", q); - - SearchResponse resp = prepareSearch().setQuery(q) - .setTrackTotalHits(true) - .setProfile(true) - .setSearchType(SearchType.QUERY_THEN_FETCH) - .get(); - - assertNotNull("Profile response element should not be null", resp.getProfileResults()); - assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); - for (Map.Entry<String, SearchProfileShardResult> shard : resp.getProfileResults().entrySet()) { - for (QueryProfileShardResult searchProfiles : shard.getValue().getQueryProfileResults()) { - for (ProfileResult result : searchProfiles.getQueryResults()) { - assertNotNull(result.getQueryName()); - assertNotNull(result.getLuceneDescription()); - assertThat(result.getTime(), greaterThan(0L)); + assertResponse( + prepareSearch().setQuery(q).setTrackTotalHits(true).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH), + response -> { + assertNotNull("Profile response element should not be null", response.getProfileResults()); + assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0)); + for (Map.Entry<String, SearchProfileShardResult> shard : response.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shard.getValue().getQueryProfileResults()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + } + + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); + assertThat(result.getTime(), greaterThan(0L)); + } } - - CollectorResult result = searchProfiles.getCollectorResult(); - assertThat(result.getName(), is(not(emptyOrNullString()))); - assertThat(result.getTime(), greaterThan(0L)); } - } - + ); } } @@ -185,26 +183,26 @@ public void testSimpleMatch() throws Exception { QueryBuilder q = QueryBuilders.matchQuery("field1", "one"); - SearchResponse resp = prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH).get(); + assertResponse(prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH), response -> { + Map<String, SearchProfileShardResult> p = response.getProfileResults(); + assertNotNull(p); + assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0)); - Map<String, SearchProfileShardResult> p = resp.getProfileResults(); - assertNotNull(p); - assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); + for (Map.Entry<String, SearchProfileShardResult> shardResult : response.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + 
assertEquals(result.getQueryName(), "TermQuery"); + assertEquals(result.getLuceneDescription(), "field1:one"); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + } - for (Map.Entry<String, SearchProfileShardResult> shardResult : resp.getProfileResults().entrySet()) { - for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { - for (ProfileResult result : searchProfiles.getQueryResults()) { - assertEquals(result.getQueryName(), "TermQuery"); - assertEquals(result.getLuceneDescription(), "field1:one"); + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); assertThat(result.getTime(), greaterThan(0L)); - assertNotNull(result.getTimeBreakdown()); } - - CollectorResult result = searchProfiles.getCollectorResult(); - assertThat(result.getName(), is(not(emptyOrNullString()))); - assertThat(result.getTime(), greaterThan(0L)); } - } + }); } /** @@ -226,45 +224,44 @@ public void testBool() throws Exception { .must(QueryBuilders.matchQuery("field1", "one")) .must(QueryBuilders.matchQuery("field1", "two")); - SearchResponse resp = prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH).get(); + assertResponse(prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH), response -> { + Map<String, SearchProfileShardResult> p = response.getProfileResults(); + assertNotNull(p); + assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0)); - Map<String, SearchProfileShardResult> p = resp.getProfileResults(); - assertNotNull(p); - assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); + for (Map.Entry<String, SearchProfileShardResult> shardResult : response.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertEquals(result.getQueryName(), "BooleanQuery"); + assertEquals(result.getLuceneDescription(), "+field1:one +field1:two"); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + assertEquals(result.getProfiledChildren().size(), 2); + + // Check the children + List<ProfileResult> children = result.getProfiledChildren(); + assertEquals(children.size(), 2); + + ProfileResult childProfile = children.get(0); + assertEquals(childProfile.getQueryName(), "TermQuery"); + assertEquals(childProfile.getLuceneDescription(), "field1:one"); + assertThat(childProfile.getTime(), greaterThan(0L)); + assertNotNull(childProfile.getTimeBreakdown()); + assertEquals(childProfile.getProfiledChildren().size(), 0); + + childProfile = children.get(1); + assertEquals(childProfile.getQueryName(), "TermQuery"); + assertEquals(childProfile.getLuceneDescription(), "field1:two"); + assertThat(childProfile.getTime(), greaterThan(0L)); + assertNotNull(childProfile.getTimeBreakdown()); + } - for (Map.Entry<String, SearchProfileShardResult> shardResult : resp.getProfileResults().entrySet()) { - for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { - for (ProfileResult result : searchProfiles.getQueryResults()) { - assertEquals(result.getQueryName(), "BooleanQuery"); - assertEquals(result.getLuceneDescription(), "+field1:one +field1:two"); + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); assertThat(result.getTime(), greaterThan(0L)); - assertNotNull(result.getTimeBreakdown()); - 
assertEquals(result.getProfiledChildren().size(), 2); - - // Check the children - List<ProfileResult> children = result.getProfiledChildren(); - assertEquals(children.size(), 2); - - ProfileResult childProfile = children.get(0); - assertEquals(childProfile.getQueryName(), "TermQuery"); - assertEquals(childProfile.getLuceneDescription(), "field1:one"); - assertThat(childProfile.getTime(), greaterThan(0L)); - assertNotNull(childProfile.getTimeBreakdown()); - assertEquals(childProfile.getProfiledChildren().size(), 0); - - childProfile = children.get(1); - assertEquals(childProfile.getQueryName(), "TermQuery"); - assertEquals(childProfile.getLuceneDescription(), "field1:two"); - assertThat(childProfile.getTime(), greaterThan(0L)); - assertNotNull(childProfile.getTimeBreakdown()); } - - CollectorResult result = searchProfiles.getCollectorResult(); - assertThat(result.getName(), is(not(emptyOrNullString()))); - assertThat(result.getTime(), greaterThan(0L)); } - } - + }); } /** @@ -287,25 +284,25 @@ public void testEmptyBool() throws Exception { QueryBuilder q = QueryBuilders.boolQuery(); logger.info("Query: {}", q); - SearchResponse resp = prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH).get(); + assertResponse(prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH), response -> { + assertNotNull("Profile response element should not be null", response.getProfileResults()); + assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0)); - assertNotNull("Profile response element should not be null", resp.getProfileResults()); - assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); + for (Map.Entry<String, SearchProfileShardResult> shardResult : response.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + } - for (Map.Entry<String, SearchProfileShardResult> shardResult : resp.getProfileResults().entrySet()) { - for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { - for (ProfileResult result : searchProfiles.getQueryResults()) { - assertNotNull(result.getQueryName()); - assertNotNull(result.getLuceneDescription()); + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); assertThat(result.getTime(), greaterThan(0L)); - assertNotNull(result.getTimeBreakdown()); } - - CollectorResult result = searchProfiles.getCollectorResult(); - assertThat(result.getName(), is(not(emptyOrNullString()))); - assertThat(result.getTime(), greaterThan(0L)); } - } + }); } /** @@ -332,25 +329,25 @@ public void testCollapsingBool() throws Exception { logger.info("Query: {}", q); - SearchResponse resp = prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH).get(); + assertResponse(prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH), response -> { + assertNotNull("Profile response element should not be null", response.getProfileResults()); + assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0)); - assertNotNull("Profile response element should not be null", resp.getProfileResults()); - 
assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); + for (Map.Entry<String, SearchProfileShardResult> shardResult : response.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + } - for (Map.Entry<String, SearchProfileShardResult> shardResult : resp.getProfileResults().entrySet()) { - for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { - for (ProfileResult result : searchProfiles.getQueryResults()) { - assertNotNull(result.getQueryName()); - assertNotNull(result.getLuceneDescription()); + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); assertThat(result.getTime(), greaterThan(0L)); - assertNotNull(result.getTimeBreakdown()); } - - CollectorResult result = searchProfiles.getCollectorResult(); - assertThat(result.getName(), is(not(emptyOrNullString()))); - assertThat(result.getTime(), greaterThan(0L)); } - } + }); } public void testBoosting() throws Exception { @@ -372,25 +369,25 @@ public void testBoosting() throws Exception { .negativeBoost(randomFloat()); logger.info("Query: {}", q); - SearchResponse resp = prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH).get(); + assertResponse(prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH), response -> { + assertNotNull("Profile response element should not be null", response.getProfileResults()); + assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0)); - assertNotNull("Profile response element should not be null", resp.getProfileResults()); - assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); + for (Map.Entry<String, SearchProfileShardResult> shardResult : response.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + } - for (Map.Entry<String, SearchProfileShardResult> shardResult : resp.getProfileResults().entrySet()) { - for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { - for (ProfileResult result : searchProfiles.getQueryResults()) { - assertNotNull(result.getQueryName()); - assertNotNull(result.getLuceneDescription()); + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); assertThat(result.getTime(), greaterThan(0L)); - assertNotNull(result.getTimeBreakdown()); } - - CollectorResult result = searchProfiles.getCollectorResult(); - assertThat(result.getName(), is(not(emptyOrNullString()))); - assertThat(result.getTime(), greaterThan(0L)); } - } + }); } public void testDisMaxRange() throws Exception { @@ -412,25 +409,25 @@ public void testDisMaxRange() throws Exception { .add(QueryBuilders.rangeQuery("field2").from(null).to(73).includeLower(true).includeUpper(true)); logger.info("Query: {}", q); - SearchResponse resp = 
prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH).get(); + assertResponse(prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH), response -> { + assertNotNull("Profile response element should not be null", response.getProfileResults()); + assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0)); - assertNotNull("Profile response element should not be null", resp.getProfileResults()); - assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); + for (Map.Entry<String, SearchProfileShardResult> shardResult : response.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + } - for (Map.Entry<String, SearchProfileShardResult> shardResult : resp.getProfileResults().entrySet()) { - for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { - for (ProfileResult result : searchProfiles.getQueryResults()) { - assertNotNull(result.getQueryName()); - assertNotNull(result.getLuceneDescription()); + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); assertThat(result.getTime(), greaterThan(0L)); - assertNotNull(result.getTimeBreakdown()); } - - CollectorResult result = searchProfiles.getCollectorResult(); - assertThat(result.getName(), is(not(emptyOrNullString()))); - assertThat(result.getTime(), greaterThan(0L)); } - } + }); } public void testRange() throws Exception { @@ -451,25 +448,25 @@ public void testRange() throws Exception { logger.info("Query: {}", q.toString()); - SearchResponse resp = prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH).get(); + assertResponse(prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH), response -> { + assertNotNull("Profile response element should not be null", response.getProfileResults()); + assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0)); - assertNotNull("Profile response element should not be null", resp.getProfileResults()); - assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); + for (Map.Entry<String, SearchProfileShardResult> shardResult : response.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + } - for (Map.Entry<String, SearchProfileShardResult> shardResult : resp.getProfileResults().entrySet()) { - for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { - for (ProfileResult result : searchProfiles.getQueryResults()) { - assertNotNull(result.getQueryName()); - assertNotNull(result.getLuceneDescription()); + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); assertThat(result.getTime(), greaterThan(0L)); - assertNotNull(result.getTimeBreakdown()); } - - CollectorResult result = 
searchProfiles.getCollectorResult(); - assertThat(result.getName(), is(not(emptyOrNullString()))); - assertThat(result.getTime(), greaterThan(0L)); } - } + }); } public void testPhrase() throws Exception { @@ -492,36 +489,35 @@ public void testPhrase() throws Exception { logger.info("Query: {}", q); - SearchResponse resp = prepareSearch().setQuery(q) - .setIndices("test") - .setProfile(true) - .setSearchType(SearchType.QUERY_THEN_FETCH) - .get(); + assertResponse( + prepareSearch().setQuery(q).setIndices("test").setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH), + response -> { + if (response.getShardFailures().length > 0) { + for (ShardSearchFailure f : response.getShardFailures()) { + logger.error("Shard search failure: {}", f); + } + fail(); + } - if (resp.getShardFailures().length > 0) { - for (ShardSearchFailure f : resp.getShardFailures()) { - logger.error("Shard search failure: {}", f); - } - fail(); - } + assertNotNull("Profile response element should not be null", response.getProfileResults()); + assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0)); - assertNotNull("Profile response element should not be null", resp.getProfileResults()); - assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); + for (Map.Entry<String, SearchProfileShardResult> shardResult : response.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + } - for (Map.Entry<String, SearchProfileShardResult> shardResult : resp.getProfileResults().entrySet()) { - for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { - for (ProfileResult result : searchProfiles.getQueryResults()) { - assertNotNull(result.getQueryName()); - assertNotNull(result.getLuceneDescription()); - assertThat(result.getTime(), greaterThan(0L)); - assertNotNull(result.getTimeBreakdown()); + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); + assertThat(result.getTime(), greaterThan(0L)); + } } - - CollectorResult result = searchProfiles.getCollectorResult(); - assertThat(result.getName(), is(not(emptyOrNullString()))); - assertThat(result.getTime(), greaterThan(0L)); } - } + ); } /** @@ -543,8 +539,9 @@ public void testNoProfile() throws Exception { logger.info("Query: {}", q); - SearchResponse resp = prepareSearch().setQuery(q).setProfile(false).get(); - assertThat("Profile response element should be an empty map", resp.getProfileResults().size(), equalTo(0)); + assertResponse( + prepareSearch().setQuery(q).setProfile(false), + response -> assertThat("Profile response element should be an empty map", response.getProfileResults().size(), equalTo(0)) + ); } - } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/ExistsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/ExistsIT.java index 099100a7a67e3..846696c81e288 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/ExistsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/ExistsIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.explain.ExplainResponse; import 
org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.SearchHit; @@ -30,7 +29,9 @@ import static java.util.Collections.singletonMap; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; public class ExistsIT extends ESIntegTestCase { @@ -113,46 +114,46 @@ public void testExists() throws Exception { expected.put("vec", 2); final long numDocs = sources.length; - SearchResponse allDocs = prepareSearch("idx").setSize(sources.length).get(); - assertNoFailures(allDocs); - assertHitCount(allDocs, numDocs); - for (Map.Entry<String, Integer> entry : expected.entrySet()) { - final String fieldName = entry.getKey(); - final int count = entry.getValue(); - // exists - SearchResponse resp = prepareSearch("idx").setQuery(QueryBuilders.existsQuery(fieldName)).get(); - assertNoFailures(resp); - try { - assertEquals( - String.format( - Locale.ROOT, - "exists(%s, %d) mapping: %s response: %s", - fieldName, - count, - Strings.toString(mapping), - resp - ), - count, - resp.getHits().getTotalHits().value - ); - } catch (AssertionError e) { - for (SearchHit searchHit : allDocs.getHits()) { - final String index = searchHit.getIndex(); - final String id = searchHit.getId(); - final ExplainResponse explanation = client().prepareExplain(index, id) - .setQuery(QueryBuilders.existsQuery(fieldName)) - .get(); - logger.info( - "Explanation for [{}] / [{}] / [{}]: [{}]", - fieldName, - id, - searchHit.getSourceAsString(), - explanation.getExplanation() - ); - } - throw e; + assertNoFailuresAndResponse(prepareSearch("idx").setSize(sources.length), allDocs -> { + assertHitCount(allDocs, numDocs); + for (Map.Entry<String, Integer> entry : expected.entrySet()) { + final String fieldName = entry.getKey(); + final int count = entry.getValue(); + // exists + assertNoFailuresAndResponse(prepareSearch("idx").setQuery(QueryBuilders.existsQuery(fieldName)), response -> { + try { + assertEquals( + String.format( + Locale.ROOT, + "exists(%s, %d) mapping: %s response: %s", + fieldName, + count, + Strings.toString(mapping), + response + ), + count, + response.getHits().getTotalHits().value + ); + } catch (AssertionError e) { + for (SearchHit searchHit : allDocs.getHits()) { + final String index = searchHit.getIndex(); + final String id = searchHit.getId(); + final ExplainResponse explanation = client().prepareExplain(index, id) + .setQuery(QueryBuilders.existsQuery(fieldName)) + .get(); + logger.info( + "Explanation for [{}] / [{}] / [{}]: [{}]", + fieldName, + id, + searchHit.getSourceAsString(), + explanation.getExplanation() + ); + } + throw e; + } + }); } - } + }); } public void testFieldAlias() throws Exception { @@ -198,10 +199,7 @@ public void testFieldAlias() throws Exception { for (Map.Entry<String, Integer> entry : expected.entrySet()) { String fieldName = entry.getKey(); int expectedCount = entry.getValue(); - - SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.existsQuery(fieldName)).get(); - assertNoFailures(response); - assertHitCount(response, expectedCount); + 
assertHitCountAndNoFailures(prepareSearch("idx").setQuery(QueryBuilders.existsQuery(fieldName)), expectedCount); } } @@ -231,8 +229,6 @@ public void testFieldAliasWithNoDocValues() throws Exception { indexRequests.add(client().prepareIndex("idx").setSource("foo", 43)); indexRandom(true, false, indexRequests); - SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.existsQuery("foo-alias")).get(); - assertNoFailures(response); - assertHitCount(response, 2); + assertHitCountAndNoFailures(prepareSearch("idx").setQuery(QueryBuilders.existsQuery("foo-alias")), 2L); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/IntervalQueriesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/IntervalQueriesIT.java index 1e18c0ca3c59c..fd119d3145353 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/IntervalQueriesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/IntervalQueriesIT.java @@ -12,7 +12,6 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.KeywordTokenizer; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.index.analysis.AnalyzerProvider; import org.elasticsearch.index.analysis.AnalyzerScope; import org.elasticsearch.index.query.IntervalQueryBuilder; @@ -30,6 +29,7 @@ import static java.util.Collections.singletonMap; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; public class IntervalQueriesIT extends ESIntegTestCase { @@ -56,10 +56,11 @@ public void testEmptyIntervalsWithNestedMappings() throws InterruptedException { client().prepareIndex("nested").setId("3").setSource("text", "quick") ); - SearchResponse resp = prepareSearch("nested").setQuery( - new IntervalQueryBuilder("empty_text", new IntervalsSourceProvider.Match("an empty query", 0, true, null, null, null)) - ).get(); - assertEquals(0, resp.getFailedShards()); + assertNoFailures( + prepareSearch("nested").setQuery( + new IntervalQueryBuilder("empty_text", new IntervalsSourceProvider.Match("an empty query", 0, true, null, null, null)) + ) + ); } private static class EmptyAnalyzer extends Analyzer { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/MultiMatchQueryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/MultiMatchQueryIT.java index f251ab5cb6269..26c2da7736f6c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/MultiMatchQueryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/MultiMatchQueryIT.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; @@ -53,6 +54,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHitsWithoutFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; @@ -267,72 +270,91 @@ private XContentBuilder createMapping() throws IOException { public void testDefaults() throws ExecutionException, InterruptedException { MatchQueryParser.Type type = MatchQueryParser.Type.BOOLEAN; - SearchResponse searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category").operator(Operator.OR) - ) - ).get(); - Set<String> topNIds = Sets.newHashSet("theone", "theother"); - for (int i = 0; i < searchResponse.getHits().getHits().length; i++) { - topNIds.remove(searchResponse.getHits().getAt(i).getId()); - // very likely that we hit a random doc that has the same score so orders are random since - // the doc id is the tie-breaker - } - assertThat(topNIds, empty()); - assertThat(searchResponse.getHits().getHits()[0].getScore(), greaterThan(searchResponse.getHits().getHits()[1].getScore())); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category").operator(Operator.OR) - .type(type) - ) - ).get(); - assertFirstHit(searchResponse, anyOf(hasId("theone"), hasId("theother"))); - assertThat(searchResponse.getHits().getHits()[0].getScore(), greaterThan(searchResponse.getHits().getHits()[1].getScore())); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("marvel hero", "full_name", "first_name", "last_name", "category").operator(Operator.OR).type(type) - ) - ).get(); - assertFirstHit(searchResponse, hasId("theother")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category").operator(Operator.AND).type(type) - ) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category").operator(Operator.AND).type(type) - ) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category").operator(Operator.OR) + ) + ), + response -> { + Set<String> topNIds = Sets.newHashSet("theone", "theother"); + for (int i = 0; i < response.getHits().getHits().length; i++) { + topNIds.remove(response.getHits().getAt(i).getId()); + // very likely that we hit a random doc that has the same score so orders are random since + // the doc id is the tie-breaker + } + assertThat(topNIds, empty()); + assertThat(response.getHits().getHits()[0].getScore(), greaterThan(response.getHits().getHits()[1].getScore())); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category").operator(Operator.OR) + .type(type) + ) + ), + response -> { + assertFirstHit(response, anyOf(hasId("theone"), hasId("theother"))); + 
assertThat(response.getHits().getHits()[0].getScore(), greaterThan(response.getHits().getHits()[1].getScore())); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("marvel hero", "full_name", "first_name", "last_name", "category").operator(Operator.OR).type(type) + ) + ), + response -> assertFirstHit(response, hasId("theother")) + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category").operator(Operator.AND).type(type) + ) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category").operator(Operator.AND).type(type) + ) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + } + ); } public void testPhraseType() { - SearchResponse searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("Man the Ultimate", "full_name_phrase", "first_name_phrase", "last_name_phrase", "category_phrase") - .operator(Operator.OR) - .type(MatchQueryParser.Type.PHRASE) - ) - ).get(); - assertFirstHit(searchResponse, hasId("ultimate2")); - assertHitCount(searchResponse, 1L); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("Captain", "full_name_phrase", "first_name_phrase", "last_name_phrase", "category_phrase").operator( - Operator.OR - ).type(MatchQueryParser.Type.PHRASE) - ) - ).get(); - assertThat(searchResponse.getHits().getTotalHits().value, greaterThan(1L)); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("Man the Ultimate", "full_name_phrase", "first_name_phrase", "last_name_phrase", "category_phrase") + .operator(Operator.OR) + .type(MatchQueryParser.Type.PHRASE) + ) + ), + response -> { + assertFirstHit(response, hasId("ultimate2")); + assertHitCount(response, 1L); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("Captain", "full_name_phrase", "first_name_phrase", "last_name_phrase", "category_phrase").operator( + Operator.OR + ).type(MatchQueryParser.Type.PHRASE) + ) + ), + response -> assertThat(response.getHits().getTotalHits().value, greaterThan(1L)) + ); assertSearchHitsWithoutFailures( prepareSearch("test").setQuery( @@ -348,14 +370,15 @@ public void testPhraseType() { } public void testSingleField() throws NoSuchFieldException, IllegalAccessException { - SearchResponse searchResponse = prepareSearch("test").setQuery(randomizeType(multiMatchQuery("15", "skill"))).get(); - assertNoFailures(searchResponse); - assertFirstHit(searchResponse, hasId("theone")); + assertNoFailuresAndResponse( + prepareSearch("test").setQuery(randomizeType(multiMatchQuery("15", "skill"))), + response -> assertFirstHit(response, hasId("theone")) + ); - searchResponse = prepareSearch("test").setQuery(randomizeType(multiMatchQuery("15", "skill", "int-field")).analyzer("category")) - .get(); - assertNoFailures(searchResponse); - assertFirstHit(searchResponse, hasId("theone")); + assertNoFailuresAndResponse( + prepareSearch("test").setQuery(randomizeType(multiMatchQuery("15", "skill", "int-field")).analyzer("category")), + response -> assertFirstHit(response, hasId("theone")) + ); String[] fields = { "full_name", @@ -393,34 +416,39 @@ public void testSingleField() throws 
NoSuchFieldException, IllegalAccessExceptio builder.append(RandomPicks.randomFrom(random(), query)).append(" "); } MultiMatchQueryBuilder multiMatchQueryBuilder = randomizeType(multiMatchQuery(builder.toString(), field)); - SearchResponse multiMatchResp = prepareSearch("test") - // id sort field is a tie, in case hits have the same score, - // the hits will be sorted the same consistently - .addSort("_score", SortOrder.DESC) - .addSort("id", SortOrder.ASC) - .setQuery(multiMatchQueryBuilder) - .get(); - MatchQueryBuilder matchQueryBuilder = QueryBuilders.matchQuery(field, builder.toString()); - - SearchResponse matchResp = prepareSearch("test") - // id tie sort - .addSort("_score", SortOrder.DESC) - .addSort("id", SortOrder.ASC) - .setQuery(matchQueryBuilder) - .get(); - assertThat( - "field: " + field + " query: " + builder.toString(), - multiMatchResp.getHits().getTotalHits().value, - equalTo(matchResp.getHits().getTotalHits().value) + assertResponse( + prepareSearch("test") + // id sort field is a tie, in case hits have the same score, + // the hits will be sorted the same consistently + .addSort("_score", SortOrder.DESC) + .addSort("id", SortOrder.ASC) + .setQuery(multiMatchQueryBuilder), + multiMatchResp -> { + MatchQueryBuilder matchQueryBuilder = QueryBuilders.matchQuery(field, builder.toString()); + assertResponse( + prepareSearch("test") + // id tie sort + .addSort("_score", SortOrder.DESC) + .addSort("id", SortOrder.ASC) + .setQuery(matchQueryBuilder), + matchResp -> { + assertThat( + "field: " + field + " query: " + builder.toString(), + multiMatchResp.getHits().getTotalHits().value, + equalTo(matchResp.getHits().getTotalHits().value) + ); + SearchHits hits = multiMatchResp.getHits(); + if (field.startsWith("missing")) { + assertEquals(0, hits.getHits().length); + } + for (int j = 0; j < hits.getHits().length; j++) { + assertThat(hits.getHits()[j].getScore(), equalTo(matchResp.getHits().getHits()[j].getScore())); + assertThat(hits.getHits()[j].getId(), equalTo(matchResp.getHits().getHits()[j].getId())); + } + } + ); + } ); - SearchHits hits = multiMatchResp.getHits(); - if (field.startsWith("missing")) { - assertEquals(0, hits.getHits().length); - } - for (int j = 0; j < hits.getHits().length; j++) { - assertThat(hits.getHits()[j].getScore(), equalTo(matchResp.getHits().getHits()[j].getScore())); - assertThat(hits.getHits()[j].getId(), equalTo(matchResp.getHits().getHits()[j].getId())); - } } } @@ -435,23 +463,24 @@ public void testEquivalence() { MultiMatchQueryBuilder multiMatchQueryBuilder = randomBoolean() ? multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category") : multiMatchQuery("marvel hero captain america", "*_name", randomBoolean() ? 
"category" : "categ*"); - SearchResponse left = prepareSearch("test").setSize(numDocs) - .addSort(SortBuilders.scoreSort()) - .addSort(SortBuilders.fieldSort("id")) - .setQuery(randomizeType(multiMatchQueryBuilder.operator(Operator.OR).type(type))) - .get(); - - SearchResponse right = prepareSearch("test").setSize(numDocs) - .addSort(SortBuilders.scoreSort()) - .addSort(SortBuilders.fieldSort("id")) - .setQuery( - disMaxQuery().add(matchQuery("full_name", "marvel hero captain america")) - .add(matchQuery("first_name", "marvel hero captain america")) - .add(matchQuery("last_name", "marvel hero captain america")) - .add(matchQuery("category", "marvel hero captain america")) + assertResponse( + prepareSearch("test").setSize(numDocs) + .addSort(SortBuilders.scoreSort()) + .addSort(SortBuilders.fieldSort("id")) + .setQuery(randomizeType(multiMatchQueryBuilder.operator(Operator.OR).type(type))), + left -> assertResponse( + prepareSearch("test").setSize(numDocs) + .addSort(SortBuilders.scoreSort()) + .addSort(SortBuilders.fieldSort("id")) + .setQuery( + disMaxQuery().add(matchQuery("full_name", "marvel hero captain america")) + .add(matchQuery("first_name", "marvel hero captain america")) + .add(matchQuery("last_name", "marvel hero captain america")) + .add(matchQuery("category", "marvel hero captain america")) + ), + right -> assertEquivalent("marvel hero captain america", left, right) ) - .get(); - assertEquivalent("marvel hero captain america", left, right); + ); } { @@ -461,64 +490,68 @@ public void testEquivalence() { MultiMatchQueryBuilder multiMatchQueryBuilder = randomBoolean() ? multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category") : multiMatchQuery("captain america", "*_name", randomBoolean() ? "category" : "categ*"); - SearchResponse left = prepareSearch("test").setSize(numDocs) - .addSort(SortBuilders.scoreSort()) - .addSort(SortBuilders.fieldSort("id")) - .setQuery( - randomizeType(multiMatchQueryBuilder.operator(op).tieBreaker(1.0f).minimumShouldMatch(minShouldMatch).type(type)) - ) - .get(); - - SearchResponse right = prepareSearch("test").setSize(numDocs) - .addSort(SortBuilders.scoreSort()) - .addSort(SortBuilders.fieldSort("id")) - .setQuery( - boolQuery().minimumShouldMatch(minShouldMatch) - .should( - randomBoolean() - ? termQuery("full_name", "captain america") - : matchQuery("full_name", "captain america").operator(op) + assertResponse( + prepareSearch("test").setSize(numDocs) + .addSort(SortBuilders.scoreSort()) + .addSort(SortBuilders.fieldSort("id")) + .setQuery( + randomizeType( + multiMatchQueryBuilder.operator(op).tieBreaker(1.0f).minimumShouldMatch(minShouldMatch).type(type) ) - .should(matchQuery("first_name", "captain america").operator(op)) - .should(matchQuery("last_name", "captain america").operator(op)) - .should(matchQuery("category", "captain america").operator(op)) + ), + left -> assertResponse( + prepareSearch("test").setSize(numDocs) + .addSort(SortBuilders.scoreSort()) + .addSort(SortBuilders.fieldSort("id")) + .setQuery( + boolQuery().minimumShouldMatch(minShouldMatch) + .should( + randomBoolean() + ? 
termQuery("full_name", "captain america") + : matchQuery("full_name", "captain america").operator(op) + ) + .should(matchQuery("first_name", "captain america").operator(op)) + .should(matchQuery("last_name", "captain america").operator(op)) + .should(matchQuery("category", "captain america").operator(op)) + ), + right -> assertEquivalent("captain america", left, right) ) - .get(); - assertEquivalent("captain america", left, right); + ); } { String minShouldMatch = randomBoolean() ? null : "" + between(0, 1); - SearchResponse left = prepareSearch("test").setSize(numDocs) - .addSort(SortBuilders.scoreSort()) - .addSort(SortBuilders.fieldSort("id")) - .setQuery( - randomizeType( - multiMatchQuery("capta", "full_name", "first_name", "last_name", "category").type( - MatchQueryParser.Type.PHRASE_PREFIX - ).tieBreaker(1.0f).minimumShouldMatch(minShouldMatch) - ) - ) - .get(); - - SearchResponse right = prepareSearch("test").setSize(numDocs) - .addSort(SortBuilders.scoreSort()) - .addSort(SortBuilders.fieldSort("id")) - .setQuery( - boolQuery().minimumShouldMatch(minShouldMatch) - .should(matchPhrasePrefixQuery("full_name", "capta")) - .should(matchPhrasePrefixQuery("first_name", "capta")) - .should(matchPhrasePrefixQuery("last_name", "capta")) - .should(matchPhrasePrefixQuery("category", "capta")) + assertResponse( + prepareSearch("test").setSize(numDocs) + .addSort(SortBuilders.scoreSort()) + .addSort(SortBuilders.fieldSort("id")) + .setQuery( + randomizeType( + multiMatchQuery("capta", "full_name", "first_name", "last_name", "category").type( + MatchQueryParser.Type.PHRASE_PREFIX + ).tieBreaker(1.0f).minimumShouldMatch(minShouldMatch) + ) + ), + left -> assertResponse( + prepareSearch("test").setSize(numDocs) + .addSort(SortBuilders.scoreSort()) + .addSort(SortBuilders.fieldSort("id")) + .setQuery( + boolQuery().minimumShouldMatch(minShouldMatch) + .should(matchPhrasePrefixQuery("full_name", "capta")) + .should(matchPhrasePrefixQuery("first_name", "capta")) + .should(matchPhrasePrefixQuery("last_name", "capta")) + .should(matchPhrasePrefixQuery("category", "capta")) + ), + right -> assertEquivalent("capta", left, right) ) - .get(); - assertEquivalent("capta", left, right); + ); } { String minShouldMatch = randomBoolean() ? 
null : "" + between(0, 1); - SearchResponse left; + SearchRequestBuilder leftSearch; if (randomBoolean()) { - left = prepareSearch("test").setSize(numDocs) + leftSearch = prepareSearch("test").setSize(numDocs) .addSort(SortBuilders.scoreSort()) .addSort(SortBuilders.fieldSort("id")) .setQuery( @@ -527,10 +560,9 @@ public void testEquivalence() { MatchQueryParser.Type.PHRASE ).minimumShouldMatch(minShouldMatch) ) - ) - .get(); + ); } else { - left = prepareSearch("test").setSize(numDocs) + leftSearch = prepareSearch("test").setSize(numDocs) .addSort(SortBuilders.scoreSort()) .addSort(SortBuilders.fieldSort("id")) .setQuery( @@ -539,163 +571,206 @@ public void testEquivalence() { MatchQueryParser.Type.PHRASE ).tieBreaker(1.0f).minimumShouldMatch(minShouldMatch) ) - ) - .get(); + ); } - SearchResponse right = prepareSearch("test").setSize(numDocs) - .addSort(SortBuilders.scoreSort()) - .addSort(SortBuilders.fieldSort("id")) - .setQuery( - boolQuery().minimumShouldMatch(minShouldMatch) - .should(matchPhraseQuery("full_name", "captain america")) - .should(matchPhraseQuery("first_name", "captain america")) - .should(matchPhraseQuery("last_name", "captain america")) - .should(matchPhraseQuery("category", "captain america")) + assertResponse( + leftSearch, + left -> assertResponse( + prepareSearch("test").setSize(numDocs) + .addSort(SortBuilders.scoreSort()) + .addSort(SortBuilders.fieldSort("id")) + .setQuery( + boolQuery().minimumShouldMatch(minShouldMatch) + .should(matchPhraseQuery("full_name", "captain america")) + .should(matchPhraseQuery("first_name", "captain america")) + .should(matchPhraseQuery("last_name", "captain america")) + .should(matchPhraseQuery("category", "captain america")) + ), + right -> assertEquivalent("captain america", left, right) ) - .get(); - assertEquivalent("captain america", left, right); + ); } } } public void testCrossFieldMode() throws ExecutionException, InterruptedException { - SearchResponse searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("captain america", "full_name", "first_name", "last_name").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) - .operator(Operator.OR) - ) - ).get(); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category").type( - MultiMatchQueryBuilder.Type.CROSS_FIELDS - ).operator(Operator.OR) - ) - ).get(); - assertFirstHit(searchResponse, hasId("theother")); - assertSecondHit(searchResponse, hasId("theone")); - assertThat(searchResponse.getHits().getHits()[0].getScore(), greaterThan(searchResponse.getHits().getHits()[1].getScore())); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("marvel hero", "full_name", "first_name", "last_name", "category").type( - MultiMatchQueryBuilder.Type.CROSS_FIELDS - ).operator(Operator.OR) - ) - ).get(); - assertFirstHit(searchResponse, hasId("theother")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category").type( - MultiMatchQueryBuilder.Type.CROSS_FIELDS - ).operator(Operator.AND) - ) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("captain america 15", "full_name", "first_name", "last_name", "category", "skill").type( - 
MultiMatchQueryBuilder.Type.CROSS_FIELDS - ).analyzer("category").lenient(true).operator(Operator.AND) - ) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("captain america 15", "full_name", "first_name", "last_name", "category", "skill", "int-field").type( - MultiMatchQueryBuilder.Type.CROSS_FIELDS - ).analyzer("category").lenient(true).operator(Operator.AND) - ) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("captain america 15", "skill", "full_name", "first_name", "last_name", "category", "int-field").type( - MultiMatchQueryBuilder.Type.CROSS_FIELDS - ).analyzer("category").lenient(true).operator(Operator.AND) - ) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("captain america 15", "first_name", "last_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) - .lenient(true) - .analyzer("category") - ) - ).get(); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType(multiMatchQuery("15", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).analyzer("category")) - ).get(); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType(multiMatchQuery("25 15", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).analyzer("category")) - ).get(); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("25 15", "int-field", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).analyzer("category") - ) - ).get(); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("25 15", "first_name", "int-field", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) - .analyzer("category") - ) - ).get(); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("25 15", "int-field", "skill", "first_name").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) - .analyzer("category") - ) - ).get(); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("25 15", "int-field", "first_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) - .analyzer("category") - ) - ).get(); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("captain america marvel hero", "first_name", "last_name", "category").type( - MultiMatchQueryBuilder.Type.CROSS_FIELDS - ).analyzer("category").operator(Operator.OR) - ) - ).get(); - assertFirstHit(searchResponse, hasId("theone")); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("captain america", "full_name", "first_name", "last_name").type( + MultiMatchQueryBuilder.Type.CROSS_FIELDS + ).operator(Operator.OR) + ) + ), + response -> assertFirstHit(response, hasId("theone")) + ); + + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("marvel hero captain 
america", "full_name", "first_name", "last_name", "category").type( + MultiMatchQueryBuilder.Type.CROSS_FIELDS + ).operator(Operator.OR) + ) + ), + response -> { + assertFirstHit(response, hasId("theother")); + assertSecondHit(response, hasId("theone")); + assertThat(response.getHits().getHits()[0].getScore(), greaterThan(response.getHits().getHits()[1].getScore())); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("marvel hero", "full_name", "first_name", "last_name", "category").type( + MultiMatchQueryBuilder.Type.CROSS_FIELDS + ).operator(Operator.OR) + ) + ), + response -> assertFirstHit(response, hasId("theother")) + ); + + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category").type( + MultiMatchQueryBuilder.Type.CROSS_FIELDS + ).operator(Operator.AND) + ) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("captain america 15", "full_name", "first_name", "last_name", "category", "skill").type( + MultiMatchQueryBuilder.Type.CROSS_FIELDS + ).analyzer("category").lenient(true).operator(Operator.AND) + ) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("captain america 15", "full_name", "first_name", "last_name", "category", "skill", "int-field").type( + MultiMatchQueryBuilder.Type.CROSS_FIELDS + ).analyzer("category").lenient(true).operator(Operator.AND) + ) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("captain america 15", "skill", "full_name", "first_name", "last_name", "category", "int-field").type( + MultiMatchQueryBuilder.Type.CROSS_FIELDS + ).analyzer("category").lenient(true).operator(Operator.AND) + ) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("captain america 15", "first_name", "last_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) + .lenient(true) + .analyzer("category") + ) + ), + response -> assertFirstHit(response, hasId("theone")) + ); + + assertResponse( + prepareSearch("test").setQuery( + randomizeType(multiMatchQuery("15", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).analyzer("category")) + ), + response -> assertFirstHit(response, hasId("theone")) + ); + + assertResponse( + prepareSearch("test").setQuery( + randomizeType(multiMatchQuery("25 15", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).analyzer("category")) + ), + response -> assertFirstHit(response, hasId("theone")) + ); + + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("25 15", "int-field", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).analyzer("category") + ) + ), + response -> assertFirstHit(response, hasId("theone")) + ); + + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("25 15", "first_name", "int-field", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) + .analyzer("category") + ) + ), + response -> assertFirstHit(response, hasId("theone")) + ); + + 
assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("25 15", "int-field", "skill", "first_name").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) + .analyzer("category") + ) + ), + response -> assertFirstHit(response, hasId("theone")) + ); + + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("25 15", "int-field", "first_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) + .analyzer("category") + ) + ), + response -> assertFirstHit(response, hasId("theone")) + ); + + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("captain america marvel hero", "first_name", "last_name", "category").type( + MultiMatchQueryBuilder.Type.CROSS_FIELDS + ).analyzer("category").operator(Operator.OR) + ) + ), + response -> assertFirstHit(response, hasId("theone")) + ); // test group based on analyzer -- all fields are grouped into a cross field search - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("captain america marvel hero", "first_name", "last_name", "category").type( - MultiMatchQueryBuilder.Type.CROSS_FIELDS - ).analyzer("category").operator(Operator.AND) - ) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("captain america marvel hero", "first_name", "last_name", "category").type( + MultiMatchQueryBuilder.Type.CROSS_FIELDS + ).analyzer("category").operator(Operator.AND) + ) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + } + ); // counter example assertHitCount( prepareSearch("test").setQuery( @@ -721,83 +796,112 @@ public void testCrossFieldMode() throws ExecutionException, InterruptedException ); // test if boosts work - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("the ultimate", "full_name", "first_name", "category").field("last_name", 10) - .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) - .operator(Operator.AND) - ) - ).get(); - assertHitCount(searchResponse, 2L); - assertFirstHit(searchResponse, hasId("ultimate1")); // has ultimate in the last_name and that is boosted - assertSecondHit(searchResponse, hasId("ultimate2")); - assertThat(searchResponse.getHits().getHits()[0].getScore(), greaterThan(searchResponse.getHits().getHits()[1].getScore())); - + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("the ultimate", "full_name", "first_name", "category").field("last_name", 10) + .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) + .operator(Operator.AND) + ) + ), + response -> { + assertHitCount(response, 2L); + assertFirstHit(response, hasId("ultimate1")); // has ultimate in the last_name and that is boosted + assertSecondHit(response, hasId("ultimate2")); + assertThat(response.getHits().getHits()[0].getScore(), greaterThan(response.getHits().getHits()[1].getScore())); + } + ); // since we try to treat the matching fields as one field scores are very similar but we have a small bias towards the // more frequent field that acts as a tie-breaker internally - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("the ultimate", "full_name", "first_name", "last_name", "category").type( - MultiMatchQueryBuilder.Type.CROSS_FIELDS - ).operator(Operator.AND) - ) - ).get(); - assertHitCount(searchResponse, 2L); - assertFirstHit(searchResponse, hasId("ultimate2")); - 
assertSecondHit(searchResponse, hasId("ultimate1")); - assertThat(searchResponse.getHits().getHits()[0].getScore(), greaterThan(searchResponse.getHits().getHits()[1].getScore())); - + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("the ultimate", "full_name", "first_name", "last_name", "category").type( + MultiMatchQueryBuilder.Type.CROSS_FIELDS + ).operator(Operator.AND) + ) + ), + response -> { + assertHitCount(response, 2L); + assertFirstHit(response, hasId("ultimate2")); + assertSecondHit(response, hasId("ultimate1")); + assertThat(response.getHits().getHits()[0].getScore(), greaterThan(response.getHits().getHits()[1].getScore())); + } + ); // Test group based on numeric fields - searchResponse = prepareSearch("test").setQuery( - randomizeType(multiMatchQuery("15", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType(multiMatchQuery("15", "skill", "first_name").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); - + assertResponse( + prepareSearch("test").setQuery(randomizeType(multiMatchQuery("15", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS))), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType(multiMatchQuery("15", "skill", "first_name").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + } + ); // Two numeric fields together caused trouble at one point! - searchResponse = prepareSearch("test").setQuery( - randomizeType(multiMatchQuery("15", "int-field", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType(multiMatchQuery("15", "int-field", "first_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType(multiMatchQuery("alpha 15", "first_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).lenient(true)) - ).get(); - /* - * Doesn't find the one because "alpha 15" isn't a number and we don't - * break on spaces. 
- */ - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("ultimate1")); - + assertResponse( + prepareSearch("test").setQuery( + randomizeType(multiMatchQuery("15", "int-field", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType(multiMatchQuery("15", "int-field", "first_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("alpha 15", "first_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).lenient(true) + ) + ), + response -> { + /* + * Doesn't find the one because "alpha 15" isn't a number and we don't + * break on spaces. + */ + assertHitCount(response, 1L); + assertFirstHit(response, hasId("ultimate1")); + } + ); // Lenient wasn't always properly lenient with two numeric fields - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("alpha 15", "int-field", "first_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).lenient(true) - ) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("ultimate1")); - + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("alpha 15", "int-field", "first_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) + .lenient(true) + ) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("ultimate1")); + } + ); // Check that cross fields works with date fields - searchResponse = prepareSearch("test").setQuery( - randomizeType(multiMatchQuery("now", "f*", "date").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)).lenient(true) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("nowHero")); + assertResponse( + prepareSearch("test").setQuery( + randomizeType(multiMatchQuery("now", "f*", "date").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)).lenient(true) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("nowHero")); + } + ); } /** @@ -819,14 +923,21 @@ public void testFuzzyFieldLevelBoosting() throws InterruptedException, Execution builders.add(client().prepareIndex(idx).setId("2").setSource("title", "bar", "body", "foo")); indexRandom(true, false, builders); - SearchResponse searchResponse = prepareSearch(idx).setExplain(true) - .setQuery(multiMatchQuery("foo").field("title", 100).field("body").fuzziness(Fuzziness.ZERO)) - .get(); - SearchHit[] hits = searchResponse.getHits().getHits(); - assertNotEquals("both documents should be on different shards", hits[0].getShard().getShardId(), hits[1].getShard().getShardId()); - assertEquals("1", hits[0].getId()); - assertEquals("2", hits[1].getId()); - assertThat(hits[0].getScore(), greaterThan(hits[1].getScore())); + assertResponse( + prepareSearch(idx).setExplain(true) + .setQuery(multiMatchQuery("foo").field("title", 100).field("body").fuzziness(Fuzziness.ZERO)), + response -> { + SearchHit[] hits = response.getHits().getHits(); + assertNotEquals( + "both documents should be on different shards", + hits[0].getShard().getShardId(), + hits[1].getShard().getShardId() + ); + assertEquals("1", hits[0].getId()); + assertEquals("2", hits[1].getId()); + assertThat(hits[0].getScore(), 
greaterThan(hits[1].getScore())); + } + ); } private static void assertEquivalent(String query, SearchResponse left, SearchResponse right) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/QueryStringIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/QueryStringIT.java index 882e18eb593aa..f101106917184 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/QueryStringIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/QueryStringIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.Operator; import org.elasticsearch.search.SearchHit; @@ -28,7 +27,8 @@ import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery; import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -49,17 +49,18 @@ public void testBasicAllQuery() throws Exception { reqs.add(client().prepareIndex("test").setId("3").setSource("f3", "foo bar baz")); indexRandom(true, false, reqs); - SearchResponse resp = prepareSearch("test").setQuery(queryStringQuery("foo")).get(); - assertHitCount(resp, 2L); - assertHits(resp.getHits(), "1", "3"); - - resp = prepareSearch("test").setQuery(queryStringQuery("bar")).get(); - assertHitCount(resp, 2L); - assertHits(resp.getHits(), "1", "3"); - - resp = prepareSearch("test").setQuery(queryStringQuery("Bar")).get(); - assertHitCount(resp, 3L); - assertHits(resp.getHits(), "1", "2", "3"); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("foo")), response -> { + assertHitCount(response, 2L); + assertHits(response.getHits(), "1", "3"); + }); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("bar")), response -> { + assertHitCount(response, 2L); + assertHits(response.getHits(), "1", "3"); + }); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("Bar")), response -> { + assertHitCount(response, 3L); + assertHits(response.getHits(), "1", "2", "3"); + }); } public void testWithDate() throws Exception { @@ -68,21 +69,22 @@ public void testWithDate() throws Exception { reqs.add(client().prepareIndex("test").setId("2").setSource("f1", "bar", "f_date", "2015/09/01")); indexRandom(true, false, reqs); - SearchResponse resp = prepareSearch("test").setQuery(queryStringQuery("foo bar")).get(); - assertHits(resp.getHits(), "1", "2"); - assertHitCount(resp, 2L); - - resp = prepareSearch("test").setQuery(queryStringQuery("\"2015/09/02\"")).get(); - assertHits(resp.getHits(), "1"); - assertHitCount(resp, 1L); - - resp = prepareSearch("test").setQuery(queryStringQuery("bar \"2015/09/02\"")).get(); - assertHits(resp.getHits(), "1", "2"); - assertHitCount(resp, 2L); - - resp = prepareSearch("test").setQuery(queryStringQuery("\"2015/09/02\" \"2015/09/01\"")).get(); - assertHits(resp.getHits(), "1", "2"); - 
assertHitCount(resp, 2L); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("foo bar")), response -> { + assertHits(response.getHits(), "1", "2"); + assertHitCount(response, 2L); + }); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("\"2015/09/02\"")), response -> { + assertHits(response.getHits(), "1"); + assertHitCount(response, 1L); + }); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("bar \"2015/09/02\"")), response -> { + assertHits(response.getHits(), "1", "2"); + assertHitCount(response, 2L); + }); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("\"2015/09/02\" \"2015/09/01\"")), response -> { + assertHits(response.getHits(), "1", "2"); + assertHitCount(response, 2L); + }); } public void testWithLotsOfTypes() throws Exception { @@ -95,21 +97,22 @@ public void testWithLotsOfTypes() throws Exception { ); indexRandom(true, false, reqs); - SearchResponse resp = prepareSearch("test").setQuery(queryStringQuery("foo bar")).get(); - assertHits(resp.getHits(), "1", "2"); - assertHitCount(resp, 2L); - - resp = prepareSearch("test").setQuery(queryStringQuery("\"2015/09/02\"")).get(); - assertHits(resp.getHits(), "1"); - assertHitCount(resp, 1L); - - resp = prepareSearch("test").setQuery(queryStringQuery("127.0.0.2 \"2015/09/02\"")).get(); - assertHits(resp.getHits(), "1", "2"); - assertHitCount(resp, 2L); - - resp = prepareSearch("test").setQuery(queryStringQuery("127.0.0.1 OR 1.8")).get(); - assertHits(resp.getHits(), "1", "2"); - assertHitCount(resp, 2L); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("foo bar")), response -> { + assertHits(response.getHits(), "1", "2"); + assertHitCount(response, 2L); + }); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("\"2015/09/02\"")), response -> { + assertHits(response.getHits(), "1"); + assertHitCount(response, 1L); + }); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("127.0.0.2 \"2015/09/02\"")), response -> { + assertHits(response.getHits(), "1", "2"); + assertHitCount(response, 2L); + }); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("127.0.0.1 OR 1.8")), response -> { + assertHits(response.getHits(), "1", "2"); + assertHitCount(response, 2L); + }); } public void testDocWithAllTypes() throws Exception { @@ -118,32 +121,20 @@ public void testDocWithAllTypes() throws Exception { reqs.add(client().prepareIndex("test").setId("1").setSource(docBody, XContentType.JSON)); indexRandom(true, false, reqs); - SearchResponse resp = prepareSearch("test").setQuery(queryStringQuery("foo")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(queryStringQuery("Bar")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(queryStringQuery("Baz")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(queryStringQuery("19")).get(); - assertHits(resp.getHits(), "1"); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("foo")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("Bar")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("Baz")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("19")), response -> assertHits(response.getHits(), "1")); // nested doesn't match because it's hidden - resp = 
prepareSearch("test").setQuery(queryStringQuery("1476383971")).get(); - assertHits(resp.getHits(), "1"); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("1476383971")), response -> assertHits(response.getHits(), "1")); // bool doesn't match - resp = prepareSearch("test").setQuery(queryStringQuery("7")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(queryStringQuery("23")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(queryStringQuery("1293")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(queryStringQuery("42")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(queryStringQuery("1.7")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(queryStringQuery("1.5")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(queryStringQuery("127.0.0.1")).get(); - assertHits(resp.getHits(), "1"); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("7")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("23")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("1293")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("42")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("1.7")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("1.5")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("127.0.0.1")), response -> assertHits(response.getHits(), "1")); // binary doesn't match // suggest doesn't match // geo_point doesn't match @@ -156,17 +147,18 @@ public void testKeywordWithWhitespace() throws Exception { reqs.add(client().prepareIndex("test").setId("3").setSource("f1", "foo bar")); indexRandom(true, false, reqs); - SearchResponse resp = prepareSearch("test").setQuery(queryStringQuery("foo")).get(); - assertHits(resp.getHits(), "3"); - assertHitCount(resp, 1L); - - resp = prepareSearch("test").setQuery(queryStringQuery("bar")).get(); - assertHits(resp.getHits(), "2", "3"); - assertHitCount(resp, 2L); - - resp = prepareSearch("test").setQuery(queryStringQuery("Foo Bar")).get(); - assertHits(resp.getHits(), "1", "2", "3"); - assertHitCount(resp, 3L); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("foo")), response -> { + assertHits(response.getHits(), "3"); + assertHitCount(response, 1L); + }); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("bar")), response -> { + assertHits(response.getHits(), "2", "3"); + assertHitCount(response, 2L); + }); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("Foo Bar")), response -> { + assertHits(response.getHits(), "1", "2", "3"); + assertHitCount(response, 3L); + }); } public void testAllFields() throws Exception { @@ -182,9 +174,10 @@ public void testAllFields() throws Exception { assertHitCount(prepareSearch("test_1").setQuery(queryStringQuery("foo eggplant").defaultOperator(Operator.AND)), 0L); - SearchResponse resp = prepareSearch("test_1").setQuery(queryStringQuery("foo eggplant").defaultOperator(Operator.OR)).get(); - assertHits(resp.getHits(), "1"); - assertHitCount(resp, 1L); + 
assertResponse(prepareSearch("test_1").setQuery(queryStringQuery("foo eggplant").defaultOperator(Operator.OR)), response -> { + assertHits(response.getHits(), "1"); + assertHitCount(response, 1L); + }); } public void testPhraseQueryOnFieldWithNoPositions() throws Exception { @@ -227,11 +220,10 @@ public void testFieldAlias() throws Exception { indexRequests.add(client().prepareIndex("test").setId("3").setSource("f3", "another value", "f2", "three")); indexRandom(true, false, indexRequests); - SearchResponse response = prepareSearch("test").setQuery(queryStringQuery("value").field("f3_alias")).get(); - - assertNoFailures(response); - assertHitCount(response, 2); - assertHits(response.getHits(), "2", "3"); + assertNoFailuresAndResponse(prepareSearch("test").setQuery(queryStringQuery("value").field("f3_alias")), response -> { + assertHitCount(response, 2); + assertHits(response.getHits(), "2", "3"); + }); } public void testFieldAliasWithEmbeddedFieldNames() throws Exception { @@ -241,11 +233,10 @@ public void testFieldAliasWithEmbeddedFieldNames() throws Exception { indexRequests.add(client().prepareIndex("test").setId("3").setSource("f3", "another value", "f2", "three")); indexRandom(true, false, indexRequests); - SearchResponse response = prepareSearch("test").setQuery(queryStringQuery("f3_alias:value AND f2:three")).get(); - - assertNoFailures(response); - assertHitCount(response, 1); - assertHits(response.getHits(), "3"); + assertNoFailuresAndResponse(prepareSearch("test").setQuery(queryStringQuery("f3_alias:value AND f2:three")), response -> { + assertHitCount(response, 1); + assertHits(response.getHits(), "3"); + }); } public void testFieldAliasWithWildcardField() throws Exception { @@ -255,11 +246,10 @@ public void testFieldAliasWithWildcardField() throws Exception { indexRequests.add(client().prepareIndex("test").setId("3").setSource("f3", "another value", "f2", "three")); indexRandom(true, false, indexRequests); - SearchResponse response = prepareSearch("test").setQuery(queryStringQuery("value").field("f3_*")).get(); - - assertNoFailures(response); - assertHitCount(response, 2); - assertHits(response.getHits(), "2", "3"); + assertNoFailuresAndResponse(prepareSearch("test").setQuery(queryStringQuery("value").field("f3_*")), response -> { + assertHitCount(response, 2); + assertHits(response.getHits(), "2", "3"); + }); } public void testFieldAliasOnDisallowedFieldType() throws Exception { @@ -269,11 +259,10 @@ public void testFieldAliasOnDisallowedFieldType() throws Exception { // The wildcard field matches aliases for both a text and geo_point field. // By default, the geo_point field should be ignored when building the query. - SearchResponse response = prepareSearch("test").setQuery(queryStringQuery("text").field("f*_alias")).get(); - - assertNoFailures(response); - assertHitCount(response, 1); - assertHits(response.getHits(), "1"); + assertNoFailuresAndResponse(prepareSearch("test").setQuery(queryStringQuery("text").field("f*_alias")), response -> { + assertHitCount(response, 1); + assertHits(response.getHits(), "1"); + }); } private void assertHits(SearchHits hits, String... 
ids) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/ScriptScoreQueryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/ScriptScoreQueryIT.java index c9c7c2a56eea9..95ad5560aacd9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/ScriptScoreQueryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/ScriptScoreQueryIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.search.query; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.query.QueryBuilder; @@ -32,6 +31,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThirdHit; @@ -73,17 +73,21 @@ public void testScriptScore() { Map params = new HashMap<>(); params.put("param1", 0.1); Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['field2'].value * param1", params); - SearchResponse resp = prepareSearch("test-index").setQuery(scriptScoreQuery(matchQuery("field1", "text0"), script)).get(); - assertNoFailures(resp); - assertOrderedSearchHits(resp, "10", "8", "6", "4", "2"); - assertFirstHit(resp, hasScore(1.0f)); - assertSecondHit(resp, hasScore(0.8f)); - assertThirdHit(resp, hasScore(0.6f)); + assertNoFailuresAndResponse( + prepareSearch("test-index").setQuery(scriptScoreQuery(matchQuery("field1", "text0"), script)), + response -> { + assertOrderedSearchHits(response, "10", "8", "6", "4", "2"); + assertFirstHit(response, hasScore(1.0f)); + assertSecondHit(response, hasScore(0.8f)); + assertThirdHit(response, hasScore(0.6f)); + } + ); // applying min score - resp = prepareSearch("test-index").setQuery(scriptScoreQuery(matchQuery("field1", "text0"), script).setMinScore(0.6f)).get(); - assertNoFailures(resp); - assertOrderedSearchHits(resp, "10", "8", "6"); + assertNoFailuresAndResponse( + prepareSearch("test-index").setQuery(scriptScoreQuery(matchQuery("field1", "text0"), script).setMinScore(0.6f)), + response -> assertOrderedSearchHits(response, "10", "8", "6") + ); } public void testScriptScoreBoolQuery() { @@ -98,11 +102,11 @@ public void testScriptScoreBoolQuery() { params.put("param1", 0.1); Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['field2'].value * param1", params); QueryBuilder boolQuery = boolQuery().should(matchQuery("field1", "text1")).should(matchQuery("field1", "text10")); - SearchResponse resp = prepareSearch("test-index").setQuery(scriptScoreQuery(boolQuery, script)).get(); - assertNoFailures(resp); - assertOrderedSearchHits(resp, "10", "1"); - assertFirstHit(resp, hasScore(1.0f)); - assertSecondHit(resp, hasScore(0.1f)); + assertNoFailuresAndResponse(prepareSearch("test-index").setQuery(scriptScoreQuery(boolQuery, script)), response -> { + assertOrderedSearchHits(response, "10", "1"); + assertFirstHit(response, 
hasScore(1.0f)); + assertSecondHit(response, hasScore(0.1f)); + }); } // test that when the internal query is rewritten script_score works well @@ -118,9 +122,10 @@ public void testRewrittenQuery() { RangeQueryBuilder rangeQB = new RangeQueryBuilder("field1").from("2019-01-01"); // the query should be rewritten to from:null Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['field2'].value * param1", Map.of("param1", 0.1)); - SearchResponse resp = prepareSearch("test-index2").setQuery(scriptScoreQuery(rangeQB, script)).get(); - assertNoFailures(resp); - assertOrderedSearchHits(resp, "3", "2", "1"); + assertNoFailuresAndResponse( + prepareSearch("test-index2").setQuery(scriptScoreQuery(rangeQB, script)), + response -> assertOrderedSearchHits(response, "3", "2", "1") + ); } public void testDisallowExpensiveQueries() { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java index 918746021f381..072922da54798 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -17,7 +17,6 @@ import org.apache.lucene.util.AttributeSource; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.document.DocumentField; @@ -106,6 +105,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHitsWithoutFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit; @@ -208,36 +209,47 @@ public void testConstantScoreQuery() throws Exception { client().prepareIndex("test").setId("2").setSource("field1", "quick lazy huge brown fox", "field2", "quick lazy huge brown fox") ); - SearchResponse searchResponse = prepareSearch().setQuery(constantScoreQuery(matchQuery("field1", "quick"))).get(); - assertHitCount(searchResponse, 2L); - for (SearchHit searchHit : searchResponse.getHits().getHits()) { - assertThat(searchHit, hasScore(1.0f)); - } - - searchResponse = prepareSearch("test").setQuery( - boolQuery().must(matchAllQuery()).must(constantScoreQuery(matchQuery("field1", "quick")).boost(1.0f + random().nextFloat())) - ).get(); - assertHitCount(searchResponse, 2L); - assertFirstHit(searchResponse, hasScore(searchResponse.getHits().getAt(1).getScore())); - - prepareSearch("test").setQuery(constantScoreQuery(matchQuery("field1", "quick")).boost(1.0f + random().nextFloat())).get(); - assertHitCount(searchResponse, 2L); - assertFirstHit(searchResponse, hasScore(searchResponse.getHits().getAt(1).getScore())); - - searchResponse = prepareSearch("test").setQuery( - 
constantScoreQuery( - boolQuery().must(matchAllQuery()) - .must( - constantScoreQuery(matchQuery("field1", "quick")).boost(1.0f + (random.nextBoolean() ? 0.0f : random.nextFloat())) - ) - ) - ).get(); - assertHitCount(searchResponse, 2L); - assertFirstHit(searchResponse, hasScore(searchResponse.getHits().getAt(1).getScore())); - for (SearchHit searchHit : searchResponse.getHits().getHits()) { - assertThat(searchHit, hasScore(1.0f)); - } - + assertResponse(prepareSearch().setQuery(constantScoreQuery(matchQuery("field1", "quick"))), response -> { + assertHitCount(response, 2L); + for (SearchHit searchHit : response.getHits().getHits()) { + assertThat(searchHit, hasScore(1.0f)); + } + }); + assertResponse( + prepareSearch("test").setQuery( + boolQuery().must(matchAllQuery()).must(constantScoreQuery(matchQuery("field1", "quick")).boost(1.0f + random().nextFloat())) + ), + response -> { + assertHitCount(response, 2L); + assertFirstHit(response, hasScore(response.getHits().getAt(1).getScore())); + } + ); + assertResponse( + prepareSearch("test").setQuery(constantScoreQuery(matchQuery("field1", "quick")).boost(1.0f + random().nextFloat())), + response -> { + assertHitCount(response, 2L); + assertFirstHit(response, hasScore(response.getHits().getAt(1).getScore())); + } + ); + assertResponse( + prepareSearch("test").setQuery( + constantScoreQuery( + boolQuery().must(matchAllQuery()) + .must( + constantScoreQuery(matchQuery("field1", "quick")).boost( + 1.0f + (random.nextBoolean() ? 0.0f : random.nextFloat()) + ) + ) + ) + ), + response -> { + assertHitCount(response, 2L); + assertFirstHit(response, hasScore(response.getHits().getAt(1).getScore())); + for (SearchHit searchHit : response.getHits().getHits()) { + assertThat(searchHit, hasScore(1.0f)); + } + } + ); int num = scaledRandomIntBetween(100, 200); IndexRequestBuilder[] builders = new IndexRequestBuilder[num]; for (int i = 0; i < builders.length; i++) { @@ -249,24 +261,30 @@ public void testConstantScoreQuery() throws Exception { int queryRounds = scaledRandomIntBetween(10, 20); for (int i = 0; i < queryRounds; i++) { MatchQueryBuilder matchQuery = matchQuery("f", English.intToEnglish(between(0, num))); - searchResponse = prepareSearch("test_1").setQuery(constantScoreQuery(matchQuery)).setSize(num).get(); - long totalHits = searchResponse.getHits().getTotalHits().value; - SearchHits hits = searchResponse.getHits(); - for (SearchHit searchHit : hits) { - assertThat(searchHit, hasScore(1.0f)); - } - searchResponse = prepareSearch("test_1").setQuery( - boolQuery().must(matchAllQuery()) - .must(constantScoreQuery(matchQuery).boost(1.0f + (random.nextBoolean() ? 0.0f : random.nextFloat()))) - ).setSize(num).get(); - hits = searchResponse.getHits(); - assertThat(hits.getTotalHits().value, equalTo(totalHits)); - if (totalHits > 1) { - float expected = hits.getAt(0).getScore(); + final long[] constantScoreTotalHits = new long[1]; + assertResponse(prepareSearch("test_1").setQuery(constantScoreQuery(matchQuery)).setSize(num), response -> { + constantScoreTotalHits[0] = response.getHits().getTotalHits().value; + SearchHits hits = response.getHits(); for (SearchHit searchHit : hits) { - assertThat(searchHit, hasScore(expected)); + assertThat(searchHit, hasScore(1.0f)); } - } + }); + assertResponse( + prepareSearch("test_1").setQuery( + boolQuery().must(matchAllQuery()) + .must(constantScoreQuery(matchQuery).boost(1.0f + (random.nextBoolean() ? 
0.0f : random.nextFloat()))) + ).setSize(num), + response -> { + SearchHits hits = response.getHits(); + assertThat(hits.getTotalHits().value, equalTo(constantScoreTotalHits[0])); + if (constantScoreTotalHits[0] > 1) { + float expected = hits.getAt(0).getScore(); + for (SearchHit searchHit : hits) { + assertThat(searchHit, hasScore(expected)); + } + } + } + ); } } @@ -283,12 +301,14 @@ public void testAllDocsQueryString() throws InterruptedException, ExecutionExcep for (int i = 0; i < iters; i++) { assertHitCount(prepareSearch("test").setQuery(queryStringQuery("*:*^10.0").boost(10.0f)), 2L); - SearchResponse searchResponse = prepareSearch("test").setQuery( - boolQuery().must(matchAllQuery()).must(constantScoreQuery(matchAllQuery())) - ).get(); - assertHitCount(searchResponse, 2L); - assertThat((double) searchResponse.getHits().getAt(0).getScore(), closeTo(2.0, 0.1)); - assertThat((double) searchResponse.getHits().getAt(1).getScore(), closeTo(2.0, 0.1)); + assertResponse( + prepareSearch("test").setQuery(boolQuery().must(matchAllQuery()).must(constantScoreQuery(matchAllQuery()))), + response -> { + assertHitCount(response, 2L); + assertThat((double) response.getHits().getAt(0).getScore(), closeTo(2.0, 0.1)); + assertThat((double) response.getHits().getAt(1).getScore(), closeTo(2.0, 0.1)); + } + ); } } @@ -525,14 +545,15 @@ public void testMatchQueryNumeric() throws Exception { client().prepareIndex("test").setId("3").setSource("long", 3L, "double", 3.0d) ); - SearchResponse searchResponse = prepareSearch().setQuery(matchQuery("long", "1")).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - - searchResponse = prepareSearch().setQuery(matchQuery("double", "2")).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("2")); - expectThrows(SearchPhaseExecutionException.class, () -> prepareSearch().setQuery(matchQuery("double", "2 3 4")).get()); + assertResponse(prepareSearch().setQuery(matchQuery("long", "1")), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch().setQuery(matchQuery("double", "2")), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("2")); + expectThrows(SearchPhaseExecutionException.class, () -> prepareSearch().setQuery(matchQuery("double", "2 3 4")).get()); + }); } public void testMatchQueryFuzzy() throws Exception { @@ -594,10 +615,10 @@ public void testMultiMatchQuery() throws Exception { builder = multiMatchQuery("value1").field("field1").field("field3", 1.5f).operator(Operator.AND); // Operator only applies on terms // inside a field! Fields are // always OR-ed together. 
- SearchResponse searchResponse = prepareSearch().setQuery(builder).get(); - assertHitCount(searchResponse, 2L); - assertSearchHits(searchResponse, "3", "1"); - + assertResponse(prepareSearch().setQuery(builder), response -> { + assertHitCount(response, 2L); + assertSearchHits(response, "3", "1"); + }); // Test lenient client().prepareIndex("test").setId("3").setSource("field1", "value7", "field2", "value8", "field4", 5).get(); refresh(); @@ -607,19 +628,23 @@ public void testMultiMatchQuery() throws Exception { // when the number for shards is randomized and we expect failures // we can either run into partial or total failures depending on the current number of shards Matcher reasonMatcher = containsString("NumberFormatException: For input string: \"value1\""); - ShardSearchFailure[] shardFailures; try { - prepareSearch().setQuery(builder).get(); - shardFailures = searchResponse.getShardFailures(); - assertThat("Expected shard failures, got none", shardFailures, not(emptyArray())); + assertResponse(prepareSearch().setQuery(builder), response -> { + ShardSearchFailure[] shardFailures = response.getShardFailures(); + assertThat("Expected shard failures, got none", shardFailures, not(emptyArray())); + for (ShardSearchFailure shardSearchFailure : shardFailures) { + assertThat(shardSearchFailure.status(), equalTo(RestStatus.BAD_REQUEST)); + assertThat(shardSearchFailure.reason(), reasonMatcher); + } + }); + } catch (SearchPhaseExecutionException e) { assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); - shardFailures = e.shardFailures(); - } - - for (ShardSearchFailure shardSearchFailure : shardFailures) { - assertThat(shardSearchFailure.status(), equalTo(RestStatus.BAD_REQUEST)); - assertThat(shardSearchFailure.reason(), reasonMatcher); + ShardSearchFailure[] shardFailures = e.shardFailures(); + for (ShardSearchFailure shardSearchFailure : shardFailures) { + assertThat(shardSearchFailure.status(), equalTo(RestStatus.BAD_REQUEST)); + assertThat(shardSearchFailure.reason(), reasonMatcher); + } } builder.lenient(true); @@ -672,35 +697,36 @@ public void testMultiMatchQueryMinShouldMatch() { MultiMatchQueryBuilder multiMatchQuery = multiMatchQuery("value1 value2 foo", "field1", "field2"); multiMatchQuery.minimumShouldMatch("70%"); - SearchResponse searchResponse = prepareSearch().setQuery(multiMatchQuery).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - + assertResponse(prepareSearch().setQuery(multiMatchQuery), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); multiMatchQuery.minimumShouldMatch("30%"); - searchResponse = prepareSearch().setQuery(multiMatchQuery).get(); - assertHitCount(searchResponse, 2L); - assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("2")); - + assertResponse(prepareSearch().setQuery(multiMatchQuery), response -> { + assertHitCount(response, 2L); + assertFirstHit(response, hasId("1")); + assertSecondHit(response, hasId("2")); + }); multiMatchQuery.minimumShouldMatch("70%"); - searchResponse = prepareSearch().setQuery(multiMatchQuery).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - + assertResponse(prepareSearch().setQuery(multiMatchQuery), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); multiMatchQuery.minimumShouldMatch("30%"); - searchResponse = prepareSearch().setQuery(multiMatchQuery).get(); - assertHitCount(searchResponse, 2L); - 
assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("2")); - + assertResponse(prepareSearch().setQuery(multiMatchQuery), response -> { + assertHitCount(response, 2L); + assertFirstHit(response, hasId("1")); + assertSecondHit(response, hasId("2")); + }); multiMatchQuery = multiMatchQuery("value1 value2 bar", "field1"); multiMatchQuery.minimumShouldMatch("100%"); assertHitCount(prepareSearch().setQuery(multiMatchQuery), 0L); multiMatchQuery.minimumShouldMatch("70%"); - searchResponse = prepareSearch().setQuery(multiMatchQuery).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); + assertResponse(prepareSearch().setQuery(multiMatchQuery), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); // Min should match > # optional clauses returns no docs. multiMatchQuery = multiMatchQuery("value1 value2 value3", "field1", "field2"); multiMatchQuery.minimumShouldMatch("4"); @@ -715,10 +741,10 @@ public void testBoolQueryMinShouldMatchBiggerThanNumberOfShouldClauses() throws BoolQueryBuilder boolQuery = boolQuery().must(termQuery("field1", "value1")) .should(boolQuery().should(termQuery("field1", "value1")).should(termQuery("field1", "value2")).minimumShouldMatch(3)); - SearchResponse searchResponse = prepareSearch().setQuery(boolQuery).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - + assertResponse(prepareSearch().setQuery(boolQuery), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); boolQuery = boolQuery().must(termQuery("field1", "value1")) .should(boolQuery().should(termQuery("field1", "value1")).should(termQuery("field1", "value2")).minimumShouldMatch(1)) // Only one should clause is defined, returns no docs. 
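The hunks in this file all apply the same transformation: instead of assigning the SearchResponse returned by .get() to a local variable and asserting on it afterwards, the request builder and the assertions are handed to assertResponse / assertNoFailuresAndResponse, which scopes the response to a callback. A minimal sketch of that pattern, under the assumption that the helper executes the request, invokes the caller's assertions, and always releases the ref-counted response afterwards (RefCounted, AssertResponseSketch, and the Supplier-based signature below are illustrative stand-ins, not the actual ElasticsearchAssertions API):

import java.util.function.Consumer;
import java.util.function.Supplier;

// Illustrative stand-in for a ref-counted response type such as SearchResponse.
interface RefCounted {
    void decRef(); // release one reference to the buffers backing the response
}

final class AssertResponseSketch {
    // Execute the request, run the caller's assertions, and release the response
    // even when an assertion throws, so a failing test cannot leak it.
    static <R extends RefCounted> void assertResponse(Supplier<R> execute, Consumer<R> assertions) {
        R response = execute.get();
        try {
            assertions.accept(response);
        } finally {
            response.decRef();
        }
    }
}

Because the response no longer outlives its lambda, values needed across two requests must be captured explicitly, which is why the testConstantScoreQuery hunk above stores the first query's total hits in a one-element constantScoreTotalHits array before comparing it inside the second, nested assertResponse.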
@@ -728,10 +754,10 @@ public void testBoolQueryMinShouldMatchBiggerThanNumberOfShouldClauses() throws boolQuery = boolQuery().should(termQuery("field1", "value1")) .should(boolQuery().should(termQuery("field1", "value1")).should(termQuery("field1", "value2")).minimumShouldMatch(3)) .minimumShouldMatch(1); - searchResponse = prepareSearch().setQuery(boolQuery).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - + assertResponse(prepareSearch().setQuery(boolQuery), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); boolQuery = boolQuery().must(termQuery("field1", "value1")) .must(boolQuery().should(termQuery("field1", "value1")).should(termQuery("field1", "value2")).minimumShouldMatch(3)); assertHitCount(prepareSearch().setQuery(boolQuery), 0L); @@ -743,10 +769,10 @@ public void testFuzzyQueryString() { client().prepareIndex("test").setId("2").setSource("str", "shay", "date", "2012-02-05", "num", 20).get(); refresh(); - SearchResponse searchResponse = prepareSearch().setQuery(queryStringQuery("str:kimcy~1")).get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); + assertNoFailuresAndResponse(prepareSearch().setQuery(queryStringQuery("str:kimcy~1")), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); } @TestIssueLogging( @@ -764,15 +790,14 @@ public void testQuotedQueryStringWithBoost() throws InterruptedException { client().prepareIndex("test").setId("2").setSource("important", "nothing important", "less_important", "phrase match") ); - SearchResponse searchResponse = prepareSearch().setQuery( - queryStringQuery("\"phrase match\"").field("important", boost).field("less_important") - ).get(); - assertHitCount(searchResponse, 2L); - assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("2")); - assertThat( - (double) searchResponse.getHits().getAt(0).getScore(), - closeTo(boost * searchResponse.getHits().getAt(1).getScore(), .1) + assertResponse( + prepareSearch().setQuery(queryStringQuery("\"phrase match\"").field("important", boost).field("less_important")), + response -> { + assertHitCount(response, 2L); + assertFirstHit(response, hasId("1")); + assertSecondHit(response, hasId("2")); + assertThat((double) response.getHits().getAt(0).getScore(), closeTo(boost * response.getHits().getAt(1).getScore(), .1)); + } ); } @@ -782,16 +807,16 @@ public void testSpecialRangeSyntaxInQueryString() { client().prepareIndex("test").setId("2").setSource("str", "shay", "date", "2012-02-05", "num", 20).get(); refresh(); - SearchResponse searchResponse = prepareSearch().setQuery(queryStringQuery("num:>19")).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("2")); - + assertResponse(prepareSearch().setQuery(queryStringQuery("num:>19")), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("2")); + }); assertHitCount(prepareSearch().setQuery(queryStringQuery("num:>20")), 0L); - searchResponse = prepareSearch().setQuery(queryStringQuery("num:>=20")).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("2")); - + assertResponse(prepareSearch().setQuery(queryStringQuery("num:>=20")), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("2")); + }); assertHitCount(prepareSearch().setQuery(queryStringQuery("num:>11")), 2L); 
assertHitCount(prepareSearch().setQuery(queryStringQuery("num:<20")), 1L); assertHitCount(prepareSearch().setQuery(queryStringQuery("num:<=20")), 2L); @@ -996,25 +1021,26 @@ public void testBasicQueryById() throws Exception { client().prepareIndex("test").setId("3").setSource("field1", "value3").get(); refresh(); - SearchResponse searchResponse = prepareSearch().setQuery(idsQuery().addIds("1", "2")).get(); - assertHitCount(searchResponse, 2L); - assertThat(searchResponse.getHits().getHits().length, equalTo(2)); - - searchResponse = prepareSearch().setQuery(idsQuery().addIds("1")).get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - - searchResponse = prepareSearch().setQuery(idsQuery().addIds("1", "2")).get(); - assertHitCount(searchResponse, 2L); - assertThat(searchResponse.getHits().getHits().length, equalTo(2)); - - searchResponse = prepareSearch().setQuery(idsQuery().addIds("1")).get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - - searchResponse = prepareSearch().setQuery(idsQuery().addIds("1", "2", "3", "4")).get(); - assertHitCount(searchResponse, 3L); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); + assertResponse(prepareSearch().setQuery(idsQuery().addIds("1", "2")), response -> { + assertHitCount(response, 2L); + assertThat(response.getHits().getHits().length, equalTo(2)); + }); + assertResponse(prepareSearch().setQuery(idsQuery().addIds("1")), response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getHits().length, equalTo(1)); + }); + assertResponse(prepareSearch().setQuery(idsQuery().addIds("1", "2")), response -> { + assertHitCount(response, 2L); + assertThat(response.getHits().getHits().length, equalTo(2)); + }); + assertResponse(prepareSearch().setQuery(idsQuery().addIds("1")), response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getHits().length, equalTo(1)); + }); + assertResponse(prepareSearch().setQuery(idsQuery().addIds("1", "2", "3", "4")), response -> { + assertHitCount(response, 3L); + assertThat(response.getHits().getHits().length, equalTo(3)); + }); } public void testNumericTermsAndRanges() throws Exception { @@ -1051,86 +1077,106 @@ public void testNumericTermsAndRanges() throws Exception { .get(); refresh(); - SearchResponse searchResponse; logger.info("--> term query on 1"); - searchResponse = prepareSearch("test").setQuery(termQuery("num_byte", 1)).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(termQuery("num_short", 1)).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(termQuery("num_integer", 1)).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(termQuery("num_long", 1)).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(termQuery("num_float", 1)).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(termQuery("num_double", 1)).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - + assertResponse(prepareSearch("test").setQuery(termQuery("num_byte", 1)), response -> { + 
assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(termQuery("num_short", 1)), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(termQuery("num_integer", 1)), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(termQuery("num_long", 1)), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(termQuery("num_float", 1)), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(termQuery("num_double", 1)), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); logger.info("--> terms query on 1"); - searchResponse = prepareSearch("test").setQuery(termsQuery("num_byte", new int[] { 1 })).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(termsQuery("num_short", new int[] { 1 })).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(termsQuery("num_integer", new int[] { 1 })).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(termsQuery("num_long", new int[] { 1 })).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(termsQuery("num_float", new double[] { 1 })).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(termsQuery("num_double", new double[] { 1 })).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - + assertResponse(prepareSearch("test").setQuery(termsQuery("num_byte", new int[] { 1 })), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(termsQuery("num_short", new int[] { 1 })), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(termsQuery("num_integer", new int[] { 1 })), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(termsQuery("num_long", new int[] { 1 })), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(termsQuery("num_float", new double[] { 1 })), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(termsQuery("num_double", new double[] { 1 })), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); logger.info("--> term filter on 1"); - searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_byte", 1))).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_short", 1))).get(); - assertHitCount(searchResponse, 1L); - 
assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_integer", 1))).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_long", 1))).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_float", 1))).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_double", 1))).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - + assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_byte", 1))), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_short", 1))), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_integer", 1))), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_long", 1))), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_float", 1))), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_double", 1))), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); logger.info("--> terms filter on 1"); - searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_byte", new int[] { 1 }))).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_short", new int[] { 1 }))).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_integer", new int[] { 1 }))).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_long", new int[] { 1 }))).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_float", new int[] { 1 }))).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_double", new int[] { 1 }))).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); + assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_byte", new int[] { 1 }))), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_short", new int[] { 1 }))), response -> { + assertHitCount(response, 1L); + 
assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_integer", new int[] { 1 }))), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_long", new int[] { 1 }))), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_float", new int[] { 1 }))), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_double", new int[] { 1 }))), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); } public void testNumericRangeFilter_2826() throws Exception { @@ -1238,8 +1284,7 @@ public void testIntervals() throws InterruptedException { } } }"""; - SearchResponse response = prepareSearch("test").setQuery(wrapperQuery(json)).get(); - assertHitCount(response, 1L); + assertHitCount(prepareSearch("test").setQuery(wrapperQuery(json)), 1L); } // see #2994 @@ -1437,12 +1482,16 @@ public void testMinScore() throws ExecutionException, InterruptedException { client().prepareIndex("test").setId("4").setSource("score", 0.5).get(); refresh(); - SearchResponse searchResponse = prepareSearch("test").setQuery( - functionScoreQuery(ScoreFunctionBuilders.fieldValueFactorFunction("score").missing(1.0)).setMinScore(1.5f) - ).get(); - assertHitCount(searchResponse, 2); - assertFirstHit(searchResponse, hasId("3")); - assertSecondHit(searchResponse, hasId("1")); + assertResponse( + prepareSearch("test").setQuery( + functionScoreQuery(ScoreFunctionBuilders.fieldValueFactorFunction("score").missing(1.0)).setMinScore(1.5f) + ), + response -> { + assertHitCount(response, 2); + assertFirstHit(response, hasId("3")); + assertSecondHit(response, hasId("1")); + } + ); } public void testQueryStringWithSlopAndFields() { @@ -1506,65 +1555,96 @@ public void testRangeQueryWithTimeZone() throws Exception { .setSource("date", Instant.now().atZone(ZoneOffset.ofHours(1)).toInstant().toEpochMilli(), "num", 4) ); - SearchResponse searchResponse = prepareSearch("test").setQuery( - QueryBuilders.rangeQuery("date").from("2014-01-01T00:00:00").to("2014-01-01T00:59:00") - ).get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getId(), is("1")); - searchResponse = prepareSearch("test").setQuery( - QueryBuilders.rangeQuery("date").from("2013-12-31T23:00:00").to("2013-12-31T23:59:00") - ).get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getId(), is("2")); - searchResponse = prepareSearch("test").setQuery( - QueryBuilders.rangeQuery("date").from("2014-01-01T01:00:00").to("2014-01-01T01:59:00") - ).get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getId(), is("3")); - + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.rangeQuery("date").from("2014-01-01T00:00:00").to("2014-01-01T00:59:00")), + response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), is("1")); + } + ); + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.rangeQuery("date").from("2013-12-31T23:00:00").to("2013-12-31T23:59:00")), + response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), is("2")); + } + 
); + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.rangeQuery("date").from("2014-01-01T01:00:00").to("2014-01-01T01:59:00")), + response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), is("3")); + } + ); // We explicitly define a time zone in the from/to dates so whatever the time zone is, it won't be used - searchResponse = prepareSearch("test").setQuery( - QueryBuilders.rangeQuery("date").from("2014-01-01T00:00:00Z").to("2014-01-01T00:59:00Z").timeZone("+10:00") - ).get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getId(), is("1")); - searchResponse = prepareSearch("test").setQuery( - QueryBuilders.rangeQuery("date").from("2013-12-31T23:00:00Z").to("2013-12-31T23:59:00Z").timeZone("+10:00") - ).get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getId(), is("2")); - searchResponse = prepareSearch("test").setQuery( - QueryBuilders.rangeQuery("date").from("2014-01-01T01:00:00Z").to("2014-01-01T01:59:00Z").timeZone("+10:00") - ).get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getId(), is("3")); - + assertResponse( + prepareSearch("test").setQuery( + QueryBuilders.rangeQuery("date").from("2014-01-01T00:00:00Z").to("2014-01-01T00:59:00Z").timeZone("+10:00") + ), + response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), is("1")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + QueryBuilders.rangeQuery("date").from("2013-12-31T23:00:00Z").to("2013-12-31T23:59:00Z").timeZone("+10:00") + ), + response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), is("2")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + QueryBuilders.rangeQuery("date").from("2014-01-01T01:00:00Z").to("2014-01-01T01:59:00Z").timeZone("+10:00") + ), + response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), is("3")); + } + ); // We define a time zone to be applied to the filter and from/to have no time zone - searchResponse = prepareSearch("test").setQuery( - QueryBuilders.rangeQuery("date").from("2014-01-01T03:00:00").to("2014-01-01T03:59:00").timeZone("+03:00") - ).get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getId(), is("1")); - searchResponse = prepareSearch("test").setQuery( - QueryBuilders.rangeQuery("date").from("2014-01-01T02:00:00").to("2014-01-01T02:59:00").timeZone("+03:00") - ).get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getId(), is("2")); - searchResponse = prepareSearch("test").setQuery( - QueryBuilders.rangeQuery("date").from("2014-01-01T04:00:00").to("2014-01-01T04:59:00").timeZone("+03:00") - ).get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getId(), is("3")); - - searchResponse = prepareSearch("test").setQuery( - QueryBuilders.rangeQuery("date").from("2014-01-01").to("2014-01-01T00:59:00").timeZone("-01:00") - ).get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getId(), is("3")); - - searchResponse = prepareSearch("test").setQuery(QueryBuilders.rangeQuery("date").from("now/d-1d").timeZone("+01:00")).get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getId(), is("4")); + assertResponse( + prepareSearch("test").setQuery( + 
QueryBuilders.rangeQuery("date").from("2014-01-01T03:00:00").to("2014-01-01T03:59:00").timeZone("+03:00") + ), + response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), is("1")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + QueryBuilders.rangeQuery("date").from("2014-01-01T02:00:00").to("2014-01-01T02:59:00").timeZone("+03:00") + ), + response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), is("2")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + QueryBuilders.rangeQuery("date").from("2014-01-01T04:00:00").to("2014-01-01T04:59:00").timeZone("+03:00") + ), + response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), is("3")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + QueryBuilders.rangeQuery("date").from("2014-01-01").to("2014-01-01T00:59:00").timeZone("-01:00") + ), + response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), is("3")); + } + ); + assertResponse(prepareSearch("test").setQuery(QueryBuilders.rangeQuery("date").from("now/d-1d").timeZone("+01:00")), response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), is("4")); + }); } /** @@ -1636,23 +1716,27 @@ public void testMatchPhrasePrefixQuery() throws ExecutionException, InterruptedE public void testQueryStringParserCache() throws Exception { createIndex("test"); indexRandom(true, false, client().prepareIndex("test").setId("1").setSource("nameTokens", "xyz")); - - SearchResponse response = prepareSearch("test").setSearchType(SearchType.DFS_QUERY_THEN_FETCH) - .setQuery(QueryBuilders.queryStringQuery("xyz").boost(100)) - .get(); - assertThat(response.getHits().getTotalHits().value, equalTo(1L)); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - - float first = response.getHits().getAt(0).getScore(); + final float[] first = new float[1]; + assertResponse( + prepareSearch("test").setSearchType(SearchType.DFS_QUERY_THEN_FETCH).setQuery(QueryBuilders.queryStringQuery("xyz").boost(100)), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + first[0] = response.getHits().getAt(0).getScore(); + } + ); for (int i = 0; i < 100; i++) { - response = prepareSearch("test").setSearchType(SearchType.DFS_QUERY_THEN_FETCH) - .setQuery(QueryBuilders.queryStringQuery("xyz").boost(100)) - .get(); - - assertThat(response.getHits().getTotalHits().value, equalTo(1L)); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - float actual = response.getHits().getAt(0).getScore(); - assertThat(i + " expected: " + first + " actual: " + actual, Float.compare(first, actual), equalTo(0)); + final int finalI = i; + assertResponse( + prepareSearch("test").setSearchType(SearchType.DFS_QUERY_THEN_FETCH) + .setQuery(QueryBuilders.queryStringQuery("xyz").boost(100)), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + float actual = response.getHits().getAt(0).getScore(); + assertThat(finalI + " expected: " + first[0] + " actual: " + actual, Float.compare(first[0], actual), equalTo(0)); + } + ); } } @@ -1666,8 +1750,7 @@ public void testRangeQueryRangeFields_24744() throws Exception { refresh(); RangeQueryBuilder range = new 
RangeQueryBuilder("int_range").relation("intersects").from(Integer.MIN_VALUE).to(Integer.MAX_VALUE); - SearchResponse searchResponse = prepareSearch("test").setQuery(range).get(); - assertHitCount(searchResponse, 1); + assertHitCount(prepareSearch("test").setQuery(range), 1L); } public void testNestedQueryWithFieldAlias() throws Exception { @@ -1732,17 +1815,16 @@ public void testFieldAliasesForMetaFields() throws Exception { indexRandom(true, false, indexRequest); updateClusterSettings(Settings.builder().put(IndicesService.INDICES_ID_FIELD_DATA_ENABLED_SETTING.getKey(), true)); try { - SearchResponse searchResponse = prepareSearch().setQuery(termQuery("routing-alias", "custom")) - .addDocValueField("id-alias") - .get(); - assertHitCount(searchResponse, 1L); + assertResponse(prepareSearch().setQuery(termQuery("routing-alias", "custom")).addDocValueField("id-alias"), response -> { + assertHitCount(response, 1L); - SearchHit hit = searchResponse.getHits().getAt(0); - assertEquals(2, hit.getFields().size()); - assertTrue(hit.getFields().containsKey("id-alias")); + SearchHit hit = response.getHits().getAt(0); + assertEquals(2, hit.getFields().size()); + assertTrue(hit.getFields().containsKey("id-alias")); - DocumentField field = hit.getFields().get("id-alias"); - assertThat(field.getValue().toString(), equalTo("1")); + DocumentField field = hit.getFields().get("id-alias"); + assertThat(field.getValue().toString(), equalTo("1")); + }); } finally { // unset cluster setting updateClusterSettings(Settings.builder().putNull(IndicesService.INDICES_ID_FIELD_DATA_ENABLED_SETTING.getKey())); @@ -1886,11 +1968,12 @@ public void testFetchIdFieldQuery() { ensureGreen(); refresh(); - SearchResponse response = prepareSearch("test").addFetchField("_id").setSize(docCount).get(); - SearchHit[] hits = response.getHits().getHits(); - assertEquals(docCount, hits.length); - for (SearchHit hit : hits) { - assertNotNull(hit.getFields().get("_id").getValue()); - } + assertResponse(prepareSearch("test").addFetchField("_id").setSize(docCount), response -> { + SearchHit[] hits = response.getHits().getHits(); + assertEquals(docCount, hits.length); + for (SearchHit hit : hits) { + assertNotNull(hit.getFields().get("_id").getValue()); + } + }); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SimpleQueryStringIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SimpleQueryStringIT.java index 78d98b76b9bc8..cc41ac0089a51 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SimpleQueryStringIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SimpleQueryStringIT.java @@ -14,7 +14,6 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.analysis.PreConfiguredTokenFilter; @@ -49,7 +48,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHitsWithoutFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; @@ -89,24 +89,31 @@ public void testSimpleQueryString() throws ExecutionException, InterruptedExcept // Tests boost value setting. In this case doc 1 should always be ranked above the other // two matches. - SearchResponse searchResponse = prepareSearch().setQuery( - boolQuery().should(simpleQueryStringQuery("\"foo bar\"").boost(10.0f)).should(termQuery("body", "eggplant")) - ).get(); - assertHitCount(searchResponse, 2L); - assertFirstHit(searchResponse, hasId("3")); - + assertResponse( + prepareSearch().setQuery( + boolQuery().should(simpleQueryStringQuery("\"foo bar\"").boost(10.0f)).should(termQuery("body", "eggplant")) + ), + response -> { + assertHitCount(response, 2L); + assertFirstHit(response, hasId("3")); + } + ); assertSearchHitsWithoutFailures(prepareSearch().setQuery(simpleQueryStringQuery("foo bar").defaultOperator(Operator.AND)), "3"); assertSearchHitsWithoutFailures(prepareSearch().setQuery(simpleQueryStringQuery("\"quux baz\" +(eggplant | spaghetti)")), "4", "5"); assertSearchHitsWithoutFailures(prepareSearch().setQuery(simpleQueryStringQuery("eggplants").analyzer("mock_snowball")), "4"); - searchResponse = prepareSearch().setQuery( - simpleQueryStringQuery("spaghetti").field("body", 1000.0f).field("otherbody", 2.0f).queryName("myquery") - ).get(); - assertHitCount(searchResponse, 2L); - assertFirstHit(searchResponse, hasId("5")); - assertSearchHits(searchResponse, "5", "6"); - assertThat(searchResponse.getHits().getAt(0).getMatchedQueries()[0], equalTo("myquery")); + assertResponse( + prepareSearch().setQuery( + simpleQueryStringQuery("spaghetti").field("body", 1000.0f).field("otherbody", 2.0f).queryName("myquery") + ), + response -> { + assertHitCount(response, 2L); + assertFirstHit(response, hasId("5")); + assertSearchHits(response, "5", "6"); + assertThat(response.getHits().getAt(0).getMatchedQueries()[0], equalTo("myquery")); + } + ); assertSearchHitsWithoutFailures(prepareSearch().setQuery(simpleQueryStringQuery("spaghetti").field("*body")), "5", "6"); } @@ -281,12 +288,14 @@ public void testSimpleQueryStringLenient() throws ExecutionException, Interrupte ); refresh(); - SearchResponse searchResponse = prepareSearch().setAllowPartialSearchResults(true) - .setQuery(simpleQueryStringQuery("foo").field("field")) - .get(); - assertFailures(searchResponse); - assertHitCount(searchResponse, 1L); - assertSearchHits(searchResponse, "1"); + assertResponse( + prepareSearch().setAllowPartialSearchResults(true).setQuery(simpleQueryStringQuery("foo").field("field")), + response -> { + assertFailures(response); + assertHitCount(response, 1L); + assertSearchHits(response, "1"); + } + ); assertSearchHitsWithoutFailures(prepareSearch().setQuery(simpleQueryStringQuery("foo").field("field").lenient(true)), "1"); } @@ -373,17 +382,18 @@ public void testBasicAllQuery() throws Exception { reqs.add(client().prepareIndex("test").setId("3").setSource("f3", "foo bar baz")); indexRandom(true, false, reqs); - SearchResponse resp = prepareSearch("test").setQuery(simpleQueryStringQuery("foo")).get(); - assertHitCount(resp, 2L); - assertHits(resp.getHits(), "1", 
"3"); - - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("bar")).get(); - assertHitCount(resp, 2L); - assertHits(resp.getHits(), "1", "3"); - - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("Bar")).get(); - assertHitCount(resp, 3L); - assertHits(resp.getHits(), "1", "2", "3"); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("foo")), response -> { + assertHitCount(response, 2L); + assertHits(response.getHits(), "1", "3"); + }); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("bar")), response -> { + assertHitCount(response, 2L); + assertHits(response.getHits(), "1", "3"); + }); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("Bar")), response -> { + assertHitCount(response, 3L); + assertHits(response.getHits(), "1", "2", "3"); + }); } public void testWithDate() throws Exception { @@ -396,21 +406,22 @@ public void testWithDate() throws Exception { reqs.add(client().prepareIndex("test").setId("2").setSource("f1", "bar", "f_date", "2015/09/01")); indexRandom(true, false, reqs); - SearchResponse resp = prepareSearch("test").setQuery(simpleQueryStringQuery("foo bar")).get(); - assertHits(resp.getHits(), "1", "2"); - assertHitCount(resp, 2L); - - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("\"2015/09/02\"")).get(); - assertHits(resp.getHits(), "1"); - assertHitCount(resp, 1L); - - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("bar \"2015/09/02\"")).get(); - assertHits(resp.getHits(), "1", "2"); - assertHitCount(resp, 2L); - - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("\"2015/09/02\" \"2015/09/01\"")).get(); - assertHits(resp.getHits(), "1", "2"); - assertHitCount(resp, 2L); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("foo bar")), response -> { + assertHits(response.getHits(), "1", "2"); + assertHitCount(response, 2L); + }); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("\"2015/09/02\"")), response -> { + assertHits(response.getHits(), "1"); + assertHitCount(response, 1L); + }); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("bar \"2015/09/02\"")), response -> { + assertHits(response.getHits(), "1", "2"); + assertHitCount(response, 2L); + }); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("\"2015/09/02\" \"2015/09/01\"")), response -> { + assertHits(response.getHits(), "1", "2"); + assertHitCount(response, 2L); + }); } public void testWithLotsOfTypes() throws Exception { @@ -427,21 +438,22 @@ public void testWithLotsOfTypes() throws Exception { ); indexRandom(true, false, reqs); - SearchResponse resp = prepareSearch("test").setQuery(simpleQueryStringQuery("foo bar")).get(); - assertHits(resp.getHits(), "1", "2"); - assertHitCount(resp, 2L); - - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("\"2015/09/02\"")).get(); - assertHits(resp.getHits(), "1"); - assertHitCount(resp, 1L); - - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("127.0.0.2 \"2015/09/02\"")).get(); - assertHits(resp.getHits(), "1", "2"); - assertHitCount(resp, 2L); - - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("127.0.0.1 1.8")).get(); - assertHits(resp.getHits(), "1", "2"); - assertHitCount(resp, 2L); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("foo bar")), response -> { + assertHits(response.getHits(), "1", "2"); + assertHitCount(response, 2L); + }); + 
assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("\"2015/09/02\"")), response -> { + assertHits(response.getHits(), "1"); + assertHitCount(response, 1L); + }); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("127.0.0.2 \"2015/09/02\"")), response -> { + assertHits(response.getHits(), "1", "2"); + assertHitCount(response, 2L); + }); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("127.0.0.1 1.8")), response -> { + assertHits(response.getHits(), "1", "2"); + assertHitCount(response, 2L); + }); } public void testDocWithAllTypes() throws Exception { @@ -454,39 +466,35 @@ public void testDocWithAllTypes() throws Exception { reqs.add(client().prepareIndex("test").setId("1").setSource(docBody, XContentType.JSON)); indexRandom(true, false, reqs); - SearchResponse resp = prepareSearch("test").setQuery(simpleQueryStringQuery("foo")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("Bar")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("Baz")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("19")).get(); - assertHits(resp.getHits(), "1"); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("foo")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("Bar")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("Baz")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("19")), response -> assertHits(response.getHits(), "1")); // nested doesn't match because it's hidden - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("1476383971")).get(); - assertHits(resp.getHits(), "1"); + assertResponse( + prepareSearch("test").setQuery(simpleQueryStringQuery("1476383971")), + response -> assertHits(response.getHits(), "1") + ); // bool doesn't match - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("7")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("23")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("1293")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("42")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("1.7")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("1.5")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("127.0.0.1")).get(); - assertHits(resp.getHits(), "1"); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("7")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("23")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("1293")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("42")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("1.7")), response -> 
assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("1.5")), response -> assertHits(response.getHits(), "1")); + assertResponse( + prepareSearch("test").setQuery(simpleQueryStringQuery("127.0.0.1")), + response -> assertHits(response.getHits(), "1") + ); // binary doesn't match // suggest doesn't match // geo_point doesn't match // geo_shape doesn't match - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("foo Bar 19 127.0.0.1").defaultOperator(Operator.AND)).get(); - assertHits(resp.getHits(), "1"); + assertResponse( + prepareSearch("test").setQuery(simpleQueryStringQuery("foo Bar 19 127.0.0.1").defaultOperator(Operator.AND)), + response -> assertHits(response.getHits(), "1") + ); } public void testKeywordWithWhitespace() throws Exception { @@ -500,13 +508,14 @@ public void testKeywordWithWhitespace() throws Exception { reqs.add(client().prepareIndex("test").setId("3").setSource("f1", "foo bar")); indexRandom(true, false, reqs); - SearchResponse resp = prepareSearch("test").setQuery(simpleQueryStringQuery("foo")).get(); - assertHits(resp.getHits(), "3"); - assertHitCount(resp, 1L); - - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("bar")).get(); - assertHits(resp.getHits(), "2", "3"); - assertHitCount(resp, 2L); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("foo")), response -> { + assertHits(response.getHits(), "3"); + assertHitCount(response, 1L); + }); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("bar")), response -> { + assertHits(response.getHits(), "2", "3"); + assertHitCount(response, 2L); + }); } public void testAllFieldsWithSpecifiedLeniency() throws Exception { @@ -536,11 +545,10 @@ public void testFieldAlias() throws Exception { indexRequests.add(client().prepareIndex("test").setId("3").setSource("f3", "another value", "f2", "three")); indexRandom(true, false, indexRequests); - SearchResponse response = prepareSearch("test").setQuery(simpleQueryStringQuery("value").field("f3_alias")).get(); - - assertNoFailures(response); - assertHitCount(response, 2); - assertHits(response.getHits(), "2", "3"); + assertNoFailuresAndResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("value").field("f3_alias")), response -> { + assertHitCount(response, 2); + assertHits(response.getHits(), "2", "3"); + }); } public void testFieldAliasWithWildcardField() throws Exception { @@ -554,11 +562,10 @@ public void testFieldAliasWithWildcardField() throws Exception { indexRequests.add(client().prepareIndex("test").setId("3").setSource("f3", "another value", "f2", "three")); indexRandom(true, false, indexRequests); - SearchResponse response = prepareSearch("test").setQuery(simpleQueryStringQuery("value").field("f3_*")).get(); - - assertNoFailures(response); - assertHitCount(response, 2); - assertHits(response.getHits(), "2", "3"); + assertNoFailuresAndResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("value").field("f3_*")), response -> { + assertHitCount(response, 2); + assertHits(response.getHits(), "2", "3"); + }); } public void testFieldAliasOnDisallowedFieldType() throws Exception { @@ -572,11 +579,10 @@ public void testFieldAliasOnDisallowedFieldType() throws Exception { // The wildcard field matches aliases for both a text and boolean field. // By default, the boolean field should be ignored when building the query. 
- SearchResponse response = prepareSearch("test").setQuery(queryStringQuery("text").field("f*_alias")).get(); - - assertNoFailures(response); - assertHitCount(response, 1); - assertHits(response.getHits(), "1"); + assertNoFailuresAndResponse(prepareSearch("test").setQuery(queryStringQuery("text").field("f*_alias")), response -> { + assertHitCount(response, 1); + assertHits(response.getHits(), "1"); + }); } private void assertHits(SearchHits hits, String... ids) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchPreferenceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchPreferenceIT.java index 1d13bea9e0639..ee87b868f280d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchPreferenceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchPreferenceIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -32,6 +31,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -67,21 +67,25 @@ public void testStopOneNodePreferenceWithRedState() throws IOException { "_prefer_nodes:somenode,server2" }; for (String pref : preferences) { logger.info("--> Testing out preference={}", pref); - SearchResponse searchResponse = prepareSearch().setSize(0).setPreference(pref).get(); - assertThat(RestStatus.OK, equalTo(searchResponse.status())); - assertThat(pref, searchResponse.getFailedShards(), greaterThanOrEqualTo(0)); - searchResponse = prepareSearch().setPreference(pref).get(); - assertThat(RestStatus.OK, equalTo(searchResponse.status())); - assertThat(pref, searchResponse.getFailedShards(), greaterThanOrEqualTo(0)); + assertResponse(prepareSearch().setSize(0).setPreference(pref), response -> { + assertThat(RestStatus.OK, equalTo(response.status())); + assertThat(pref, response.getFailedShards(), greaterThanOrEqualTo(0)); + }); + assertResponse(prepareSearch().setPreference(pref), response -> { + assertThat(RestStatus.OK, equalTo(response.status())); + assertThat(pref, response.getFailedShards(), greaterThanOrEqualTo(0)); + }); } // _only_local is a stricter preference, we need to send the request to a data node - SearchResponse searchResponse = dataNodeClient().prepareSearch().setSize(0).setPreference("_only_local").get(); - assertThat(RestStatus.OK, equalTo(searchResponse.status())); - assertThat("_only_local", searchResponse.getFailedShards(), greaterThanOrEqualTo(0)); - searchResponse = dataNodeClient().prepareSearch().setPreference("_only_local").get(); - assertThat(RestStatus.OK, equalTo(searchResponse.status())); - assertThat("_only_local", searchResponse.getFailedShards(), greaterThanOrEqualTo(0)); + 
assertResponse(dataNodeClient().prepareSearch().setSize(0).setPreference("_only_local"), response -> { + assertThat(RestStatus.OK, equalTo(response.status())); + assertThat("_only_local", response.getFailedShards(), greaterThanOrEqualTo(0)); + }); + assertResponse(dataNodeClient().prepareSearch().setPreference("_only_local"), response -> { + assertThat(RestStatus.OK, equalTo(response.status())); + assertThat("_only_local", response.getFailedShards(), greaterThanOrEqualTo(0)); + }); } public void testNoPreferenceRandom() { @@ -97,12 +101,16 @@ public void testNoPreferenceRandom() { refresh(); final Client client = internalCluster().smartClient(); - SearchResponse searchResponse = client.prepareSearch("test").setQuery(matchAllQuery()).get(); - String firstNodeId = searchResponse.getHits().getAt(0).getShard().getNodeId(); - searchResponse = client.prepareSearch("test").setQuery(matchAllQuery()).get(); - String secondNodeId = searchResponse.getHits().getAt(0).getShard().getNodeId(); - - assertThat(firstNodeId, not(equalTo(secondNodeId))); + assertResponse( + client.prepareSearch("test").setQuery(matchAllQuery()), + first -> assertResponse( + client.prepareSearch("test").setQuery(matchAllQuery()), + second -> assertThat( + first.getHits().getAt(0).getShard().getNodeId(), + not(equalTo(second.getHits().getAt(0).getShard().getNodeId())) + ) + ) + ); } public void testSimplePreference() { @@ -112,14 +120,20 @@ public void testSimplePreference() { client().prepareIndex("test").setSource("field1", "value1").get(); refresh(); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); + assertResponse( + prepareSearch().setQuery(matchAllQuery()), + response -> assertThat(response.getHits().getTotalHits().value, equalTo(1L)) + ); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setPreference("_local").get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).setPreference("_local"), + response -> assertThat(response.getHits().getTotalHits().value, equalTo(1L)) + ); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setPreference("1234").get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).setPreference("1234"), + response -> assertThat(response.getHits().getTotalHits().value, equalTo(1L)) + ); } public void testThatSpecifyingNonExistingNodesReturnsUsefulError() { @@ -188,9 +202,10 @@ public void testNodesOnlyRandom() { private void assertSearchOnRandomNodes(SearchRequestBuilder request) { Set<String> hitNodes = new HashSet<>(); for (int i = 0; i < 2; i++) { - SearchResponse searchResponse = request.get(); - assertThat(searchResponse.getHits().getHits().length, greaterThan(0)); - hitNodes.add(searchResponse.getHits().getAt(0).getShard().getNodeId()); + assertResponse(request, response -> { + assertThat(response.getHits().getHits().length, greaterThan(0)); + hitNodes.add(response.getHits().getAt(0).getShard().getNodeId()); + }); } assertThat(hitNodes.size(), greaterThan(1)); } @@ -259,8 +274,9 @@ public void testCustomPreferenceUnaffectedByOtherShardMovements() { } private static void assertSearchesSpecificNode(String index, String customPreference, String nodeId) { - final SearchResponse searchResponse = prepareSearch(index).setQuery(matchAllQuery()).setPreference(customPreference).get(); -
assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getShard().getNodeId(), equalTo(nodeId)); + assertResponse(prepareSearch(index).setQuery(matchAllQuery()).setPreference(customPreference), response -> { + assertThat(response.getHits().getHits().length, equalTo(1)); + assertThat(response.getHits().getAt(0).getShard().getNodeId(), equalTo(nodeId)); + }); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchReplicaSelectionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchReplicaSelectionIT.java index 35ea9614d182a..1362b0166a709 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchReplicaSelectionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchReplicaSelectionIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.OperationRouting; @@ -23,6 +22,7 @@ import java.util.Set; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -49,18 +49,18 @@ public void testNodeSelection() { // Before we've gathered stats for all nodes, we should try each node once. Set<String> nodeIds = new HashSet<>(); - SearchResponse searchResponse = client.prepareSearch().setQuery(matchAllQuery()).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - nodeIds.add(searchResponse.getHits().getAt(0).getShard().getNodeId()); - - searchResponse = client.prepareSearch().setQuery(matchAllQuery()).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - nodeIds.add(searchResponse.getHits().getAt(0).getShard().getNodeId()); - - searchResponse = client.prepareSearch().setQuery(matchAllQuery()).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - nodeIds.add(searchResponse.getHits().getAt(0).getShard().getNodeId()); - + assertResponse(client.prepareSearch().setQuery(matchAllQuery()), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + nodeIds.add(response.getHits().getAt(0).getShard().getNodeId()); + }); + assertResponse(client.prepareSearch().setQuery(matchAllQuery()), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + nodeIds.add(response.getHits().getAt(0).getShard().getNodeId()); + }); + assertResponse(client.prepareSearch().setQuery(matchAllQuery()), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + nodeIds.add(response.getHits().getAt(0).getShard().getNodeId()); + }); assertEquals(3, nodeIds.size()); // Now after more searches, we should select a node with the lowest ARS rank.
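A side effect of moving assertions into a lambda, visible in the nodeIds set above and in the final int finalI and float[] first copies elsewhere in this diff: a lambda can only read effectively-final locals, so any state that must cross an assertResponse call goes into a mutable holder. A minimal sketch of the capture pattern (the names here are illustrative, not from the diff):

    // A one-element array (or a collection, like the nodeIds set above) stands
    // in for a mutable local that the lambda could not otherwise assign to.
    final String[] firstNodeId = new String[1];
    assertResponse(client.prepareSearch().setQuery(matchAllQuery()), response -> {
        firstNodeId[0] = response.getHits().getAt(0).getShard().getNodeId();
    });
    assertResponse(client.prepareSearch().setQuery(matchAllQuery()), response -> {
        // compare against the node captured from the previous search
        assertNotEquals(firstNodeId[0], response.getHits().getAt(0).getShard().getNodeId());
    });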
@@ -78,13 +78,14 @@ public void testNodeSelection() { assertNotNull(nodeStats); assertEquals(3, nodeStats.getAdaptiveSelectionStats().getComputedStats().size()); - searchResponse = client.prepareSearch().setQuery(matchAllQuery()).get(); - String selectedNodeId = searchResponse.getHits().getAt(0).getShard().getNodeId(); - double selectedRank = nodeStats.getAdaptiveSelectionStats().getRanks().get(selectedNodeId); + assertResponse(client.prepareSearch().setQuery(matchAllQuery()), response -> { + String selectedNodeId = response.getHits().getAt(0).getShard().getNodeId(); + double selectedRank = nodeStats.getAdaptiveSelectionStats().getRanks().get(selectedNodeId); - for (Map.Entry<String, Double> entry : nodeStats.getAdaptiveSelectionStats().getRanks().entrySet()) { - double rank = entry.getValue(); - assertThat(rank, greaterThanOrEqualTo(selectedRank)); - } + for (Map.Entry<String, Double> entry : nodeStats.getAdaptiveSelectionStats().getRanks().entrySet()) { + double rank = entry.getValue(); + assertThat(rank, greaterThanOrEqualTo(selectedRank)); + } + }); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchIT.java index dc460468db605..9e7ea2fed8aa2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.search.scriptfilter; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.fielddata.ScriptDocValues; @@ -37,6 +36,7 @@ import static org.elasticsearch.index.query.QueryBuilders.scriptQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; @@ -114,19 +114,20 @@ public void testCustomScriptBinaryField() throws Exception { flush(); refresh(); - SearchResponse response = prepareSearch().setQuery( - scriptQuery(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['binaryData'].get(0).length > 15", emptyMap())) - ) - .addScriptField( - "sbinaryData", - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['binaryData'].get(0).length", emptyMap()) + assertResponse( + prepareSearch().setQuery( + scriptQuery(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['binaryData'].get(0).length > 15", emptyMap())) ) - .get(); - - assertThat(response.getHits().getTotalHits().value, equalTo(1L)); - assertThat(response.getHits().getAt(0).getId(), equalTo("2")); - assertThat(response.getHits().getAt(0).getFields().get("sbinaryData").getValues().get(0), equalTo(16)); - + .addScriptField( + "sbinaryData", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['binaryData'].get(0).length", emptyMap()) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("2")); + assertThat(response.getHits().getAt(0).getFields().get("sbinaryData").getValues().get(0), equalTo(16)); + } + ); } private
byte[] getRandomBytes(int len) { @@ -168,51 +169,64 @@ public void testCustomScriptBoost() throws Exception { refresh(); logger.info("running doc['num1'].value > 1"); - SearchResponse response = prepareSearch().setQuery( - scriptQuery(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value > 1", Collections.emptyMap())) - ) - .addSort("num1", SortOrder.ASC) - .addScriptField("sNum1", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value", Collections.emptyMap())) - .get(); - - assertThat(response.getHits().getTotalHits().value, equalTo(2L)); - assertThat(response.getHits().getAt(0).getId(), equalTo("2")); - assertThat(response.getHits().getAt(0).getFields().get("sNum1").getValues().get(0), equalTo(2.0)); - assertThat(response.getHits().getAt(1).getId(), equalTo("3")); - assertThat(response.getHits().getAt(1).getFields().get("sNum1").getValues().get(0), equalTo(3.0)); - + assertResponse( + prepareSearch().setQuery( + scriptQuery(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value > 1", Collections.emptyMap())) + ) + .addSort("num1", SortOrder.ASC) + .addScriptField( + "sNum1", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value", Collections.emptyMap()) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("2")); + assertThat(response.getHits().getAt(0).getFields().get("sNum1").getValues().get(0), equalTo(2.0)); + assertThat(response.getHits().getAt(1).getId(), equalTo("3")); + assertThat(response.getHits().getAt(1).getFields().get("sNum1").getValues().get(0), equalTo(3.0)); + } + ); Map<String, Object> params = new HashMap<>(); params.put("param1", 2); logger.info("running doc['num1'].value > param1"); - response = prepareSearch().setQuery( - scriptQuery(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value > param1", params)) - ) - .addSort("num1", SortOrder.ASC) - .addScriptField("sNum1", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value", Collections.emptyMap())) - .get(); - - assertThat(response.getHits().getTotalHits().value, equalTo(1L)); - assertThat(response.getHits().getAt(0).getId(), equalTo("3")); - assertThat(response.getHits().getAt(0).getFields().get("sNum1").getValues().get(0), equalTo(3.0)); - + assertResponse( + prepareSearch().setQuery( + scriptQuery(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value > param1", params)) + ) + .addSort("num1", SortOrder.ASC) + .addScriptField( + "sNum1", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value", Collections.emptyMap()) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("3")); + assertThat(response.getHits().getAt(0).getFields().get("sNum1").getValues().get(0), equalTo(3.0)); + } + ); params = new HashMap<>(); params.put("param1", -1); logger.info("running doc['num1'].value > param1"); - response = prepareSearch().setQuery( - scriptQuery(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value > param1", params)) - ) - .addSort("num1", SortOrder.ASC) - .addScriptField("sNum1", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value", Collections.emptyMap())) - .get(); - - assertThat(response.getHits().getTotalHits().value, equalTo(3L)); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); -
assertThat(response.getHits().getAt(0).getFields().get("sNum1").getValues().get(0), equalTo(1.0)); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); - assertThat(response.getHits().getAt(1).getFields().get("sNum1").getValues().get(0), equalTo(2.0)); - assertThat(response.getHits().getAt(2).getId(), equalTo("3")); - assertThat(response.getHits().getAt(2).getFields().get("sNum1").getValues().get(0), equalTo(3.0)); + assertResponse( + prepareSearch().setQuery( + scriptQuery(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value > param1", params)) + ) + .addSort("num1", SortOrder.ASC) + .addScriptField( + "sNum1", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value", Collections.emptyMap()) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(0).getFields().get("sNum1").getValues().get(0), equalTo(1.0)); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(1).getFields().get("sNum1").getValues().get(0), equalTo(2.0)); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + assertThat(response.getHits().getAt(2).getFields().get("sNum1").getValues().get(0), equalTo(3.0)); + } + ); } public void testDisallowExpensiveQueries() { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/simple/SimpleSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/simple/SimpleSearchIT.java index 61490cac43e45..4189482a73f33 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/simple/SimpleSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/simple/SimpleSearchIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; @@ -47,7 +46,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -210,12 +210,12 @@ public void testSimpleDateRange() throws Exception { assertHitCountAndNoFailures(prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gt("1000")), 3L); // a numeric value of 1000 should be parsed as 1000 millis since epoch and return only docs after 1970 - SearchResponse searchResponse = prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gt(1000)).get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 2L); - String[] expectedIds = new String[] { "1", "2" }; - 
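SimpleSearchIT also swaps the plain assertNoFailures import for the combined assertNoFailuresAndResponse. A plausible composition of the two helpers, hedged since the actual ElasticsearchAssertions code may be organized differently:

    // Illustrative composition only; see ElasticsearchAssertions for the real helper.
    public static void assertNoFailuresAndResponse(SearchRequestBuilder request, Consumer<SearchResponse> consumer) {
        assertResponse(request, response -> {
            assertNoFailures(response);   // fail fast on any shard failure
            consumer.accept(response);    // then run the caller's assertions
        });
    }

This is why the converted hunks below can drop their standalone assertNoFailures(searchResponse) lines without losing coverage.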
@@ -210,12 +210,12 @@ public void testSimpleDateRange() throws Exception {
         assertHitCountAndNoFailures(prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gt("1000")), 3L);

         // a numeric value of 1000 should be parsed as 1000 millis since epoch and return only docs after 1970
-        SearchResponse searchResponse = prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gt(1000)).get();
-        assertNoFailures(searchResponse);
-        assertHitCount(searchResponse, 2L);
-        String[] expectedIds = new String[] { "1", "2" };
-        assertThat(searchResponse.getHits().getHits()[0].getId(), is(oneOf(expectedIds)));
-        assertThat(searchResponse.getHits().getHits()[1].getId(), is(oneOf(expectedIds)));
+        assertNoFailuresAndResponse(prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gt(1000)), response -> {
+            assertHitCount(response, 2L);
+            String[] expectedIds = new String[] { "1", "2" };
+            assertThat(response.getHits().getHits()[0].getId(), is(oneOf(expectedIds)));
+            assertThat(response.getHits().getHits()[1].getId(), is(oneOf(expectedIds)));
+        });
     }

     public void testRangeQueryKeyword() throws Exception {
@@ -255,17 +255,23 @@ public void testSimpleTerminateAfterCount() throws Exception {
         ensureGreen();
         refresh();

-        SearchResponse searchResponse;
         for (int i = 1; i < max; i++) {
-            searchResponse = prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(max)).setTerminateAfter(i).get();
-            assertHitCount(searchResponse, i);
-            assertTrue(searchResponse.isTerminatedEarly());
+            final int finalI = i;
+            assertResponse(
+                prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(max)).setTerminateAfter(i),
+                response -> {
+                    assertHitCount(response, finalI);
+                    assertTrue(response.isTerminatedEarly());
+                }
+            );
         }
-
-        searchResponse = prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(max)).setTerminateAfter(2 * max).get();
-
-        assertHitCount(searchResponse, max);
-        assertFalse(searchResponse.isTerminatedEarly());
+        assertResponse(
+            prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(max)).setTerminateAfter(2 * max),
+            response -> {
+                assertHitCount(response, max);
+                assertFalse(response.isTerminatedEarly());
+            }
+        );
     }

     public void testSimpleIndexSortEarlyTerminate() throws Exception {
@@ -283,17 +289,17 @@ public void testSimpleIndexSortEarlyTerminate() throws Exception {
         ensureGreen();
         refresh();

-        SearchResponse searchResponse;
         for (int i = 1; i < max; i++) {
-            searchResponse = prepareSearch("test").addDocValueField("rank")
-                .setTrackTotalHits(false)
-                .addSort("rank", SortOrder.ASC)
-                .setSize(i)
-                .get();
-            assertNull(searchResponse.getHits().getTotalHits());
-            for (int j = 0; j < i; j++) {
-                assertThat(searchResponse.getHits().getAt(j).field("rank").getValue(), equalTo((long) j));
-            }
+            final int finalI = i;
+            assertResponse(
+                prepareSearch("test").addDocValueField("rank").setTrackTotalHits(false).addSort("rank", SortOrder.ASC).setSize(i),
+                response -> {
+                    assertNull(response.getHits().getTotalHits());
+                    for (int j = 0; j < finalI; j++) {
+                        assertThat(response.getHits().getAt(j).field("rank").getValue(), equalTo((long) j));
+                    }
+                }
+            );
         }
     }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java
index 2926d36becb4a..f5dd2182e0551 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java
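The `final int finalI = i;` copies introduced in testSimpleTerminateAfterCount and testSimpleIndexSortEarlyTerminate above are not stylistic: a Java lambda may only capture local variables that are final or effectively final, and a loop counter mutated by `i++` is neither. A self-contained illustration of the rule:

    import java.util.function.IntSupplier;

    class CapturePattern {
        public static void main(String[] args) {
            for (int i = 1; i < 4; i++) {
                // IntSupplier s = () -> i;      // does not compile: i is mutated by the loop
                final int finalI = i;            // effectively final snapshot of the current value
                IntSupplier s = () -> finalI;    // compiles: captures the snapshot
                System.out.println(s.getAsInt());
            }
        }
    }

The same rule drives the `finalDocs` copy in FieldSortIT below.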
@@ -63,6 +63,8 @@
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
@@ -120,30 +122,32 @@ public void testIssue8226() {
         }
         refresh();
         // sort DESC
-        SearchResponse searchResponse = prepareSearch().addSort(
-            new FieldSortBuilder("entry").order(SortOrder.DESC).unmappedType(useMapping ? null : "long")
-        ).setSize(10).get();
-        logClusterState();
-        assertNoFailures(searchResponse);
-
-        for (int j = 1; j < searchResponse.getHits().getHits().length; j++) {
-            Number current = (Number) searchResponse.getHits().getHits()[j].getSourceAsMap().get("entry");
-            Number previous = (Number) searchResponse.getHits().getHits()[j - 1].getSourceAsMap().get("entry");
-            assertThat(searchResponse.toString(), current.intValue(), lessThan(previous.intValue()));
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch().addSort(new FieldSortBuilder("entry").order(SortOrder.DESC).unmappedType(useMapping ? null : "long"))
+                .setSize(10),
+            response -> {
+                logClusterState();
+                for (int j = 1; j < response.getHits().getHits().length; j++) {
+                    Number current = (Number) response.getHits().getHits()[j].getSourceAsMap().get("entry");
+                    Number previous = (Number) response.getHits().getHits()[j - 1].getSourceAsMap().get("entry");
+                    assertThat(response.toString(), current.intValue(), lessThan(previous.intValue()));
+                }
+            }
+        );
         // sort ASC
-        searchResponse = prepareSearch().addSort(
-            new FieldSortBuilder("entry").order(SortOrder.ASC).unmappedType(useMapping ? null : "long")
-        ).setSize(10).get();
-        logClusterState();
-        assertNoFailures(searchResponse);
-
-        for (int j = 1; j < searchResponse.getHits().getHits().length; j++) {
-            Number current = (Number) searchResponse.getHits().getHits()[j].getSourceAsMap().get("entry");
-            Number previous = (Number) searchResponse.getHits().getHits()[j - 1].getSourceAsMap().get("entry");
-            assertThat(searchResponse.toString(), current.intValue(), greaterThan(previous.intValue()));
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch().addSort(new FieldSortBuilder("entry").order(SortOrder.ASC).unmappedType(useMapping ? null : "long"))
+                .setSize(10),
+            response -> {
+                logClusterState();
+                for (int j = 1; j < response.getHits().getHits().length; j++) {
+                    Number current = (Number) response.getHits().getHits()[j].getSourceAsMap().get("entry");
+                    Number previous = (Number) response.getHits().getHits()[j - 1].getSourceAsMap().get("entry");
+                    assertThat(response.toString(), current.intValue(), greaterThan(previous.intValue()));
+                }
+            }
+        );
     }

     public void testIssue6614() throws ExecutionException, InterruptedException {
\n" + allDocsResponse.toString(), + response.getHits().getHits()[j].getId(), + equalTo(allDocsResponse.getHits().getHits()[j].getId()) + ); + } + } + ); + } } - } - + ); } public void testTrackScores() throws Exception { @@ -214,20 +225,19 @@ public void testTrackScores() throws Exception { ); refresh(); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()).addSort("svalue", SortOrder.ASC).get(); - - assertThat(searchResponse.getHits().getMaxScore(), equalTo(Float.NaN)); - for (SearchHit hit : searchResponse.getHits()) { - assertThat(hit.getScore(), equalTo(Float.NaN)); - } - + assertResponse(prepareSearch().setQuery(matchAllQuery()).addSort("svalue", SortOrder.ASC), response -> { + assertThat(response.getHits().getMaxScore(), equalTo(Float.NaN)); + for (SearchHit hit : response.getHits()) { + assertThat(hit.getScore(), equalTo(Float.NaN)); + } + }); // now check with score tracking - searchResponse = prepareSearch().setQuery(matchAllQuery()).addSort("svalue", SortOrder.ASC).setTrackScores(true).get(); - - assertThat(searchResponse.getHits().getMaxScore(), not(equalTo(Float.NaN))); - for (SearchHit hit : searchResponse.getHits()) { - assertThat(hit.getScore(), not(equalTo(Float.NaN))); - } + assertResponse(prepareSearch().setQuery(matchAllQuery()).addSort("svalue", SortOrder.ASC).setTrackScores(true), response -> { + assertThat(response.getHits().getMaxScore(), not(equalTo(Float.NaN))); + for (SearchHit hit : response.getHits()) { + assertThat(hit.getScore(), not(equalTo(Float.NaN))); + } + }); } public void testRandomSorting() throws IOException, InterruptedException, ExecutionException { @@ -273,40 +283,42 @@ public void testRandomSorting() throws IOException, InterruptedException, Execut indexRandom(true, builders); { int size = between(1, denseBytes.size()); - SearchResponse searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .setSize(size) - .addSort("dense_bytes", SortOrder.ASC) - .get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) numDocs)); - assertThat(searchResponse.getHits().getHits().length, equalTo(size)); - Set> entrySet = denseBytes.entrySet(); - Iterator> iterator = entrySet.iterator(); - for (int i = 0; i < size; i++) { - assertThat(iterator.hasNext(), equalTo(true)); - Entry next = iterator.next(); - assertThat("pos: " + i, searchResponse.getHits().getAt(i).getId(), equalTo(next.getValue())); - assertThat(searchResponse.getHits().getAt(i).getSortValues()[0].toString(), equalTo(next.getKey().utf8ToString())); - } + assertNoFailuresAndResponse( + prepareSearch("test").setQuery(matchAllQuery()).setSize(size).addSort("dense_bytes", SortOrder.ASC), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo((long) numDocs)); + assertThat(response.getHits().getHits().length, equalTo(size)); + Set> entrySet = denseBytes.entrySet(); + Iterator> iterator = entrySet.iterator(); + for (int i = 0; i < size; i++) { + assertThat(iterator.hasNext(), equalTo(true)); + Entry next = iterator.next(); + assertThat("pos: " + i, response.getHits().getAt(i).getId(), equalTo(next.getValue())); + assertThat(response.getHits().getAt(i).getSortValues()[0].toString(), equalTo(next.getKey().utf8ToString())); + } + } + ); } if (sparseBytes.isEmpty() == false) { int size = between(1, sparseBytes.size()); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .setPostFilter(QueryBuilders.existsQuery("sparse_bytes")) - .setSize(size) - .addSort("sparse_bytes", 
SortOrder.ASC) - .get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) sparseBytes.size())); - assertThat(searchResponse.getHits().getHits().length, equalTo(size)); - Set> entrySet = sparseBytes.entrySet(); - Iterator> iterator = entrySet.iterator(); - for (int i = 0; i < size; i++) { - assertThat(iterator.hasNext(), equalTo(true)); - Entry next = iterator.next(); - assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(next.getValue())); - assertThat(searchResponse.getHits().getAt(i).getSortValues()[0].toString(), equalTo(next.getKey().utf8ToString())); - } + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()) + .setPostFilter(QueryBuilders.existsQuery("sparse_bytes")) + .setSize(size) + .addSort("sparse_bytes", SortOrder.ASC), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo((long) sparseBytes.size())); + assertThat(response.getHits().getHits().length, equalTo(size)); + Set> entrySet = sparseBytes.entrySet(); + Iterator> iterator = entrySet.iterator(); + for (int i = 0; i < size; i++) { + assertThat(iterator.hasNext(), equalTo(true)); + Entry next = iterator.next(); + assertThat(response.getHits().getAt(i).getId(), equalTo(next.getValue())); + assertThat(response.getHits().getAt(i).getSortValues()[0].toString(), equalTo(next.getKey().utf8ToString())); + } + } + ); } } @@ -318,53 +330,59 @@ public void test3078() { client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", Integer.toString(i)).get(); } refresh(); - SearchResponse searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)) - .get(); - assertThat(searchResponse.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); - assertThat(searchResponse.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); - + assertResponse( + prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)), + response -> { + assertThat(response.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); + assertThat(response.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); + } + ); // reindex and refresh client().prepareIndex("test").setId(Integer.toString(1)).setSource("field", Integer.toString(1)).get(); refresh(); - searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)) - .get(); - assertThat(searchResponse.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); - assertThat(searchResponse.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); - + assertResponse( + prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)), + response -> { + assertThat(response.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); + assertThat(response.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); + } + ); // reindex - no refresh client().prepareIndex("test").setId(Integer.toString(1)).setSource("field", Integer.toString(1)).get(); - 
searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)) - .get(); - assertThat(searchResponse.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); - assertThat(searchResponse.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); - + assertResponse( + prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)), + response -> { + assertThat(response.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); + assertThat(response.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); + } + ); // force merge forceMerge(); refresh(); client().prepareIndex("test").setId(Integer.toString(1)).setSource("field", Integer.toString(1)).get(); - searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)) - .get(); - assertThat(searchResponse.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); - assertThat(searchResponse.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); - + assertResponse( + prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)), + response -> { + assertThat(response.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); + assertThat(response.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); + } + ); refresh(); - searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)) - .get(); - assertThat(searchResponse.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); - assertThat(searchResponse.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); + assertResponse( + prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)), + response -> { + assertThat(response.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); + assertThat(response.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); + } + ); } public void testScoreSortDirection() throws Exception { @@ -377,30 +395,40 @@ public void testScoreSortDirection() throws Exception { refresh(); - SearchResponse searchResponse = prepareSearch("test").setQuery( - QueryBuilders.functionScoreQuery(matchAllQuery(), ScoreFunctionBuilders.fieldValueFactorFunction("field")) - ).get(); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getScore(), Matchers.lessThan(searchResponse.getHits().getAt(0).getScore())); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2")); - assertThat(searchResponse.getHits().getAt(2).getScore(), Matchers.lessThan(searchResponse.getHits().getAt(1).getScore())); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); - - searchResponse = prepareSearch("test").setQuery( - 
@@ -377,30 +395,40 @@ public void testScoreSortDirection() throws Exception {
         refresh();

-        SearchResponse searchResponse = prepareSearch("test").setQuery(
-            QueryBuilders.functionScoreQuery(matchAllQuery(), ScoreFunctionBuilders.fieldValueFactorFunction("field"))
-        ).get();
-        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1"));
-        assertThat(searchResponse.getHits().getAt(1).getScore(), Matchers.lessThan(searchResponse.getHits().getAt(0).getScore()));
-        assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2"));
-        assertThat(searchResponse.getHits().getAt(2).getScore(), Matchers.lessThan(searchResponse.getHits().getAt(1).getScore()));
-        assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3"));
-
-        searchResponse = prepareSearch("test").setQuery(
-            QueryBuilders.functionScoreQuery(matchAllQuery(), ScoreFunctionBuilders.fieldValueFactorFunction("field"))
-        ).addSort("_score", SortOrder.DESC).get();
-        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1"));
-        assertThat(searchResponse.getHits().getAt(1).getScore(), Matchers.lessThan(searchResponse.getHits().getAt(0).getScore()));
-        assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2"));
-        assertThat(searchResponse.getHits().getAt(2).getScore(), Matchers.lessThan(searchResponse.getHits().getAt(1).getScore()));
-        assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3"));
-
-        searchResponse = prepareSearch("test").setQuery(
-            QueryBuilders.functionScoreQuery(matchAllQuery(), ScoreFunctionBuilders.fieldValueFactorFunction("field"))
-        ).addSort("_score", SortOrder.DESC).get();
-        assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3"));
-        assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2"));
-        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1"));
+        assertResponse(
+            prepareSearch("test").setQuery(
+                QueryBuilders.functionScoreQuery(matchAllQuery(), ScoreFunctionBuilders.fieldValueFactorFunction("field"))
+            ),
+            response -> {
+                assertThat(response.getHits().getAt(0).getId(), equalTo("1"));
+                assertThat(response.getHits().getAt(1).getScore(), Matchers.lessThan(response.getHits().getAt(0).getScore()));
+                assertThat(response.getHits().getAt(1).getId(), equalTo("2"));
+                assertThat(response.getHits().getAt(2).getScore(), Matchers.lessThan(response.getHits().getAt(1).getScore()));
+                assertThat(response.getHits().getAt(2).getId(), equalTo("3"));
+            }
+        );
+        assertResponse(
+            prepareSearch("test").setQuery(
+                QueryBuilders.functionScoreQuery(matchAllQuery(), ScoreFunctionBuilders.fieldValueFactorFunction("field"))
+            ).addSort("_score", SortOrder.DESC),
+            response -> {
+                assertThat(response.getHits().getAt(0).getId(), equalTo("1"));
+                assertThat(response.getHits().getAt(1).getScore(), Matchers.lessThan(response.getHits().getAt(0).getScore()));
+                assertThat(response.getHits().getAt(1).getId(), equalTo("2"));
+                assertThat(response.getHits().getAt(2).getScore(), Matchers.lessThan(response.getHits().getAt(1).getScore()));
+                assertThat(response.getHits().getAt(2).getId(), equalTo("3"));
+            }
+        );
+        assertResponse(
+            prepareSearch("test").setQuery(
+                QueryBuilders.functionScoreQuery(matchAllQuery(), ScoreFunctionBuilders.fieldValueFactorFunction("field"))
+            ).addSort("_score", SortOrder.DESC),
+            response -> {
+                assertThat(response.getHits().getAt(2).getId(), equalTo("3"));
+                assertThat(response.getHits().getAt(1).getId(), equalTo("2"));
+                assertThat(response.getHits().getAt(0).getId(), equalTo("1"));
+            }
+        );
     }

     public void testScoreSortDirectionWithFunctionScore() throws Exception {
@@ -413,30 +441,33 @@ public void testScoreSortDirectionWithFunctionScore() throws Exception {
         refresh();

-        SearchResponse searchResponse = prepareSearch("test").setQuery(
-            functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("field"))
-        ).get();
-        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1"));
-        assertThat(searchResponse.getHits().getAt(1).getScore(), Matchers.lessThan(searchResponse.getHits().getAt(0).getScore()));
-        assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2"));
-        assertThat(searchResponse.getHits().getAt(2).getScore(), Matchers.lessThan(searchResponse.getHits().getAt(1).getScore()));
-        assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3"));
-
-        searchResponse = prepareSearch("test").setQuery(functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("field")))
-            .addSort("_score", SortOrder.DESC)
-            .get();
-        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1"));
-        assertThat(searchResponse.getHits().getAt(1).getScore(), Matchers.lessThan(searchResponse.getHits().getAt(0).getScore()));
-        assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2"));
-        assertThat(searchResponse.getHits().getAt(2).getScore(), Matchers.lessThan(searchResponse.getHits().getAt(1).getScore()));
-        assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3"));
-
-        searchResponse = prepareSearch("test").setQuery(functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("field")))
-            .addSort("_score", SortOrder.DESC)
-            .get();
-        assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3"));
-        assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2"));
-        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1"));
+        assertResponse(prepareSearch("test").setQuery(functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("field"))), response -> {
+            assertThat(response.getHits().getAt(0).getId(), equalTo("1"));
+            assertThat(response.getHits().getAt(1).getScore(), Matchers.lessThan(response.getHits().getAt(0).getScore()));
+            assertThat(response.getHits().getAt(1).getId(), equalTo("2"));
+            assertThat(response.getHits().getAt(2).getScore(), Matchers.lessThan(response.getHits().getAt(1).getScore()));
+            assertThat(response.getHits().getAt(2).getId(), equalTo("3"));
+        });
+        assertResponse(
+            prepareSearch("test").setQuery(functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("field")))
+                .addSort("_score", SortOrder.DESC),
+            response -> {
+                assertThat(response.getHits().getAt(0).getId(), equalTo("1"));
+                assertThat(response.getHits().getAt(1).getScore(), Matchers.lessThan(response.getHits().getAt(0).getScore()));
+                assertThat(response.getHits().getAt(1).getId(), equalTo("2"));
+                assertThat(response.getHits().getAt(2).getScore(), Matchers.lessThan(response.getHits().getAt(1).getScore()));
+                assertThat(response.getHits().getAt(2).getId(), equalTo("3"));
+            }
+        );
+        assertResponse(
+            prepareSearch("test").setQuery(functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("field")))
+                .addSort("_score", SortOrder.DESC),
+            response -> {
+                assertThat(response.getHits().getAt(2).getId(), equalTo("3"));
+                assertThat(response.getHits().getAt(1).getId(), equalTo("2"));
+                assertThat(response.getHits().getAt(0).getId(), equalTo("1"));
+            }
+        );
     }

     public void testIssue2986() {
@@ -446,11 +477,11 @@ public void testIssue2986() {
         client().prepareIndex("test").setId("2").setSource("{\"field1\":\"value2\"}", XContentType.JSON).get();
         client().prepareIndex("test").setId("3").setSource("{\"field1\":\"value3\"}", XContentType.JSON).get();
         refresh();
-        SearchResponse result = prepareSearch("test").setQuery(matchAllQuery()).setTrackScores(true).addSort("field1", SortOrder.ASC).get();
-
-        for (SearchHit hit : result.getHits()) {
-            assertFalse(Float.isNaN(hit.getScore()));
-        }
+        assertResponse(prepareSearch("test").setQuery(matchAllQuery()).setTrackScores(true).addSort("field1", SortOrder.ASC), response -> {
+            for (SearchHit hit : response.getHits()) {
+                assertFalse(Float.isNaN(hit.getScore()));
+            }
+        });
     }

     public void testIssue2991() {
@@ -474,23 +505,24 @@ public void testIssue2991() {
             client().prepareIndex("test").setId("2").setSource("tag", "beta").get();
             refresh();
-            SearchResponse resp = prepareSearch("test").setSize(2)
-                .setQuery(matchAllQuery())
-                .addSort(SortBuilders.fieldSort("tag").order(SortOrder.ASC))
-                .get();
-            assertHitCount(resp, 4);
-            assertThat(resp.getHits().getHits().length, equalTo(2));
-            assertFirstHit(resp, hasId("1"));
-            assertSecondHit(resp, hasId("2"));
-
-            resp = prepareSearch("test").setSize(2)
-                .setQuery(matchAllQuery())
-                .addSort(SortBuilders.fieldSort("tag").order(SortOrder.DESC))
-                .get();
-            assertHitCount(resp, 4);
-            assertThat(resp.getHits().getHits().length, equalTo(2));
-            assertFirstHit(resp, hasId("3"));
-            assertSecondHit(resp, hasId("4"));
+            assertResponse(
+                prepareSearch("test").setSize(2).setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("tag").order(SortOrder.ASC)),
+                response -> {
+                    assertHitCount(response, 4);
+                    assertThat(response.getHits().getHits().length, equalTo(2));
+                    assertFirstHit(response, hasId("1"));
+                    assertSecondHit(response, hasId("2"));
+                }
+            );
+            assertResponse(
+                prepareSearch("test").setSize(2).setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("tag").order(SortOrder.DESC)),
+                response -> {
+                    assertHitCount(response, 4);
+                    assertThat(response.getHits().getHits().length, equalTo(2));
+                    assertFirstHit(response, hasId("3"));
+                    assertSecondHit(response, hasId("4"));
+                }
+            );
         }
     }
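In testSimpleSorts (next hunk) the old code reused one mutable `size` local across all of its per-type sections, which would make it uncapturable by the new lambdas. The patch therefore wraps each section in its own braces so `size` becomes a fresh, effectively final variable every time. A minimal illustration of why the block scoping matters:

    import java.util.Random;
    import java.util.function.IntSupplier;

    class BlockScopedCapture {
        public static void main(String[] args) {
            Random random = new Random();
            {
                int size = 1 + random.nextInt(10);   // fresh local, never reassigned
                IntSupplier s = () -> size;          // legal capture
                System.out.println(s.getAsInt());
            }
            {
                int size = 1 + random.nextInt(10);   // a new variable, not a reassignment
                IntSupplier s = () -> size;
                System.out.println(s.getAsInt());
            }
        }
    }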
@@ -565,172 +597,190 @@ public void testSimpleSorts() throws Exception {
         refresh();

         // STRING
-        int size = 1 + random.nextInt(10);
-
-        SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("str_value", SortOrder.ASC).get();
-        assertHitCount(searchResponse, 10);
-        assertThat(searchResponse.getHits().getHits().length, equalTo(size));
-        for (int i = 0; i < size; i++) {
-            assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i)));
-            assertThat(
-                searchResponse.getHits().getAt(i).getSortValues()[0].toString(),
-                equalTo(new String(new char[] { (char) (97 + i), (char) (97 + i) }))
-            );
+        {
+            int size = 1 + random.nextInt(10);
+            assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("str_value", SortOrder.ASC), response -> {
+                assertHitCount(response, 10);
+                assertThat(response.getHits().getHits().length, equalTo(size));
+                for (int i = 0; i < size; i++) {
+                    assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(i)));
+                    assertThat(
+                        response.getHits().getAt(i).getSortValues()[0].toString(),
+                        equalTo(new String(new char[] { (char) (97 + i), (char) (97 + i) }))
+                    );
+                }
+            });
         }
-        size = 1 + random.nextInt(10);
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("str_value", SortOrder.DESC).get();
-
-        assertHitCount(searchResponse, 10);
-        assertThat(searchResponse.getHits().getHits().length, equalTo(size));
-        for (int i = 0; i < size; i++) {
-            assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i)));
-            assertThat(
-                searchResponse.getHits().getAt(i).getSortValues()[0].toString(),
-                equalTo(new String(new char[] { (char) (97 + (9 - i)), (char) (97 + (9 - i)) }))
-            );
+        {
+            int size = 1 + random.nextInt(10);
+            assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("str_value", SortOrder.DESC), response -> {
+                assertHitCount(response, 10);
+                assertThat(response.getHits().getHits().length, equalTo(size));
+                for (int i = 0; i < size; i++) {
+                    assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i)));
+                    assertThat(
+                        response.getHits().getAt(i).getSortValues()[0].toString(),
+                        equalTo(new String(new char[] { (char) (97 + (9 - i)), (char) (97 + (9 - i)) }))
+                    );
+                }
+                assertThat(response.toString(), not(containsString("error")));
+            });
         }
-
-        assertThat(searchResponse.toString(), not(containsString("error")));
-
         // BYTE
-        size = 1 + random.nextInt(10);
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("byte_value", SortOrder.ASC).get();
-
-        assertHitCount(searchResponse, 10);
-        assertThat(searchResponse.getHits().getHits().length, equalTo(size));
-        for (int i = 0; i < size; i++) {
-            assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i)));
-            assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).byteValue(), equalTo((byte) i));
+        {
+            int size = 1 + random.nextInt(10);
+            assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("byte_value", SortOrder.ASC), response -> {
+                assertHitCount(response, 10);
+                assertThat(response.getHits().getHits().length, equalTo(size));
+                for (int i = 0; i < size; i++) {
+                    assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(i)));
+                    assertThat(((Number) response.getHits().getAt(i).getSortValues()[0]).byteValue(), equalTo((byte) i));
+                }
+            });
         }
-        size = 1 + random.nextInt(10);
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("byte_value", SortOrder.DESC).get();
-
-        assertHitCount(searchResponse, 10);
-        assertThat(searchResponse.getHits().getHits().length, equalTo(size));
-        for (int i = 0; i < size; i++) {
-            assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i)));
-            assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).byteValue(), equalTo((byte) (9 - i)));
+        {
+            int size = 1 + random.nextInt(10);
+            assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("byte_value", SortOrder.DESC), response -> {
+                assertHitCount(response, 10);
+                assertThat(response.getHits().getHits().length, equalTo(size));
+                for (int i = 0; i < size; i++) {
+                    assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i)));
+                    assertThat(((Number) response.getHits().getAt(i).getSortValues()[0]).byteValue(), equalTo((byte) (9 - i)));
+                }
+                assertThat(response.toString(), not(containsString("error")));
+            });
         }
-
-        assertThat(searchResponse.toString(), not(containsString("error")));
-
         // SHORT
-        size = 1 + random.nextInt(10);
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("short_value", SortOrder.ASC).get();
-
-        assertHitCount(searchResponse, 10);
-        assertThat(searchResponse.getHits().getHits().length, equalTo(size));
-        for (int i = 0; i < size; i++) {
-            assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i)));
-            assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).shortValue(), equalTo((short) i));
+        {
+            int size = 1 + random.nextInt(10);
+            assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("short_value", SortOrder.ASC), response -> {
+                assertHitCount(response, 10);
+                assertThat(response.getHits().getHits().length, equalTo(size));
+                for (int i = 0; i < size; i++) {
+                    assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(i)));
+                    assertThat(((Number) response.getHits().getAt(i).getSortValues()[0]).shortValue(), equalTo((short) i));
+                }
+            });
         }
-        size = 1 + random.nextInt(10);
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("short_value", SortOrder.DESC).get();
-
-        assertHitCount(searchResponse, 10);
-        assertThat(searchResponse.getHits().getHits().length, equalTo(size));
-        for (int i = 0; i < size; i++) {
-            assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i)));
-            assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).shortValue(), equalTo((short) (9 - i)));
+        {
+            int size = 1 + random.nextInt(10);
+            assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("short_value", SortOrder.DESC), response -> {
+                assertHitCount(response, 10);
+                assertThat(response.getHits().getHits().length, equalTo(size));
+                for (int i = 0; i < size; i++) {
+                    assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i)));
+                    assertThat(((Number) response.getHits().getAt(i).getSortValues()[0]).shortValue(), equalTo((short) (9 - i)));
+                }
+                assertThat(response.toString(), not(containsString("error")));
+            });
         }
-
-        assertThat(searchResponse.toString(), not(containsString("error")));
-
         // INTEGER
-        size = 1 + random.nextInt(10);
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("integer_value", SortOrder.ASC).get();
-
-        assertHitCount(searchResponse, 10);
-        assertThat(searchResponse.getHits().getHits().length, equalTo(size));
-        for (int i = 0; i < size; i++) {
-            assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i)));
-            assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).intValue(), equalTo(i));
+        {
+            int size = 1 + random.nextInt(10);
+            assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("integer_value", SortOrder.ASC), response -> {
+                assertHitCount(response, 10);
+                assertThat(response.getHits().getHits().length, equalTo(size));
+                for (int i = 0; i < size; i++) {
+                    assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(i)));
+                    assertThat(((Number) response.getHits().getAt(i).getSortValues()[0]).intValue(), equalTo(i));
+                }
+                assertThat(response.toString(), not(containsString("error")));
+            });
         }
+        {
+            int size = 1 + random.nextInt(10);
+            assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("integer_value", SortOrder.DESC), response -> {
+                assertHitCount(response, 10);
+                assertThat(response.getHits().getHits().length, equalTo(size));
+                for (int i = 0; i < size; i++) {
+                    assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i)));
+                    assertThat(((Number) response.getHits().getAt(i).getSortValues()[0]).intValue(), equalTo((9 - i)));
+                }

-        assertThat(searchResponse.toString(), not(containsString("error")));
-        size = 1 + random.nextInt(10);
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("integer_value", SortOrder.DESC).get();
-
-        assertHitCount(searchResponse, 10);
-        assertThat(searchResponse.getHits().getHits().length, equalTo(size));
-        for (int i = 0; i < size; i++) {
-            assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i)));
-            assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).intValue(), equalTo((9 - i)));
+                assertThat(response.toString(), not(containsString("error")));
+            });
         }
-
-        assertThat(searchResponse.toString(), not(containsString("error")));
-
         // LONG
-        size = 1 + random.nextInt(10);
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("long_value", SortOrder.ASC).get();
-
-        assertHitCount(searchResponse, 10);
-        assertThat(searchResponse.getHits().getHits().length, equalTo(size));
-        for (int i = 0; i < size; i++) {
-            assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i)));
-            assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).longValue(), equalTo((long) i));
-        }
+        {
+            int size = 1 + random.nextInt(10);
+            assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("long_value", SortOrder.ASC), response -> {
+                assertHitCount(response, 10);
+                assertThat(response.getHits().getHits().length, equalTo(size));
+                for (int i = 0; i < size; i++) {
+                    assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(i)));
+                    assertThat(((Number) response.getHits().getAt(i).getSortValues()[0]).longValue(), equalTo((long) i));
+                }

-        assertThat(searchResponse.toString(), not(containsString("error")));
-        size = 1 + random.nextInt(10);
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("long_value", SortOrder.DESC).get();
-        assertHitCount(searchResponse, 10L);
-        assertHitCount(searchResponse, 10);
-        assertThat(searchResponse.getHits().getHits().length, equalTo(size));
-        for (int i = 0; i < size; i++) {
-            assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i)));
-            assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).longValue(), equalTo((long) (9 - i)));
+                assertThat(response.toString(), not(containsString("error")));
+            });
+        }
+        {
+            int size = 1 + random.nextInt(10);
+            assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("long_value", SortOrder.DESC), response -> {
+                assertHitCount(response, 10L);
+                assertThat(response.getHits().getHits().length, equalTo(size));
+                for (int i = 0; i < size; i++) {
+                    assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i)));
+                    assertThat(((Number) response.getHits().getAt(i).getSortValues()[0]).longValue(), equalTo((long) (9 - i)));
+                }
+                assertThat(response.toString(), not(containsString("error")));
+            });
         }
-
-        assertThat(searchResponse.toString(), not(containsString("error")));
-
         // FLOAT
-        size = 1 + random.nextInt(10);
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("float_value", SortOrder.ASC).get();
-
-        assertHitCount(searchResponse, 10L);
-        assertThat(searchResponse.getHits().getHits().length, equalTo(size));
-        for (int i = 0; i < size; i++) {
-            assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i)));
-            assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).doubleValue(), closeTo(0.1d * i, 0.000001d));
+        {
+            int size = 1 + random.nextInt(10);
+            assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("float_value", SortOrder.ASC), response -> {
+                assertHitCount(response, 10L);
+                assertThat(response.getHits().getHits().length, equalTo(size));
+                for (int i = 0; i < size; i++) {
+                    assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(i)));
+                    assertThat(((Number) response.getHits().getAt(i).getSortValues()[0]).doubleValue(), closeTo(0.1d * i, 0.000001d));
+                }
+                assertThat(response.toString(), not(containsString("error")));
+            });
         }
-
-        assertThat(searchResponse.toString(), not(containsString("error")));
-        size = 1 + random.nextInt(10);
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("float_value", SortOrder.DESC).get();
-
-        assertHitCount(searchResponse, 10);
-        assertThat(searchResponse.getHits().getHits().length, equalTo(size));
-        for (int i = 0; i < size; i++) {
-            assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i)));
-            assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).doubleValue(), closeTo(0.1d * (9 - i), 0.000001d));
+        {
+            int size = 1 + random.nextInt(10);
+            assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("float_value", SortOrder.DESC), response -> {
+                assertHitCount(response, 10);
+                assertThat(response.getHits().getHits().length, equalTo(size));
+                for (int i = 0; i < size; i++) {
+                    assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i)));
+                    assertThat(((Number) response.getHits().getAt(i).getSortValues()[0]).doubleValue(), closeTo(0.1d * (9 - i), 0.000001d));
+                }
+                assertThat(response.toString(), not(containsString("error")));
+            });
         }
-
-        assertThat(searchResponse.toString(), not(containsString("error")));
-
         // DOUBLE
-        size = 1 + random.nextInt(10);
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("double_value", SortOrder.ASC).get();
-
-        assertHitCount(searchResponse, 10L);
-        assertThat(searchResponse.getHits().getHits().length, equalTo(size));
-        for (int i = 0; i < size; i++) {
-            assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i)));
-            assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).doubleValue(), closeTo(0.1d * i, 0.000001d));
+        {
+            int size = 1 + random.nextInt(10);
+            assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("double_value", SortOrder.ASC), response -> {
+                assertHitCount(response, 10L);
+                assertThat(response.getHits().getHits().length, equalTo(size));
+                for (int i = 0; i < size; i++) {
+                    assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(i)));
+                    assertThat(((Number) response.getHits().getAt(i).getSortValues()[0]).doubleValue(), closeTo(0.1d * i, 0.000001d));
+                }
+                assertThat(response.toString(), not(containsString("error")));
+            });
         }
-
-        assertThat(searchResponse.toString(), not(containsString("error")));
-        size = 1 + random.nextInt(10);
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("double_value", SortOrder.DESC).get();
-
-        assertHitCount(searchResponse, 10L);
-        assertThat(searchResponse.getHits().getHits().length, equalTo(size));
-        for (int i = 0; i < size; i++) {
-            assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i)));
-            assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).doubleValue(), closeTo(0.1d * (9 - i), 0.000001d));
+        {
+            int size = 1 + random.nextInt(10);
+            assertNoFailuresAndResponse(
+                prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("double_value", SortOrder.DESC),
+                response -> {
+                    assertHitCount(response, 10L);
+                    assertThat(response.getHits().getHits().length, equalTo(size));
+                    for (int i = 0; i < size; i++) {
+                        assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i)));
+                        assertThat(
+                            ((Number) response.getHits().getAt(i).getSortValues()[0]).doubleValue(),
+                            closeTo(0.1d * (9 - i), 0.000001d)
+                        );
+                    }
+                }
+            );
         }
-
-        assertNoFailures(searchResponse);
     }

     public void testSortMissingNumbers() throws Exception {
@@ -768,37 +818,35 @@ public void testSortMissingNumbers() throws Exception {
         refresh();

         logger.info("--> sort with no missing (same as missing _last)");
-        SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery())
-            .addSort(SortBuilders.fieldSort("i_value").order(SortOrder.ASC))
-            .get();
-        assertNoFailures(searchResponse);
-
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1"));
-        assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("3"));
-        assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("2"));
-
+        assertNoFailuresAndResponse(
+            prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("i_value").order(SortOrder.ASC)),
+            response -> {
+                assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+                assertThat(response.getHits().getAt(0).getId(), equalTo("1"));
+                assertThat(response.getHits().getAt(1).getId(), equalTo("3"));
+                assertThat(response.getHits().getAt(2).getId(), equalTo("2"));
+            }
+        );
        logger.info("--> sort with missing _last");
-        searchResponse = prepareSearch().setQuery(matchAllQuery())
-            .addSort(SortBuilders.fieldSort("i_value").order(SortOrder.ASC).missing("_last"))
-            .get();
-        assertNoFailures(searchResponse);
-
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1"));
-        assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("3"));
-        assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("2"));
-
+        assertNoFailuresAndResponse(
+            prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("i_value").order(SortOrder.ASC).missing("_last")),
+            response -> {
+                assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+                assertThat(response.getHits().getAt(0).getId(), equalTo("1"));
+                assertThat(response.getHits().getAt(1).getId(), equalTo("3"));
+                assertThat(response.getHits().getAt(2).getId(), equalTo("2"));
+            }
+        );
        logger.info("--> sort with missing _first");
-        searchResponse = prepareSearch().setQuery(matchAllQuery())
-            .addSort(SortBuilders.fieldSort("i_value").order(SortOrder.ASC).missing("_first"))
-            .get();
-        assertNoFailures(searchResponse);
-
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("2"));
-        assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("1"));
-        assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3"));
+        assertNoFailuresAndResponse(
+            prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("i_value").order(SortOrder.ASC).missing("_first")),
+            response -> {
+                assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+                assertThat(response.getHits().getAt(0).getId(), equalTo("2"));
+                assertThat(response.getHits().getAt(1).getId(), equalTo("1"));
+                assertThat(response.getHits().getAt(2).getId(), equalTo("3"));
+            }
+        );
     }

     public void testSortMissingStrings() throws IOException {
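testSortMissingNumbers above and testSortMissingStrings below both exercise missing-value placement: the document with id "2" lacks the sort field, and missing("_last") (the default for an ascending sort) places it after documents "1" and "3", while missing("_first") moves it ahead of them; a concrete value such as missing("b") slots the document as if it held that value. Expressed with the same builders the tests use:

    // Standard SortBuilders usage, mirroring the assertions in these hunks.
    FieldSortBuilder missingLast  = SortBuilders.fieldSort("i_value").order(SortOrder.ASC).missing("_last");   // order: 1, 3, 2
    FieldSortBuilder missingFirst = SortBuilders.fieldSort("i_value").order(SortOrder.ASC).missing("_first");  // order: 2, 1, 3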
-> { + assertThat(Arrays.toString(response.getShardFailures()), response.getFailedShards(), equalTo(0)); + + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("3")); + assertThat(response.getHits().getAt(2).getId(), equalTo("2")); + } + ); logger.info("--> sort with missing _last"); - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC).missing("_last")) - .get(); - assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0)); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("3")); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("2")); - + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC).missing("_last")), + response -> { + assertThat(Arrays.toString(response.getShardFailures()), response.getFailedShards(), equalTo(0)); + + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("3")); + assertThat(response.getHits().getAt(2).getId(), equalTo("2")); + } + ); logger.info("--> sort with missing _first"); - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC).missing("_first")) - .get(); - assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0)); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("2")); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); - + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC).missing("_first")), + response -> { + assertThat(Arrays.toString(response.getShardFailures()), response.getFailedShards(), equalTo(0)); + + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("2")); + assertThat(response.getHits().getAt(1).getId(), equalTo("1")); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + } + ); logger.info("--> sort with missing b"); - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC).missing("b")) - .get(); - assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0)); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2")); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC).missing("b")), + response -> { + assertThat(Arrays.toString(response.getShardFailures()), response.getFailedShards(), equalTo(0)); + + assertThat(response.getHits().getTotalHits().value, 
equalTo(3L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + } + ); } public void testSortMissingDates() throws IOException { @@ -914,24 +967,27 @@ public void testSortMissingDates() throws IOException { format = type.equals("date") ? "strict_date_optional_time" : "strict_date_optional_time_nanos"; } - SearchResponse searchResponse = prepareSearch(index).addSort( - SortBuilders.fieldSort("mydate").order(SortOrder.ASC).setFormat(format) - ).get(); - assertHitsInOrder(searchResponse, new String[] { "1", "2", "3" }); + assertResponse( + prepareSearch(index).addSort(SortBuilders.fieldSort("mydate").order(SortOrder.ASC).setFormat(format)), + response -> assertHitsInOrder(response, new String[] { "1", "2", "3" }) + ); - searchResponse = prepareSearch(index).addSort( - SortBuilders.fieldSort("mydate").order(SortOrder.ASC).missing("_first").setFormat(format) - ).get(); - assertHitsInOrder(searchResponse, new String[] { "3", "1", "2" }); + assertResponse( + prepareSearch(index).addSort(SortBuilders.fieldSort("mydate").order(SortOrder.ASC).missing("_first").setFormat(format)), + response -> assertHitsInOrder(response, new String[] { "3", "1", "2" }) + ); - searchResponse = prepareSearch(index).addSort(SortBuilders.fieldSort("mydate").order(SortOrder.DESC).setFormat(format)) - .get(); - assertHitsInOrder(searchResponse, new String[] { "2", "1", "3" }); + assertResponse( + prepareSearch(index).addSort(SortBuilders.fieldSort("mydate").order(SortOrder.DESC).setFormat(format)), + response -> assertHitsInOrder(response, new String[] { "2", "1", "3" }) + ); - searchResponse = prepareSearch(index).addSort( - SortBuilders.fieldSort("mydate").order(SortOrder.DESC).missing("_first").setFormat(format) - ).get(); - assertHitsInOrder(searchResponse, new String[] { "3", "2", "1" }); + assertResponse( + prepareSearch(index).addSort( + SortBuilders.fieldSort("mydate").order(SortOrder.DESC).missing("_first").setFormat(format) + ), + response -> assertHitsInOrder(response, new String[] { "3", "2", "1" }) + ); } } } @@ -975,25 +1031,33 @@ public void testSortMissingDatesMixedTypes() throws IOException { } String index = "test*"; - SearchResponse searchResponse = prepareSearch(index).addSort( - SortBuilders.fieldSort("mydate").order(SortOrder.ASC).setFormat(format).setNumericType("date_nanos") - ).addSort(SortBuilders.fieldSort("other_field").order(SortOrder.ASC)).get(); - assertHitsInOrder(searchResponse, new String[] { "1", "2", "4", "5", "3", "6" }); - - searchResponse = prepareSearch(index).addSort( - SortBuilders.fieldSort("mydate").order(SortOrder.ASC).missing("_first").setFormat(format).setNumericType("date_nanos") - ).addSort(SortBuilders.fieldSort("other_field").order(SortOrder.ASC)).get(); - assertHitsInOrder(searchResponse, new String[] { "3", "6", "1", "2", "4", "5" }); - - searchResponse = prepareSearch(index).addSort( - SortBuilders.fieldSort("mydate").order(SortOrder.DESC).setFormat(format).setNumericType("date_nanos") - ).addSort(SortBuilders.fieldSort("other_field").order(SortOrder.ASC)).get(); - assertHitsInOrder(searchResponse, new String[] { "5", "4", "2", "1", "3", "6" }); - - searchResponse = prepareSearch(index).addSort( - SortBuilders.fieldSort("mydate").order(SortOrder.DESC).missing("_first").setFormat(format).setNumericType("date_nanos") - ).addSort(SortBuilders.fieldSort("other_field").order(SortOrder.ASC)).get(); - 
assertHitsInOrder(searchResponse, new String[] { "3", "6", "5", "4", "2", "1" }); + assertResponse( + prepareSearch(index).addSort( + SortBuilders.fieldSort("mydate").order(SortOrder.ASC).setFormat(format).setNumericType("date_nanos") + ).addSort(SortBuilders.fieldSort("other_field").order(SortOrder.ASC)), + response -> assertHitsInOrder(response, new String[] { "1", "2", "4", "5", "3", "6" }) + ); + + assertResponse( + prepareSearch(index).addSort( + SortBuilders.fieldSort("mydate").order(SortOrder.ASC).missing("_first").setFormat(format).setNumericType("date_nanos") + ).addSort(SortBuilders.fieldSort("other_field").order(SortOrder.ASC)), + response -> assertHitsInOrder(response, new String[] { "3", "6", "1", "2", "4", "5" }) + ); + + assertResponse( + prepareSearch(index).addSort( + SortBuilders.fieldSort("mydate").order(SortOrder.DESC).setFormat(format).setNumericType("date_nanos") + ).addSort(SortBuilders.fieldSort("other_field").order(SortOrder.ASC)), + response -> assertHitsInOrder(response, new String[] { "5", "4", "2", "1", "3", "6" }) + ); + + assertResponse( + prepareSearch(index).addSort( + SortBuilders.fieldSort("mydate").order(SortOrder.DESC).missing("_first").setFormat(format).setNumericType("date_nanos") + ).addSort(SortBuilders.fieldSort("other_field").order(SortOrder.ASC)), + response -> assertHitsInOrder(response, new String[] { "3", "6", "5", "4", "2", "1" }) + ); } } @@ -1017,8 +1081,10 @@ public void testIgnoreUnmapped() throws Exception { logger.info("--> sort with an unmapped field, verify it fails"); try { - SearchResponse result = prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("kkk")).get(); - assertThat("Expected exception but returned with", result, nullValue()); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("kkk")), + response -> assertThat("Expected exception but returned with", response, nullValue()) + ); } catch (SearchPhaseExecutionException e) { // we check that it's a parse failure rather than a different shard failure for (ShardSearchFailure shardSearchFailure : e.shardFailures()) { @@ -1129,252 +1195,243 @@ public void testSortMVField() throws Exception { refresh(); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("long_values", SortOrder.ASC).get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(3))); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).longValue(), equalTo(-4L)); - - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).longValue(), equalTo(1L)); - - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(2))); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).longValue(), equalTo(7L)); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("long_values", SortOrder.ASC), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getHits().length, equalTo(3)); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("long_values", SortOrder.DESC).get(); + assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(3))); + assertThat(((Number) 
response.getHits().getAt(0).getSortValues()[0]).longValue(), equalTo(-4L)); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).longValue(), equalTo(20L)); - - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).longValue(), equalTo(10L)); - - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).longValue(), equalTo(3L)); - - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .setSize(10) - .addSort(SortBuilders.fieldSort("long_values").order(SortOrder.DESC).sortMode(SortMode.SUM)) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).longValue(), equalTo(53L)); - - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).longValue(), equalTo(24L)); - - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).longValue(), equalTo(2L)); - - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .setSize(10) - .addSort(SortBuilders.fieldSort("long_values").order(SortOrder.DESC).sortMode(SortMode.AVG)) - .get(); + assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).longValue(), equalTo(1L)); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(2))); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).longValue(), equalTo(7L)); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("long_values", SortOrder.DESC), response -> { - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).longValue(), equalTo(13L)); + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).longValue(), equalTo(6L)); + assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).longValue(), equalTo(20L)); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).longValue(), equalTo(1L)); + assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(((Number) 
response.getHits().getAt(1).getSortValues()[0]).longValue(), equalTo(10L)); - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .setSize(10) - .addSort(SortBuilders.fieldSort("long_values").order(SortOrder.DESC).sortMode(SortMode.MEDIAN)) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).longValue(), equalTo(13L)); - - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).longValue(), equalTo(7L)); - - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).longValue(), equalTo(2L)); - - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("int_values", SortOrder.ASC).get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(3))); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(-4)); - - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(1)); - - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(2))); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(7)); - - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("int_values", SortOrder.DESC).get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(20)); - - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(10)); - - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(3)); - - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("short_values", SortOrder.ASC).get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(3))); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(-4)); - - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(1)); - - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(2))); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(7)); - - searchResponse = 
prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("short_values", SortOrder.DESC).get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(20)); - - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(10)); - - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(3)); - - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("byte_values", SortOrder.ASC).get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(3))); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(-4)); - - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(1)); - - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(2))); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(7)); - - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("byte_values", SortOrder.DESC).get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(20)); - - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(10)); - - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(3)); - - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("float_values", SortOrder.ASC).get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(3))); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).floatValue(), equalTo(-4f)); - - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).floatValue(), equalTo(1f)); - - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(2))); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).floatValue(), equalTo(7f)); - - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("float_values", SortOrder.DESC).get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - 
assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).floatValue(), equalTo(20f)); - - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).floatValue(), equalTo(10f)); - - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).floatValue(), equalTo(3f)); - - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("double_values", SortOrder.ASC).get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(3))); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), equalTo(-4d)); - - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), equalTo(1d)); - - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(2))); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), equalTo(7d)); - - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("double_values", SortOrder.DESC).get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), equalTo(20d)); - - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), equalTo(10d)); - - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), equalTo(3d)); - - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("string_values", SortOrder.ASC).get(); + assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).longValue(), equalTo(3L)); + }); + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .setSize(10) + .addSort(SortBuilders.fieldSort("long_values").order(SortOrder.DESC).sortMode(SortMode.SUM)), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).longValue(), equalTo(53L)); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(3))); - assertThat(searchResponse.getHits().getAt(0).getSortValues()[0], equalTo("!4")); + assertThat(response.getHits().getAt(1).getId(), 
equalTo(Integer.toString(1))); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).longValue(), equalTo(24L)); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(searchResponse.getHits().getAt(1).getSortValues()[0], equalTo("01")); + assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).longValue(), equalTo(2L)); + } + ); + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .setSize(10) + .addSort(SortBuilders.fieldSort("long_values").order(SortOrder.DESC).sortMode(SortMode.AVG)), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(2))); - assertThat(searchResponse.getHits().getAt(2).getSortValues()[0], equalTo("07")); + assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).longValue(), equalTo(13L)); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("string_values", SortOrder.DESC).get(); + assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).longValue(), equalTo(6L)); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).longValue(), equalTo(1L)); + } + ); + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .setSize(10) + .addSort(SortBuilders.fieldSort("long_values").order(SortOrder.DESC).sortMode(SortMode.MEDIAN)), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); - assertThat(searchResponse.getHits().getAt(0).getSortValues()[0], equalTo("20")); + assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).longValue(), equalTo(13L)); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(searchResponse.getHits().getAt(1).getSortValues()[0], equalTo("10")); + assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).longValue(), equalTo(7L)); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); - assertThat(searchResponse.getHits().getAt(2).getSortValues()[0], equalTo("03")); + assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).longValue(), equalTo(2L)); + } + ); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("int_values", SortOrder.ASC), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getHits().length, equalTo(3)); + + assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(3))); + assertThat(((Number) 
response.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(-4)); + + assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(1)); + + assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(2))); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(7)); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("int_values", SortOrder.DESC), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getHits().length, equalTo(3)); + + assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(20)); + + assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(10)); + + assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(3)); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("short_values", SortOrder.ASC), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getHits().length, equalTo(3)); + + assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(3))); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(-4)); + + assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(1)); + + assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(2))); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(7)); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("short_values", SortOrder.DESC), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getHits().length, equalTo(3)); + + assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(20)); + + assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(10)); + + assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(3)); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("byte_values", SortOrder.ASC), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getHits().length, equalTo(3)); + + assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(3))); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(-4)); + + assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(1)); + + assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(2))); + 
assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(7)); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("byte_values", SortOrder.DESC), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getHits().length, equalTo(3)); + + assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(20)); + + assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(10)); + + assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(3)); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("float_values", SortOrder.ASC), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getHits().length, equalTo(3)); + + assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(3))); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).floatValue(), equalTo(-4f)); + + assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).floatValue(), equalTo(1f)); + + assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(2))); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).floatValue(), equalTo(7f)); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("float_values", SortOrder.DESC), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getHits().length, equalTo(3)); + + assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).floatValue(), equalTo(20f)); + + assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).floatValue(), equalTo(10f)); + + assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).floatValue(), equalTo(3f)); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("double_values", SortOrder.ASC), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getHits().length, equalTo(3)); + + assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(3))); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), equalTo(-4d)); + + assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), equalTo(1d)); + + assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(2))); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), equalTo(7d)); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("double_values", SortOrder.DESC), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + 
assertThat(response.getHits().getHits().length, equalTo(3)); + + assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), equalTo(20d)); + + assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), equalTo(10d)); + + assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), equalTo(3d)); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("string_values", SortOrder.ASC), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getHits().length, equalTo(3)); + + assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(3))); + assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo("!4")); + + assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(response.getHits().getAt(1).getSortValues()[0], equalTo("01")); + + assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(2))); + assertThat(response.getHits().getAt(2).getSortValues()[0], equalTo("07")); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("string_values", SortOrder.DESC), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getHits().length, equalTo(3)); + + assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); + assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo("20")); + + assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(response.getHits().getAt(1).getSortValues()[0], equalTo("10")); + + assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); + assertThat(response.getHits().getAt(2).getSortValues()[0], equalTo("03")); + }); } public void testSortOnRareField() throws IOException { @@ -1399,13 +1456,12 @@ public void testSortOnRareField() throws IOException { .get(); refresh(); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(3).addSort("string_values", SortOrder.DESC).get(); - - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(1))); - assertThat(searchResponse.getHits().getAt(0).getSortValues()[0], equalTo("10")); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(3).addSort("string_values", SortOrder.DESC), response -> { + assertThat(response.getHits().getHits().length, equalTo(1)); + assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(1))); + assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo("10")); + }); client().prepareIndex("test") .setId(Integer.toString(2)) .setSource(jsonBuilder().startObject().array("string_values", "11", "15", "20", "07").endObject()) @@ -1418,16 +1474,16 @@ public void testSortOnRareField() throws IOException { } refresh(); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(2).addSort("string_values", SortOrder.DESC).get(); - - assertThat(searchResponse.getHits().getHits().length, equalTo(2)); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(2).addSort("string_values", SortOrder.DESC), response -> 
{ - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); - assertThat(searchResponse.getHits().getAt(0).getSortValues()[0], equalTo("20")); + assertThat(response.getHits().getHits().length, equalTo(2)); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(searchResponse.getHits().getAt(1).getSortValues()[0], equalTo("10")); + assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); + assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo("20")); + assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(response.getHits().getAt(1).getSortValues()[0], equalTo("10")); + }); client().prepareIndex("test") .setId(Integer.toString(3)) .setSource(jsonBuilder().startObject().array("string_values", "02", "01", "03", "!4").endObject()) @@ -1440,19 +1496,19 @@ public void testSortOnRareField() throws IOException { } refresh(); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(3).addSort("string_values", SortOrder.DESC).get(); - - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(3).addSort("string_values", SortOrder.DESC), response -> { - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); - assertThat(searchResponse.getHits().getAt(0).getSortValues()[0], equalTo("20")); + assertThat(response.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(searchResponse.getHits().getAt(1).getSortValues()[0], equalTo("10")); + assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); + assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo("20")); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); - assertThat(searchResponse.getHits().getAt(2).getSortValues()[0], equalTo("03")); + assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(response.getHits().getAt(1).getSortValues()[0], equalTo("10")); + assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); + assertThat(response.getHits().getAt(2).getSortValues()[0], equalTo("03")); + }); for (int i = 0; i < 15; i++) { client().prepareIndex("test") .setId(Integer.toString(300 + i)) @@ -1461,18 +1517,19 @@ public void testSortOnRareField() throws IOException { refresh(); } - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(3).addSort("string_values", SortOrder.DESC).get(); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(3).addSort("string_values", SortOrder.DESC), response -> { - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); - assertThat(searchResponse.getHits().getAt(0).getSortValues()[0], equalTo("20")); + assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); + assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo("20")); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(searchResponse.getHits().getAt(1).getSortValues()[0], equalTo("10")); + assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(response.getHits().getAt(1).getSortValues()[0], 
equalTo("10")); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); - assertThat(searchResponse.getHits().getAt(2).getSortValues()[0], equalTo("03")); + assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); + assertThat(response.getHits().getAt(2).getSortValues()[0], equalTo("03")); + }); } public void testSortMetaField() throws Exception { @@ -1488,20 +1545,20 @@ public void testSortMetaField() throws Exception { indexRandom(true, indexReqs); SortOrder order = randomFrom(SortOrder.values()); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .setSize(randomIntBetween(1, numDocs + 5)) - .addSort("_id", order) - .get(); - assertNoFailures(searchResponse); - SearchHit[] hits = searchResponse.getHits().getHits(); - BytesRef previous = order == SortOrder.ASC ? new BytesRef() : UnicodeUtil.BIG_TERM; - for (int i = 0; i < hits.length; ++i) { - String idString = hits[i].getId(); - final BytesRef id = new BytesRef(idString); - assertEquals(idString, hits[i].getSortValues()[0]); - assertThat(previous, order == SortOrder.ASC ? lessThan(id) : greaterThan(id)); - previous = id; - } + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()).setSize(randomIntBetween(1, numDocs + 5)).addSort("_id", order), + response -> { + SearchHit[] hits = response.getHits().getHits(); + BytesRef previous = order == SortOrder.ASC ? new BytesRef() : UnicodeUtil.BIG_TERM; + for (int i = 0; i < hits.length; ++i) { + String idString = hits[i].getId(); + final BytesRef id = new BytesRef(idString); + assertEquals(idString, hits[i].getSortValues()[0]); + assertThat(previous, order == SortOrder.ASC ? lessThan(id) : greaterThan(id)); + previous = id; + } + } + ); // assertWarnings(ID_FIELD_DATA_DEPRECATION_MESSAGE); } finally { // unset cluster setting @@ -1588,59 +1645,64 @@ public void testNestedSort() throws IOException, InterruptedException, Execution // We sort on nested field - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("nested.foo").setNestedSort(new NestedSortBuilder("nested")).order(SortOrder.DESC)) - .get(); - assertNoFailures(searchResponse); - SearchHit[] hits = searchResponse.getHits().getHits(); - assertThat(hits.length, is(2)); - assertThat(hits[0].getSortValues().length, is(1)); - assertThat(hits[1].getSortValues().length, is(1)); - assertThat(hits[0].getSortValues()[0], is("cba")); - assertThat(hits[1].getSortValues()[0], is("bar")); - + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("nested.foo").setNestedSort(new NestedSortBuilder("nested")).order(SortOrder.DESC)), + response -> { + SearchHit[] hits = response.getHits().getHits(); + assertThat(hits.length, is(2)); + assertThat(hits[0].getSortValues().length, is(1)); + assertThat(hits[1].getSortValues().length, is(1)); + assertThat(hits[0].getSortValues()[0], is("cba")); + assertThat(hits[1].getSortValues()[0], is("bar")); + } + ); // We sort on nested fields with max_children limit - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("nested.foo").setNestedSort(new NestedSortBuilder("nested").setMaxChildren(1)).order(SortOrder.DESC) - ) - .get(); - assertNoFailures(searchResponse); - hits = searchResponse.getHits().getHits(); - assertThat(hits.length, is(2)); - assertThat(hits[0].getSortValues().length, is(1)); - assertThat(hits[1].getSortValues().length, is(1)); - 
assertThat(hits[0].getSortValues()[0], is("bar")); - assertThat(hits[1].getSortValues()[0], is("abc")); - - { - SearchPhaseExecutionException exc = expectThrows( - SearchPhaseExecutionException.class, - () -> prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("nested.bar.foo") - .setNestedSort( - new NestedSortBuilder("nested").setNestedSort(new NestedSortBuilder("nested.bar").setMaxChildren(1)) + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("nested.foo") + .setNestedSort(new NestedSortBuilder("nested").setMaxChildren(1)) + .order(SortOrder.DESC) + ), + response -> { + SearchHit[] hits = response.getHits().getHits(); + assertThat(hits.length, is(2)); + assertThat(hits[0].getSortValues().length, is(1)); + assertThat(hits[1].getSortValues().length, is(1)); + assertThat(hits[0].getSortValues()[0], is("bar")); + assertThat(hits[1].getSortValues()[0], is("abc")); + + { + SearchPhaseExecutionException exc = expectThrows( + SearchPhaseExecutionException.class, + () -> prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("nested.bar.foo") + .setNestedSort( + new NestedSortBuilder("nested").setNestedSort(new NestedSortBuilder("nested.bar").setMaxChildren(1)) + ) + .order(SortOrder.DESC) ) - .order(SortOrder.DESC) - ) - .get() - ); - assertThat(exc.toString(), containsString("max_children is only supported on top level of nested sort")); - } - + .get() + ); + assertThat(exc.toString(), containsString("max_children is only supported on top level of nested sort")); + } + } + ); // We sort on nested sub field - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("nested.foo.sub").setNestedSort(new NestedSortBuilder("nested")).order(SortOrder.DESC)) - .get(); - assertNoFailures(searchResponse); - hits = searchResponse.getHits().getHits(); - assertThat(hits.length, is(2)); - assertThat(hits[0].getSortValues().length, is(1)); - assertThat(hits[1].getSortValues().length, is(1)); - assertThat(hits[0].getSortValues()[0], is("cba bca")); - assertThat(hits[1].getSortValues()[0], is("bar bar")); - + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("nested.foo.sub").setNestedSort(new NestedSortBuilder("nested")).order(SortOrder.DESC)), + response -> { + SearchHit[] hits = response.getHits().getHits(); + assertThat(hits.length, is(2)); + assertThat(hits[0].getSortValues().length, is(1)); + assertThat(hits[1].getSortValues().length, is(1)); + assertThat(hits[0].getSortValues()[0], is("cba bca")); + assertThat(hits[1].getSortValues()[0], is("bar bar")); + } + ); // missing nested path SearchPhaseExecutionException exc = expectThrows( SearchPhaseExecutionException.class, @@ -1673,20 +1735,26 @@ public void testSortDuelBetweenSingleShardAndMultiShardIndex() throws Exception SortOrder order = randomBoolean() ? 
SortOrder.ASC : SortOrder.DESC; int from = between(0, 256); int size = between(0, 256); - SearchResponse multiShardResponse = prepareSearch("test1").setFrom(from).setSize(size).addSort(sortField, order).get(); - assertNoFailures(multiShardResponse); - SearchResponse singleShardResponse = prepareSearch("test2").setFrom(from).setSize(size).addSort(sortField, order).get(); - assertNoFailures(singleShardResponse); - - assertThat(multiShardResponse.getHits().getTotalHits().value, equalTo(singleShardResponse.getHits().getTotalHits().value)); - assertThat(multiShardResponse.getHits().getHits().length, equalTo(singleShardResponse.getHits().getHits().length)); - for (int i = 0; i < multiShardResponse.getHits().getHits().length; i++) { - assertThat( - multiShardResponse.getHits().getAt(i).getSortValues()[0], - equalTo(singleShardResponse.getHits().getAt(i).getSortValues()[0]) - ); - assertThat(multiShardResponse.getHits().getAt(i).getId(), equalTo(singleShardResponse.getHits().getAt(i).getId())); - } + assertNoFailuresAndResponse( + prepareSearch("test1").setFrom(from).setSize(size).addSort(sortField, order), + multiShardResponse -> assertNoFailuresAndResponse( + prepareSearch("test2").setFrom(from).setSize(size).addSort(sortField, order), + singleShardResponse -> { + assertThat( + multiShardResponse.getHits().getTotalHits().value, + equalTo(singleShardResponse.getHits().getTotalHits().value) + ); + assertThat(multiShardResponse.getHits().getHits().length, equalTo(singleShardResponse.getHits().getHits().length)); + for (int i = 0; i < multiShardResponse.getHits().getHits().length; i++) { + assertThat( + multiShardResponse.getHits().getAt(i).getSortValues()[0], + equalTo(singleShardResponse.getHits().getAt(i).getSortValues()[0]) + ); + assertThat(multiShardResponse.getHits().getAt(i).getId(), equalTo(singleShardResponse.getHits().getAt(i).getId())); + } + } + ) + ); } public void testCustomFormat() throws Exception { @@ -1700,17 +1768,19 @@ public void testCustomFormat() throws Exception { client().prepareIndex("test").setId("2").setSource("ip", "2001:db8::ff00:42:8329") ); - SearchResponse response = prepareSearch("test").addSort(SortBuilders.fieldSort("ip")).get(); - assertNoFailures(response); - assertEquals(2, response.getHits().getTotalHits().value); - assertArrayEquals(new String[] { "192.168.1.7" }, response.getHits().getAt(0).getSortValues()); - assertArrayEquals(new String[] { "2001:db8::ff00:42:8329" }, response.getHits().getAt(1).getSortValues()); - - response = prepareSearch("test").addSort(SortBuilders.fieldSort("ip")).searchAfter(new Object[] { "192.168.1.7" }).get(); - assertNoFailures(response); - assertEquals(2, response.getHits().getTotalHits().value); - assertEquals(1, response.getHits().getHits().length); - assertArrayEquals(new String[] { "2001:db8::ff00:42:8329" }, response.getHits().getAt(0).getSortValues()); + assertNoFailuresAndResponse(prepareSearch("test").addSort(SortBuilders.fieldSort("ip")), response -> { + assertEquals(2, response.getHits().getTotalHits().value); + assertArrayEquals(new String[] { "192.168.1.7" }, response.getHits().getAt(0).getSortValues()); + assertArrayEquals(new String[] { "2001:db8::ff00:42:8329" }, response.getHits().getAt(1).getSortValues()); + }); + assertNoFailuresAndResponse( + prepareSearch("test").addSort(SortBuilders.fieldSort("ip")).searchAfter(new Object[] { "192.168.1.7" }), + response -> { + assertEquals(2, response.getHits().getTotalHits().value); + assertEquals(1, response.getHits().getHits().length); + assertArrayEquals(new String[] 
{ "2001:db8::ff00:42:8329" }, response.getHits().getAt(0).getSortValues()); + } + ); } public void testScriptFieldSort() throws Exception { @@ -1728,34 +1798,38 @@ public void testScriptFieldSort() throws Exception { { Script script = new Script(ScriptType.INLINE, NAME, "doc['number'].value", Collections.emptyMap()); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .setSize(randomIntBetween(1, numDocs + 5)) - .addSort(SortBuilders.scriptSort(script, ScriptSortBuilder.ScriptSortType.NUMBER)) - .addSort(SortBuilders.scoreSort()) - .get(); - - double expectedValue = 0; - for (SearchHit hit : searchResponse.getHits()) { - assertThat(hit.getSortValues().length, equalTo(2)); - assertThat(hit.getSortValues()[0], equalTo(expectedValue++)); - assertThat(hit.getSortValues()[1], equalTo(1f)); - } + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .setSize(randomIntBetween(1, numDocs + 5)) + .addSort(SortBuilders.scriptSort(script, ScriptSortBuilder.ScriptSortType.NUMBER)) + .addSort(SortBuilders.scoreSort()), + response -> { + double expectedValue = 0; + for (SearchHit hit : response.getHits()) { + assertThat(hit.getSortValues().length, equalTo(2)); + assertThat(hit.getSortValues()[0], equalTo(expectedValue++)); + assertThat(hit.getSortValues()[1], equalTo(1f)); + } + } + ); } { Script script = new Script(ScriptType.INLINE, NAME, "doc['keyword'].value", Collections.emptyMap()); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .setSize(randomIntBetween(1, numDocs + 5)) - .addSort(SortBuilders.scriptSort(script, ScriptSortBuilder.ScriptSortType.STRING)) - .addSort(SortBuilders.scoreSort()) - .get(); - - int expectedValue = 0; - for (SearchHit hit : searchResponse.getHits()) { - assertThat(hit.getSortValues().length, equalTo(2)); - assertThat(hit.getSortValues()[0], equalTo(keywords.get(expectedValue++))); - assertThat(hit.getSortValues()[1], equalTo(1f)); - } + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .setSize(randomIntBetween(1, numDocs + 5)) + .addSort(SortBuilders.scriptSort(script, ScriptSortBuilder.ScriptSortType.STRING)) + .addSort(SortBuilders.scoreSort()), + response -> { + int expectedValue = 0; + for (SearchHit hit : response.getHits()) { + assertThat(hit.getSortValues().length, equalTo(2)); + assertThat(hit.getSortValues()[0], equalTo(keywords.get(expectedValue++))); + assertThat(hit.getSortValues()[1], equalTo(1f)); + } + } + ); } } @@ -1772,16 +1846,17 @@ public void testFieldAlias() throws Exception { builders.add(client().prepareIndex("new_index").setSource("route_length_miles", 100.2)); indexRandom(true, true, builders); - SearchResponse response = prepareSearch().setQuery(matchAllQuery()) - .setSize(builders.size()) - .addSort(SortBuilders.fieldSort("route_length_miles")) - .get(); - SearchHits hits = response.getHits(); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).setSize(builders.size()).addSort(SortBuilders.fieldSort("route_length_miles")), + response -> { + SearchHits hits = response.getHits(); - assertEquals(3, hits.getHits().length); - assertEquals(42.0, hits.getAt(0).getSortValues()[0]); - assertEquals(50.5, hits.getAt(1).getSortValues()[0]); - assertEquals(100.2, hits.getAt(2).getSortValues()[0]); + assertEquals(3, hits.getHits().length); + assertEquals(42.0, hits.getAt(0).getSortValues()[0]); + assertEquals(50.5, hits.getAt(1).getSortValues()[0]); + assertEquals(100.2, hits.getAt(2).getSortValues()[0]); + } + ); } public void testFieldAliasesWithMissingValues() throws 
Exception { @@ -1797,16 +1872,19 @@ public void testFieldAliasesWithMissingValues() throws Exception { builders.add(client().prepareIndex("new_index").setSource("route_length_miles", 100.2)); indexRandom(true, true, builders); - SearchResponse response = prepareSearch().setQuery(matchAllQuery()) - .setSize(builders.size()) - .addSort(SortBuilders.fieldSort("route_length_miles").missing(120.3)) - .get(); - SearchHits hits = response.getHits(); - - assertEquals(3, hits.getHits().length); - assertEquals(42.0, hits.getAt(0).getSortValues()[0]); - assertEquals(100.2, hits.getAt(1).getSortValues()[0]); - assertEquals(120.3, hits.getAt(2).getSortValues()[0]); + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .setSize(builders.size()) + .addSort(SortBuilders.fieldSort("route_length_miles").missing(120.3)), + response -> { + SearchHits hits = response.getHits(); + + assertEquals(3, hits.getHits().length); + assertEquals(42.0, hits.getAt(0).getSortValues()[0]); + assertEquals(100.2, hits.getAt(1).getSortValues()[0]); + assertEquals(120.3, hits.getAt(2).getSortValues()[0]); + } + ); } public void testCastNumericType() throws Exception { @@ -1822,34 +1900,40 @@ public void testCastNumericType() throws Exception { indexRandom(true, true, builders); { - SearchResponse response = prepareSearch().setQuery(matchAllQuery()) - .setSize(builders.size()) - .addSort(SortBuilders.fieldSort("field").setNumericType("long")) - .get(); - SearchHits hits = response.getHits(); - - assertEquals(3, hits.getHits().length); - for (int i = 0; i < 3; i++) { - assertThat(hits.getAt(i).getSortValues()[0].getClass(), equalTo(Long.class)); - } - assertEquals(12L, hits.getAt(0).getSortValues()[0]); - assertEquals(12L, hits.getAt(1).getSortValues()[0]); - assertEquals(12L, hits.getAt(2).getSortValues()[0]); + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .setSize(builders.size()) + .addSort(SortBuilders.fieldSort("field").setNumericType("long")), + response -> { + SearchHits hits = response.getHits(); + + assertEquals(3, hits.getHits().length); + for (int i = 0; i < 3; i++) { + assertThat(hits.getAt(i).getSortValues()[0].getClass(), equalTo(Long.class)); + } + assertEquals(12L, hits.getAt(0).getSortValues()[0]); + assertEquals(12L, hits.getAt(1).getSortValues()[0]); + assertEquals(12L, hits.getAt(2).getSortValues()[0]); + } + ); } { - SearchResponse response = prepareSearch().setQuery(matchAllQuery()) - .setSize(builders.size()) - .addSort(SortBuilders.fieldSort("field").setNumericType("double")) - .get(); - SearchHits hits = response.getHits(); - assertEquals(3, hits.getHits().length); - for (int i = 0; i < 3; i++) { - assertThat(hits.getAt(i).getSortValues()[0].getClass(), equalTo(Double.class)); - } - assertEquals(12D, hits.getAt(0).getSortValues()[0]); - assertEquals(12.1D, (double) hits.getAt(1).getSortValues()[0], 0.001f); - assertEquals(12.6D, hits.getAt(2).getSortValues()[0]); + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .setSize(builders.size()) + .addSort(SortBuilders.fieldSort("field").setNumericType("double")), + response -> { + SearchHits hits = response.getHits(); + assertEquals(3, hits.getHits().length); + for (int i = 0; i < 3; i++) { + assertThat(hits.getAt(i).getSortValues()[0].getClass(), equalTo(Double.class)); + } + assertEquals(12D, hits.getAt(0).getSortValues()[0]); + assertEquals(12.1D, (double) hits.getAt(1).getSortValues()[0], 0.001f); + assertEquals(12.6D, hits.getAt(2).getSortValues()[0]); + } + ); } } @@ -1864,100 +1948,114 @@ public void testCastDate() 
throws Exception { indexRandom(true, true, builders); { - SearchResponse response = prepareSearch().setQuery(matchAllQuery()) - .setSize(2) - .addSort(SortBuilders.fieldSort("field").setNumericType("date")) - .get(); - SearchHits hits = response.getHits(); - - assertEquals(2, hits.getHits().length); - for (int i = 0; i < 2; i++) { - assertThat(hits.getAt(i).getSortValues()[0].getClass(), equalTo(Long.class)); - } - assertEquals(1712879236854L, hits.getAt(0).getSortValues()[0]); - assertEquals(1712879237000L, hits.getAt(1).getSortValues()[0]); - - response = prepareSearch().setMaxConcurrentShardRequests(1) - .setQuery(matchAllQuery()) - .setSize(1) - .addSort(SortBuilders.fieldSort("field").setNumericType("date")) - .get(); - hits = response.getHits(); - - assertEquals(1, hits.getHits().length); - assertThat(hits.getAt(0).getSortValues()[0].getClass(), equalTo(Long.class)); - assertEquals(1712879236854L, hits.getAt(0).getSortValues()[0]); - - response = prepareSearch().setMaxConcurrentShardRequests(1) - .setQuery(matchAllQuery()) - .setSize(1) - .addSort(SortBuilders.fieldSort("field").order(SortOrder.DESC).setNumericType("date")) - .get(); - hits = response.getHits(); - - assertEquals(1, hits.getHits().length); - assertThat(hits.getAt(0).getSortValues()[0].getClass(), equalTo(Long.class)); - assertEquals(1712879237000L, hits.getAt(0).getSortValues()[0]); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).setSize(2).addSort(SortBuilders.fieldSort("field").setNumericType("date")), + response -> { + SearchHits hits = response.getHits(); + + assertEquals(2, hits.getHits().length); + for (int i = 0; i < 2; i++) { + assertThat(hits.getAt(i).getSortValues()[0].getClass(), equalTo(Long.class)); + } + assertEquals(1712879236854L, hits.getAt(0).getSortValues()[0]); + assertEquals(1712879237000L, hits.getAt(1).getSortValues()[0]); + } + ); + assertResponse( + prepareSearch().setMaxConcurrentShardRequests(1) + .setQuery(matchAllQuery()) + .setSize(1) + .addSort(SortBuilders.fieldSort("field").setNumericType("date")), + response -> { + SearchHits hits = response.getHits(); + + assertEquals(1, hits.getHits().length); + assertThat(hits.getAt(0).getSortValues()[0].getClass(), equalTo(Long.class)); + assertEquals(1712879236854L, hits.getAt(0).getSortValues()[0]); + } + ); + assertResponse( + prepareSearch().setMaxConcurrentShardRequests(1) + .setQuery(matchAllQuery()) + .setSize(1) + .addSort(SortBuilders.fieldSort("field").order(SortOrder.DESC).setNumericType("date")), + response -> { + SearchHits hits = response.getHits(); + + assertEquals(1, hits.getHits().length); + assertThat(hits.getAt(0).getSortValues()[0].getClass(), equalTo(Long.class)); + assertEquals(1712879237000L, hits.getAt(0).getSortValues()[0]); + } + ); } { - SearchResponse response = prepareSearch().setQuery(matchAllQuery()) - .setSize(2) - .addSort(SortBuilders.fieldSort("field").setNumericType("date_nanos")) - .get(); - SearchHits hits = response.getHits(); - assertEquals(2, hits.getHits().length); - for (int i = 0; i < 2; i++) { - assertThat(hits.getAt(i).getSortValues()[0].getClass(), equalTo(Long.class)); - } - assertEquals(1712879236854775807L, hits.getAt(0).getSortValues()[0]); - assertEquals(1712879237000000000L, hits.getAt(1).getSortValues()[0]); - - response = prepareSearch().setMaxConcurrentShardRequests(1) - .setQuery(matchAllQuery()) - .setSize(1) - .addSort(SortBuilders.fieldSort("field").setNumericType("date_nanos")) - .get(); - hits = response.getHits(); - assertEquals(1, hits.getHits().length); - 
assertThat(hits.getAt(0).getSortValues()[0].getClass(), equalTo(Long.class)); - assertEquals(1712879236854775807L, hits.getAt(0).getSortValues()[0]); - - response = prepareSearch().setMaxConcurrentShardRequests(1) - .setQuery(matchAllQuery()) - .setSize(1) - .addSort(SortBuilders.fieldSort("field").order(SortOrder.DESC).setNumericType("date_nanos")) - .get(); - hits = response.getHits(); - assertEquals(1, hits.getHits().length); - assertThat(hits.getAt(0).getSortValues()[0].getClass(), equalTo(Long.class)); - assertEquals(1712879237000000000L, hits.getAt(0).getSortValues()[0]); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).setSize(2).addSort(SortBuilders.fieldSort("field").setNumericType("date_nanos")), + response -> { + SearchHits hits = response.getHits(); + assertEquals(2, hits.getHits().length); + for (int i = 0; i < 2; i++) { + assertThat(hits.getAt(i).getSortValues()[0].getClass(), equalTo(Long.class)); + } + assertEquals(1712879236854775807L, hits.getAt(0).getSortValues()[0]); + assertEquals(1712879237000000000L, hits.getAt(1).getSortValues()[0]); + } + ); + assertResponse( + prepareSearch().setMaxConcurrentShardRequests(1) + .setQuery(matchAllQuery()) + .setSize(1) + .addSort(SortBuilders.fieldSort("field").setNumericType("date_nanos")), + response -> { + SearchHits hits = response.getHits(); + assertEquals(1, hits.getHits().length); + assertThat(hits.getAt(0).getSortValues()[0].getClass(), equalTo(Long.class)); + assertEquals(1712879236854775807L, hits.getAt(0).getSortValues()[0]); + } + ); + assertResponse( + prepareSearch().setMaxConcurrentShardRequests(1) + .setQuery(matchAllQuery()) + .setSize(1) + .addSort(SortBuilders.fieldSort("field").order(SortOrder.DESC).setNumericType("date_nanos")), + response -> { + SearchHits hits = response.getHits(); + assertEquals(1, hits.getHits().length); + assertThat(hits.getAt(0).getSortValues()[0].getClass(), equalTo(Long.class)); + assertEquals(1712879237000000000L, hits.getAt(0).getSortValues()[0]); + } + ); } { builders.clear(); builders.add(client().prepareIndex("index_date").setSource("field", "1905-04-11T23:47:17")); indexRandom(true, true, builders); - SearchResponse response = prepareSearch().setQuery(matchAllQuery()) - .setSize(1) - .addSort(SortBuilders.fieldSort("field").setNumericType("date_nanos")) - .get(); - assertNotNull(response.getShardFailures()); - assertThat(response.getShardFailures().length, equalTo(1)); - assertThat(response.getShardFailures()[0].toString(), containsString("are before the epoch in 1970")); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).setSize(1).addSort(SortBuilders.fieldSort("field").setNumericType("date_nanos")), + response -> { + assertNotNull(response.getShardFailures()); + assertThat(response.getShardFailures().length, equalTo(1)); + assertThat(response.getShardFailures()[0].toString(), containsString("are before the epoch in 1970")); + } + ); } { builders.clear(); builders.add(client().prepareIndex("index_date").setSource("field", "2346-04-11T23:47:17")); indexRandom(true, true, builders); - SearchResponse response = prepareSearch().setQuery(QueryBuilders.rangeQuery("field").gt("1970-01-01")) - .setSize(10) - .addSort(SortBuilders.fieldSort("field").setNumericType("date_nanos")) - .get(); - assertNotNull(response.getShardFailures()); - assertThat(response.getShardFailures().length, equalTo(1)); - assertThat(response.getShardFailures()[0].toString(), containsString("are after 2262")); + assertResponse( + 
prepareSearch().setQuery(QueryBuilders.rangeQuery("field").gt("1970-01-01")) + .setSize(10) + .addSort(SortBuilders.fieldSort("field").setNumericType("date_nanos")), + response -> { + assertNotNull(response.getShardFailures()); + assertThat(response.getShardFailures().length, equalTo(1)); + assertThat(response.getShardFailures()[0].toString(), containsString("are after 2262")); + } + ); } } @@ -1996,28 +2094,34 @@ public void testLongSortOptimizationCorrectResults() { refresh(); // *** 1. sort DESC on long_field - SearchResponse searchResponse = prepareSearch().addSort(new FieldSortBuilder("long_field").order(SortOrder.DESC)).setSize(10).get(); - assertNoFailures(searchResponse); - long previousLong = Long.MAX_VALUE; - for (int i = 0; i < searchResponse.getHits().getHits().length; i++) { - // check the correct sort order - SearchHit hit = searchResponse.getHits().getHits()[i]; - long currentLong = (long) hit.getSortValues()[0]; - assertThat("sort order is incorrect", currentLong, lessThanOrEqualTo(previousLong)); - previousLong = currentLong; - } + assertNoFailuresAndResponse( + prepareSearch().addSort(new FieldSortBuilder("long_field").order(SortOrder.DESC)).setSize(10), + response -> { + long previousLong = Long.MAX_VALUE; + for (int i = 0; i < response.getHits().getHits().length; i++) { + // check the correct sort order + SearchHit hit = response.getHits().getHits()[i]; + long currentLong = (long) hit.getSortValues()[0]; + assertThat("sort order is incorrect", currentLong, lessThanOrEqualTo(previousLong)); + previousLong = currentLong; + } + } + ); // *** 2. sort ASC on long_field - searchResponse = prepareSearch().addSort(new FieldSortBuilder("long_field").order(SortOrder.ASC)).setSize(10).get(); - assertNoFailures(searchResponse); - previousLong = Long.MIN_VALUE; - for (int i = 0; i < searchResponse.getHits().getHits().length; i++) { - // check the correct sort order - SearchHit hit = searchResponse.getHits().getHits()[i]; - long currentLong = (long) hit.getSortValues()[0]; - assertThat("sort order is incorrect", currentLong, greaterThanOrEqualTo(previousLong)); - previousLong = currentLong; - } + assertNoFailuresAndResponse( + prepareSearch().addSort(new FieldSortBuilder("long_field").order(SortOrder.ASC)).setSize(10), + response -> { + long previousLong = Long.MIN_VALUE; + for (int i = 0; i < response.getHits().getHits().length; i++) { + // check the correct sort order + SearchHit hit = response.getHits().getHits()[i]; + long currentLong = (long) hit.getSortValues()[0]; + assertThat("sort order is incorrect", currentLong, greaterThanOrEqualTo(previousLong)); + previousLong = currentLong; + } + } + ); } public void testSortMixedFieldTypes() { @@ -2033,10 +2137,7 @@ public void testSortMixedFieldTypes() { refresh(); { // mixing long and integer types is ok, as we convert integer sort to long sort - SearchResponse searchResponse = prepareSearch("index_long", "index_integer").addSort(new FieldSortBuilder("foo")) - .setSize(10) - .get(); - assertNoFailures(searchResponse); + assertNoFailures(prepareSearch("index_long", "index_integer").addSort(new FieldSortBuilder("foo")).setSize(10)); } String errMsg = "Can't sort on field [foo]; the field has incompatible sort types"; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceIT.java index 777db15b596ec..e09eb3d73b848 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.sort; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.geo.GeoDistance; import org.elasticsearch.common.settings.Settings; @@ -29,6 +28,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; @@ -148,81 +148,87 @@ public void testDistanceSortingMVFields() throws Exception { indicesAdmin().prepareRefresh().get(); // Order: Asc - SearchResponse searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.ASC)) - .get(); - - assertHitCount(searchResponse, 5); - assertOrderedSearchHits(searchResponse, "1", "2", "3", "4", "5"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(1055.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(2029.0d, 10d)); - + assertResponse( + prepareSearch("test").setQuery(matchAllQuery()) + .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.ASC)), + response -> { + assertHitCount(response, 5); + assertOrderedSearchHits(response, "1", "2", "3", "4", "5"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d)); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d)); + assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(1055.0d, 10d)); + assertThat(((Number) response.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(2029.0d, 10d)); + } + ); // Order: Asc, Mode: max - searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.ASC).sortMode(SortMode.MAX)) - .get(); - - assertHitCount(searchResponse, 5); - assertOrderedSearchHits(searchResponse, "1", "2", "4", "3", "5"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1258.0d, 10d)); - assertThat(((Number) 
searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(5286.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(8572.0d, 10d)); - + assertResponse( + prepareSearch("test").setQuery(matchAllQuery()) + .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.ASC).sortMode(SortMode.MAX)), + response -> { + assertHitCount(response, 5); + assertOrderedSearchHits(response, "1", "2", "4", "3", "5"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d)); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1258.0d, 10d)); + assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(5286.0d, 10d)); + assertThat(((Number) response.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(8572.0d, 10d)); + } + ); // Order: Desc - searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC)) - .get(); - - assertHitCount(searchResponse, 5); - assertOrderedSearchHits(searchResponse, "5", "3", "4", "2", "1"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(8572.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(5286.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1258.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); - + assertResponse( + prepareSearch("test").setQuery(matchAllQuery()) + .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC)), + response -> { + assertHitCount(response, 5); + assertOrderedSearchHits(response, "5", "3", "4", "2", "1"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(8572.0d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(5286.0d, 10d)); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1258.0d, 10d)); + assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d)); + assertThat(((Number) response.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); + } + ); // Order: Desc, Mode: min - searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC).sortMode(SortMode.MIN)) - .get(); - - assertHitCount(searchResponse, 5); - assertOrderedSearchHits(searchResponse, "5", "4", "3", "2", "1"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(2029.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(1055.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), 
closeTo(421.2d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); - - searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).sortMode(SortMode.AVG).order(SortOrder.ASC)) - .get(); - - assertHitCount(searchResponse, 5); - assertOrderedSearchHits(searchResponse, "1", "2", "4", "3", "5"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1157d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(2874d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(5301d, 10d)); - - searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).sortMode(SortMode.AVG).order(SortOrder.DESC)) - .get(); - - assertHitCount(searchResponse, 5); - assertOrderedSearchHits(searchResponse, "5", "3", "4", "2", "1"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(5301.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(2874.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1157.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); - + assertResponse( + prepareSearch("test").setQuery(matchAllQuery()) + .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC).sortMode(SortMode.MIN)), + response -> { + assertHitCount(response, 5); + assertOrderedSearchHits(response, "5", "4", "3", "2", "1"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(2029.0d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(1055.0d, 10d)); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d)); + assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d)); + assertThat(((Number) response.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); + } + ); + assertResponse( + prepareSearch("test").setQuery(matchAllQuery()) + .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).sortMode(SortMode.AVG).order(SortOrder.ASC)), + response -> { + assertHitCount(response, 5); + assertOrderedSearchHits(response, "1", "2", "4", "3", "5"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d)); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1157d, 10d)); + assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(2874d, 10d)); + assertThat(((Number) 
response.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(5301d, 10d)); + } + ); + assertResponse( + prepareSearch("test").setQuery(matchAllQuery()) + .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).sortMode(SortMode.AVG).order(SortOrder.DESC)), + response -> { + assertHitCount(response, 5); + assertOrderedSearchHits(response, "5", "3", "4", "2", "1"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(5301.0d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(2874.0d, 10d)); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1157.0d, 10d)); + assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d)); + assertThat(((Number) response.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); + } + ); try { prepareSearch("test").setQuery(matchAllQuery()) .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).sortMode(SortMode.SUM)); @@ -276,25 +282,28 @@ public void testDistanceSortingWithMissingGeoPoint() throws Exception { refresh(); // Order: Asc - SearchResponse searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.ASC)) - .get(); - - assertHitCount(searchResponse, 2); - assertOrderedSearchHits(searchResponse, "1", "2"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - + assertResponse( + prepareSearch("test").setQuery(matchAllQuery()) + .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.ASC)), + response -> { + assertHitCount(response, 2); + assertOrderedSearchHits(response, "1", "2"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + } + ); // Order: Desc - searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC)) - .get(); - - // Doc with missing geo point is first, is consistent with 0.20.x - assertHitCount(searchResponse, 2); - assertOrderedSearchHits(searchResponse, "2", "1"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(5286d, 10d)); + assertResponse( + prepareSearch("test").setQuery(matchAllQuery()) + .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC)), + response -> { + // Doc with missing geo point is first, is consistent with 0.20.x + assertHitCount(response, 2); + assertOrderedSearchHits(response, "2", "1"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(5286d, 10d)); + } + ); } public void testDistanceSortingNestedFields() throws Exception { @@ -416,119 +425,127 @@ public void 
testDistanceSortingNestedFields() throws Exception { ); // Order: Asc - SearchResponse searchResponse = prepareSearch("companies").setQuery(matchAllQuery()) - .addSort( - SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) - .order(SortOrder.ASC) - .setNestedSort(new NestedSortBuilder("branches")) - ) - .get(); - - assertHitCount(searchResponse, 4); - assertOrderedSearchHits(searchResponse, "1", "2", "3", "4"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1055.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(2029.0d, 10d)); - + assertResponse( + prepareSearch("companies").setQuery(matchAllQuery()) + .addSort( + SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) + .order(SortOrder.ASC) + .setNestedSort(new NestedSortBuilder("branches")) + ), + response -> { + assertHitCount(response, 4); + assertOrderedSearchHits(response, "1", "2", "3", "4"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d)); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1055.0d, 10d)); + assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(2029.0d, 10d)); + } + ); // Order: Asc, Mode: max - searchResponse = prepareSearch("companies").setQuery(matchAllQuery()) - .addSort( - SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) - .order(SortOrder.ASC) - .sortMode(SortMode.MAX) - .setNestedSort(new NestedSortBuilder("branches")) - ) - .get(); - - assertHitCount(searchResponse, 4); - assertOrderedSearchHits(searchResponse, "1", "3", "2", "4"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(1258.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(5286.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(8572.0d, 10d)); - + assertResponse( + prepareSearch("companies").setQuery(matchAllQuery()) + .addSort( + SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) + .order(SortOrder.ASC) + .sortMode(SortMode.MAX) + .setNestedSort(new NestedSortBuilder("branches")) + ), + response -> { + assertHitCount(response, 4); + assertOrderedSearchHits(response, "1", "3", "2", "4"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(1258.0d, 10d)); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(5286.0d, 10d)); + assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(8572.0d, 10d)); + } + ); // Order: Desc - searchResponse = prepareSearch("companies").setQuery(matchAllQuery()) - .addSort( - SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) - 
.order(SortOrder.DESC) - .setNestedSort(new NestedSortBuilder("branches")) - ) - .get(); - - assertHitCount(searchResponse, 4); - assertOrderedSearchHits(searchResponse, "4", "2", "3", "1"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(8572.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(5286.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1258.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); - + assertResponse( + prepareSearch("companies").setQuery(matchAllQuery()) + .addSort( + SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) + .order(SortOrder.DESC) + .setNestedSort(new NestedSortBuilder("branches")) + ), + response -> { + assertHitCount(response, 4); + assertOrderedSearchHits(response, "4", "2", "3", "1"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(8572.0d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(5286.0d, 10d)); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1258.0d, 10d)); + assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); + } + ); // Order: Desc, Mode: min - searchResponse = prepareSearch("companies").setQuery(matchAllQuery()) - .addSort( - SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) - .order(SortOrder.DESC) - .sortMode(SortMode.MIN) - .setNestedSort(new NestedSortBuilder("branches")) - ) - .get(); - - assertHitCount(searchResponse, 4); - assertOrderedSearchHits(searchResponse, "4", "3", "2", "1"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(2029.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(1055.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); - - searchResponse = prepareSearch("companies").setQuery(matchAllQuery()) - .addSort( - SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) - .sortMode(SortMode.AVG) - .order(SortOrder.ASC) - .setNestedSort(new NestedSortBuilder("branches")) - ) - .get(); - - assertHitCount(searchResponse, 4); - assertOrderedSearchHits(searchResponse, "1", "3", "2", "4"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(1157.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(2874.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(5301.0d, 10d)); - - searchResponse = prepareSearch("companies").setQuery(matchAllQuery()) - .addSort( - SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) - .setNestedSort(new NestedSortBuilder("branches")) - .sortMode(SortMode.AVG) - .order(SortOrder.DESC) - ) - .get(); - - assertHitCount(searchResponse, 4); - assertOrderedSearchHits(searchResponse, "4", "2", "3", "1"); - assertThat(((Number) 
searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(5301.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(2874.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1157.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); - - searchResponse = prepareSearch("companies").setQuery(matchAllQuery()) - .addSort( - SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) - .setNestedSort(new NestedSortBuilder("branches").setFilter(termQuery("branches.name", "brooklyn"))) - .sortMode(SortMode.AVG) - .order(SortOrder.ASC) - ) - .get(); - assertHitCount(searchResponse, 4); - assertFirstHit(searchResponse, hasId("4")); - assertSearchHits(searchResponse, "1", "2", "3", "4"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(8572.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - + assertResponse( + prepareSearch("companies").setQuery(matchAllQuery()) + .addSort( + SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) + .order(SortOrder.DESC) + .sortMode(SortMode.MIN) + .setNestedSort(new NestedSortBuilder("branches")) + ), + response -> { + assertHitCount(response, 4); + assertOrderedSearchHits(response, "4", "3", "2", "1"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(2029.0d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(1055.0d, 10d)); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d)); + assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); + } + ); + assertResponse( + prepareSearch("companies").setQuery(matchAllQuery()) + .addSort( + SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) + .sortMode(SortMode.AVG) + .order(SortOrder.ASC) + .setNestedSort(new NestedSortBuilder("branches")) + ), + response -> { + assertHitCount(response, 4); + assertOrderedSearchHits(response, "1", "3", "2", "4"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(1157.0d, 10d)); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(2874.0d, 10d)); + assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(5301.0d, 10d)); + } + ); + assertResponse( + prepareSearch("companies").setQuery(matchAllQuery()) + .addSort( + SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) + .setNestedSort(new NestedSortBuilder("branches")) + .sortMode(SortMode.AVG) + .order(SortOrder.DESC) + ), + response -> { + assertHitCount(response, 4); + assertOrderedSearchHits(response, "4", "2", "3", "1"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(5301.0d, 10d)); + assertThat(((Number) 
response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(2874.0d, 10d)); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1157.0d, 10d)); + assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); + } + ); + assertResponse( + prepareSearch("companies").setQuery(matchAllQuery()) + .addSort( + SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) + .setNestedSort(new NestedSortBuilder("branches").setFilter(termQuery("branches.name", "brooklyn"))) + .sortMode(SortMode.AVG) + .order(SortOrder.ASC) + ), + response -> { + assertHitCount(response, 4); + assertFirstHit(response, hasId("4")); + assertSearchHits(response, "1", "2", "3", "4"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(8572.0d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + } + ); try { prepareSearch("companies").setQuery(matchAllQuery()) .addSort( @@ -614,32 +631,36 @@ public void testDistanceSortingWithUnmappedField() throws Exception { refresh(); // Order: Asc - SearchResponse searchResponse = prepareSearch("test1", "test2").setQuery(matchAllQuery()) - .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).ignoreUnmapped(true).order(SortOrder.ASC)) - .get(); - - assertHitCount(searchResponse, 2); - assertOrderedSearchHits(searchResponse, "1", "2"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - + assertResponse( + prepareSearch("test1", "test2").setQuery(matchAllQuery()) + .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).ignoreUnmapped(true).order(SortOrder.ASC)), + response -> { + assertHitCount(response, 2); + assertOrderedSearchHits(response, "1", "2"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + } + ); // Order: Desc - searchResponse = prepareSearch("test1", "test2").setQuery(matchAllQuery()) - .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).ignoreUnmapped(true).order(SortOrder.DESC)) - .get(); - - // Doc with missing geo point is first, is consistent with 0.20.x - assertHitCount(searchResponse, 2); - assertOrderedSearchHits(searchResponse, "2", "1"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(5286d, 10d)); - + assertResponse( + prepareSearch("test1", "test2").setQuery(matchAllQuery()) + .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).ignoreUnmapped(true).order(SortOrder.DESC)), + response -> { + // Doc with missing geo point is first, is consistent with 0.20.x + assertHitCount(response, 2); + assertOrderedSearchHits(response, "2", "1"); + assertThat(((Number) 
response.getHits().getAt(0).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(5286d, 10d)); + } + ); // Make sure that by default the unmapped fields continue to fail - searchResponse = prepareSearch("test1", "test2").setQuery(matchAllQuery()) - .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC)) - .get(); - assertThat(searchResponse.getFailedShards(), greaterThan(0)); - assertHitCount(searchResponse, 1); + assertResponse( + prepareSearch("test1", "test2").setQuery(matchAllQuery()) + .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC)), + response -> { + assertThat(response.getFailedShards(), greaterThan(0)); + assertHitCount(response, 1); + } + ); } - } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java index 54d730cec2bc3..265cd77bdbbbc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java @@ -32,6 +32,7 @@ import static org.elasticsearch.search.sort.SortBuilders.fieldSort; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSortValues; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.closeTo; @@ -84,56 +85,65 @@ public void testManyToManyGeoPoints() throws ExecutionException, InterruptedExce q[0] = new GeoPoint(2, 1); } - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MIN).order(SortOrder.ASC)) - .get(); - assertOrderedSearchHits(searchResponse, "d1", "d2"); - assertThat( - (Double) searchResponse.getHits().getAt(0).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(2, 2, 3, 2, DistanceUnit.METERS), 10d) - ); - assertThat( - (Double) searchResponse.getHits().getAt(1).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(2, 1, 5, 1, DistanceUnit.METERS), 10d) - ); - - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MIN).order(SortOrder.DESC)) - .get(); - assertOrderedSearchHits(searchResponse, "d2", "d1"); - assertThat( - (Double) searchResponse.getHits().getAt(0).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(2, 1, 5, 1, DistanceUnit.METERS), 10d) - ); - assertThat( - (Double) searchResponse.getHits().getAt(1).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(2, 2, 3, 2, DistanceUnit.METERS), 10d) - ); - - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MAX).order(SortOrder.ASC)) - .get(); - assertOrderedSearchHits(searchResponse, "d1", "d2"); - assertThat( - (Double) searchResponse.getHits().getAt(0).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(2, 2, 4, 1, DistanceUnit.METERS), 10d) + assertResponse( + 
prepareSearch().setQuery(matchAllQuery()) + .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MIN).order(SortOrder.ASC)), + response -> { + assertOrderedSearchHits(response, "d1", "d2"); + assertThat( + (Double) response.getHits().getAt(0).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(2, 2, 3, 2, DistanceUnit.METERS), 10d) + ); + assertThat( + (Double) response.getHits().getAt(1).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(2, 1, 5, 1, DistanceUnit.METERS), 10d) + ); + } ); - assertThat( - (Double) searchResponse.getHits().getAt(1).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(2, 1, 6, 2, DistanceUnit.METERS), 10d) + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MIN).order(SortOrder.DESC)), + response -> { + assertOrderedSearchHits(response, "d2", "d1"); + assertThat( + (Double) response.getHits().getAt(0).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(2, 1, 5, 1, DistanceUnit.METERS), 10d) + ); + assertThat( + (Double) response.getHits().getAt(1).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(2, 2, 3, 2, DistanceUnit.METERS), 10d) + ); + } ); - - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MAX).order(SortOrder.DESC)) - .get(); - assertOrderedSearchHits(searchResponse, "d2", "d1"); - assertThat( - (Double) searchResponse.getHits().getAt(0).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(2, 1, 6, 2, DistanceUnit.METERS), 10d) + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MAX).order(SortOrder.ASC)), + response -> { + assertOrderedSearchHits(response, "d1", "d2"); + assertThat( + (Double) response.getHits().getAt(0).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(2, 2, 4, 1, DistanceUnit.METERS), 10d) + ); + assertThat( + (Double) response.getHits().getAt(1).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(2, 1, 6, 2, DistanceUnit.METERS), 10d) + ); + } ); - assertThat( - (Double) searchResponse.getHits().getAt(1).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(2, 2, 4, 1, DistanceUnit.METERS), 10d) + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MAX).order(SortOrder.DESC)), + response -> { + assertOrderedSearchHits(response, "d2", "d1"); + assertThat( + (Double) response.getHits().getAt(0).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(2, 1, 6, 2, DistanceUnit.METERS), 10d) + ); + assertThat( + (Double) response.getHits().getAt(1).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(2, 2, 4, 1, DistanceUnit.METERS), 10d) + ); + } ); } @@ -164,30 +174,35 @@ public void testSingeToManyAvgMedian() throws ExecutionException, InterruptedExc ); GeoPoint q = new GeoPoint(0, 0); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.AVG).order(SortOrder.ASC)) - .get(); - assertOrderedSearchHits(searchResponse, "d2", "d1"); - assertThat( - (Double) searchResponse.getHits().getAt(0).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(0, 0, 0, 4, DistanceUnit.METERS), 10d) - ); - assertThat( - (Double) searchResponse.getHits().getAt(1).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(0, 0, 0, 5, DistanceUnit.METERS), 10d) - ); - - 
searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MEDIAN).order(SortOrder.ASC)) - .get(); - assertOrderedSearchHits(searchResponse, "d1", "d2"); - assertThat( - (Double) searchResponse.getHits().getAt(0).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(0, 0, 0, 4, DistanceUnit.METERS), 10d) + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.AVG).order(SortOrder.ASC)), + response -> { + assertOrderedSearchHits(response, "d2", "d1"); + assertThat( + (Double) response.getHits().getAt(0).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(0, 0, 0, 4, DistanceUnit.METERS), 10d) + ); + assertThat( + (Double) response.getHits().getAt(1).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(0, 0, 0, 5, DistanceUnit.METERS), 10d) + ); + } ); - assertThat( - (Double) searchResponse.getHits().getAt(1).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(0, 0, 0, 5, DistanceUnit.METERS), 10d) + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MEDIAN).order(SortOrder.ASC)), + response -> { + assertOrderedSearchHits(response, "d1", "d2"); + assertThat( + (Double) response.getHits().getAt(0).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(0, 0, 0, 4, DistanceUnit.METERS), 10d) + ); + assertThat( + (Double) response.getHits().getAt(1).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(0, 0, 0, 5, DistanceUnit.METERS), 10d) + ); + } ); } @@ -245,30 +260,33 @@ public void testManyToManyGeoPointsWithDifferentFormats() throws ExecutionExcept } } - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC)) - .get(); - assertOrderedSearchHits(searchResponse, "d1", "d2"); - assertThat( - (Double) searchResponse.getHits().getAt(0).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(2.5, 1, 2, 1, DistanceUnit.METERS), 1.e-1) - ); - assertThat( - (Double) searchResponse.getHits().getAt(1).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(4.5, 1, 2, 1, DistanceUnit.METERS), 1.e-1) - ); - - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(geoDistanceSortBuilder.sortMode(SortMode.MAX).order(SortOrder.ASC)) - .get(); - assertOrderedSearchHits(searchResponse, "d1", "d2"); - assertThat( - (Double) searchResponse.getHits().getAt(0).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(3.25, 4, 2, 1, DistanceUnit.METERS), 1.e-1) + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC)), + response -> { + assertOrderedSearchHits(response, "d1", "d2"); + assertThat( + (Double) response.getHits().getAt(0).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(2.5, 1, 2, 1, DistanceUnit.METERS), 1.e-1) + ); + assertThat( + (Double) response.getHits().getAt(1).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(4.5, 1, 2, 1, DistanceUnit.METERS), 1.e-1) + ); + } ); - assertThat( - (Double) searchResponse.getHits().getAt(1).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(5.25, 4, 2, 1, DistanceUnit.METERS), 1.e-1) + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(geoDistanceSortBuilder.sortMode(SortMode.MAX).order(SortOrder.ASC)), + response -> { + assertOrderedSearchHits(response, "d1", "d2"); + assertThat( + 
(Double) response.getHits().getAt(0).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(3.25, 4, 2, 1, DistanceUnit.METERS), 1.e-1) + ); + assertThat( + (Double) response.getHits().getAt(1).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(5.25, 4, 2, 1, DistanceUnit.METERS), 1.e-1) + ); + } ); } @@ -289,42 +307,48 @@ public void testSinglePointGeoDistanceSort() throws ExecutionException, Interrup GeoDistanceSortBuilder geoDistanceSortBuilder = new GeoDistanceSortBuilder(LOCATION_FIELD, hashPoint); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC)) - .get(); - checkCorrectSortOrderForGeoSort(searchResponse); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC)), + response -> checkCorrectSortOrderForGeoSort(response) + ); geoDistanceSortBuilder = new GeoDistanceSortBuilder(LOCATION_FIELD, new GeoPoint(2, 2)); - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC)) - .get(); - checkCorrectSortOrderForGeoSort(searchResponse); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC)), + response -> checkCorrectSortOrderForGeoSort(response) + ); geoDistanceSortBuilder = new GeoDistanceSortBuilder(LOCATION_FIELD, 2, 2); - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC)) - .get(); - checkCorrectSortOrderForGeoSort(searchResponse); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC)), + response -> checkCorrectSortOrderForGeoSort(response) + ); - searchResponse = prepareSearch().setSource(new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort(LOCATION_FIELD, 2.0, 2.0))) - .get(); - checkCorrectSortOrderForGeoSort(searchResponse); + assertResponse( + prepareSearch().setSource(new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort(LOCATION_FIELD, 2.0, 2.0))), + response -> checkCorrectSortOrderForGeoSort(response) + ); - searchResponse = prepareSearch().setSource( - new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort(LOCATION_FIELD, "s037ms06g7h0")) - ).get(); - checkCorrectSortOrderForGeoSort(searchResponse); + assertResponse( + prepareSearch().setSource(new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort(LOCATION_FIELD, "s037ms06g7h0"))), + response -> checkCorrectSortOrderForGeoSort(response) + ); - searchResponse = prepareSearch().setSource(new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort(LOCATION_FIELD, 2.0, 2.0))) - .get(); - checkCorrectSortOrderForGeoSort(searchResponse); + assertResponse( + prepareSearch().setSource(new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort(LOCATION_FIELD, 2.0, 2.0))), + response -> checkCorrectSortOrderForGeoSort(response) + ); - searchResponse = prepareSearch().setSource( - new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort(LOCATION_FIELD, 2.0, 2.0).validation(GeoValidationMethod.COERCE)) - ).get(); - checkCorrectSortOrderForGeoSort(searchResponse); + assertResponse( + prepareSearch().setSource( + new SearchSourceBuilder().sort( + SortBuilders.geoDistanceSort(LOCATION_FIELD, 2.0, 2.0).validation(GeoValidationMethod.COERCE) + ) + ), + response -> 
checkCorrectSortOrderForGeoSort(response)
+        );
     }
 
     private static void checkCorrectSortOrderForGeoSort(SearchResponse searchResponse) {
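A note on the helper threaded through all of these hunks: assertResponse executes the request, hands the SearchResponse to the supplied consumer, and releases the response afterwards, which is what lets the converted tests drop the bare .get() calls. The following is a minimal sketch only, assuming the real helper in ElasticsearchAssertions has roughly this shape; the wrapper class name is hypothetical and the exact signature and ref-counting details should be checked in ElasticsearchAssertions itself.

import java.util.function.Consumer;

import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;

public final class ResponseAssertionsSketch {
    private ResponseAssertionsSketch() {}

    // Sketch of the assertResponse pattern: run the request, apply the caller's
    // assertions, and always release the response (assumption: the real helper
    // relies on SearchResponse being ref-counted and calls decRef() in a finally).
    public static void assertResponse(SearchRequestBuilder request, Consumer<SearchResponse> consumer) {
        SearchResponse response = request.get();   // execute the search
        try {
            consumer.accept(response);             // run the caller's assertions
        } finally {
            response.decRef();                     // release even if an assertion fails
        }
    }
}

The assertNoFailuresAndResponse variant seen in several hunks additionally asserts the absence of shard failures before invoking the consumer.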
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/source/MetadataFetchingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/source/MetadataFetchingIT.java
index 1860082c833ad..179778240cc34 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/source/MetadataFetchingIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/source/MetadataFetchingIT.java
@@ -10,7 +10,6 @@
 import org.apache.lucene.search.join.ScoreMode;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.search.SearchPhaseExecutionException;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.index.query.InnerHitBuilder;
 import org.elasticsearch.index.query.NestedQueryBuilder;
 import org.elasticsearch.index.query.TermQueryBuilder;
@@ -22,6 +21,7 @@
 import java.util.Collections;
 
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.notNullValue;
 import static org.hamcrest.Matchers.nullValue;
@@ -34,14 +34,16 @@ public void testSimple() {
         client().prepareIndex("test").setId("1").setSource("field", "value").get();
         refresh();
 
-        SearchResponse response = prepareSearch("test").storedFields("_none_").setFetchSource(false).setVersion(true).get();
-        assertThat(response.getHits().getAt(0).getId(), nullValue());
-        assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
-        assertThat(response.getHits().getAt(0).getVersion(), notNullValue());
-
-        response = prepareSearch("test").storedFields("_none_").get();
-        assertThat(response.getHits().getAt(0).getId(), nullValue());
-        assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
+        assertResponse(prepareSearch("test").storedFields("_none_").setFetchSource(false).setVersion(true), response -> {
+            assertThat(response.getHits().getAt(0).getId(), nullValue());
+            assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
+            assertThat(response.getHits().getAt(0).getVersion(), notNullValue());
+        });
+
+        assertResponse(prepareSearch("test").storedFields("_none_"), response -> {
+            assertThat(response.getHits().getAt(0).getId(), nullValue());
+            assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
+        });
     }
 
     public void testInnerHits() {
@@ -50,23 +53,26 @@ public void testInnerHits() {
         client().prepareIndex("test").setId("1").setSource("field", "value", "nested", Collections.singletonMap("title", "foo")).get();
         refresh();
 
-        SearchResponse response = prepareSearch("test").storedFields("_none_")
-            .setFetchSource(false)
-            .setQuery(
-                new NestedQueryBuilder("nested", new TermQueryBuilder("nested.title", "foo"), ScoreMode.Total).innerHit(
-                    new InnerHitBuilder().setStoredFieldNames(Collections.singletonList("_none_"))
-                        .setFetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE)
-                )
-            )
-            .get();
-        assertThat(response.getHits().getTotalHits().value, equalTo(1L));
-        assertThat(response.getHits().getAt(0).getId(), nullValue());
-        assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
-        assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1));
-        SearchHits hits = response.getHits().getAt(0).getInnerHits().get("nested");
-        assertThat(hits.getTotalHits().value, equalTo(1L));
-        assertThat(hits.getAt(0).getId(), nullValue());
-        assertThat(hits.getAt(0).getSourceAsString(), nullValue());
+        assertResponse(
+            prepareSearch("test").storedFields("_none_")
+                .setFetchSource(false)
+                .setQuery(
+                    new NestedQueryBuilder("nested", new TermQueryBuilder("nested.title", "foo"), ScoreMode.Total).innerHit(
+                        new InnerHitBuilder().setStoredFieldNames(Collections.singletonList("_none_"))
+                            .setFetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE)
+                    )
+                ),
+            response -> {
+                assertThat(response.getHits().getTotalHits().value, equalTo(1L));
+                assertThat(response.getHits().getAt(0).getId(), nullValue());
+                assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
+                assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1));
+                SearchHits hits = response.getHits().getAt(0).getInnerHits().get("nested");
+                assertThat(hits.getTotalHits().value, equalTo(1L));
+                assertThat(hits.getAt(0).getId(), nullValue());
+                assertThat(hits.getAt(0).getSourceAsString(), nullValue());
+            }
+        );
     }
 
     public void testWithRouting() {
@@ -76,14 +82,16 @@ public void testWithRouting() {
         client().prepareIndex("test").setId("1").setSource("field", "value").setRouting("toto").get();
         refresh();
 
-        SearchResponse response = prepareSearch("test").storedFields("_none_").setFetchSource(false).get();
-        assertThat(response.getHits().getAt(0).getId(), nullValue());
-        assertThat(response.getHits().getAt(0).field("_routing"), nullValue());
-        assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
+        assertResponse(prepareSearch("test").storedFields("_none_").setFetchSource(false), response -> {
+            assertThat(response.getHits().getAt(0).getId(), nullValue());
+            assertThat(response.getHits().getAt(0).field("_routing"), nullValue());
+            assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
+        });
 
-        response = prepareSearch("test").storedFields("_none_").get();
-        assertThat(response.getHits().getAt(0).getId(), nullValue());
-        assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
+        assertResponse(prepareSearch("test").storedFields("_none_"), response -> {
+            assertThat(response.getHits().getAt(0).getId(), nullValue());
+            assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
+        });
     }
 
     public void testInvalid() {
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/source/SourceFetchingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/source/SourceFetchingIT.java
index 3fcbc5cf4add6..5e7847c744040 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/source/SourceFetchingIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/source/SourceFetchingIT.java
@@ -8,9 +8,9 @@
 
 package org.elasticsearch.search.source;
 
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.test.ESIntegTestCase;
 
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.hamcrest.Matchers.notNullValue;
 import static org.hamcrest.Matchers.nullValue;
 import static org.hamcrest.core.IsEqual.equalTo;
@@ -23,14 +23,17 @@ public void testSourceDefaultBehavior() {
         indexDoc("test", "1", "field", "value");
         refresh();
 
-        SearchResponse response = prepareSearch("test").get();
-        assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
+        assertResponse(prepareSearch("test"), response -> 
assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue())); - response = prepareSearch("test").addStoredField("bla").get(); - assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue()); + assertResponse( + prepareSearch("test").addStoredField("bla"), + response -> assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue()) + ); - response = prepareSearch("test").addStoredField("_source").get(); - assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); + assertResponse( + prepareSearch("test").addStoredField("_source"), + response -> assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()) + ); } @@ -41,26 +44,30 @@ public void testSourceFiltering() { client().prepareIndex("test").setId("1").setSource("field1", "value", "field2", "value2").get(); refresh(); - SearchResponse response = prepareSearch("test").setFetchSource(false).get(); - assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue()); - - response = prepareSearch("test").setFetchSource(true).get(); - assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); - - response = prepareSearch("test").setFetchSource("field1", null).get(); - assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); - assertThat((String) response.getHits().getAt(0).getSourceAsMap().get("field1"), equalTo("value")); - - response = prepareSearch("test").setFetchSource("hello", null).get(); - assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(0)); - - response = prepareSearch("test").setFetchSource(new String[] { "*" }, new String[] { "field2" }).get(); - assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); - assertThat((String) response.getHits().getAt(0).getSourceAsMap().get("field1"), equalTo("value")); - + assertResponse( + prepareSearch("test").setFetchSource(false), + response -> assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue()) + ); + + assertResponse( + prepareSearch("test").setFetchSource(true), + response -> assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()) + ); + + assertResponse(prepareSearch("test").setFetchSource("field1", null), response -> { + assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); + assertThat((String) response.getHits().getAt(0).getSourceAsMap().get("field1"), equalTo("value")); + }); + assertResponse(prepareSearch("test").setFetchSource("hello", null), response -> { + assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(0)); + }); + assertResponse(prepareSearch("test").setFetchSource(new String[] { "*" }, new String[] { "field2" }), response -> { + assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); + assertThat((String) response.getHits().getAt(0).getSourceAsMap().get("field1"), equalTo("value")); + }); } /** @@ -74,14 +81,15 @@ public void testSourceWithWildcardFiltering() { client().prepareIndex("test").setId("1").setSource("field", "value").get(); refresh(); - 
SearchResponse response = prepareSearch("test").setFetchSource(new String[] { "*.notexisting", "field" }, null).get(); - assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); - assertThat((String) response.getHits().getAt(0).getSourceAsMap().get("field"), equalTo("value")); - - response = prepareSearch("test").setFetchSource(new String[] { "field.notexisting.*", "field" }, null).get(); - assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); - assertThat((String) response.getHits().getAt(0).getSourceAsMap().get("field"), equalTo("value")); + assertResponse(prepareSearch("test").setFetchSource(new String[] { "*.notexisting", "field" }, null), response -> { + assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); + assertThat((String) response.getHits().getAt(0).getSourceAsMap().get("field"), equalTo("value")); + }); + assertResponse(prepareSearch("test").setFetchSource(new String[] { "field.notexisting.*", "field" }, null), response -> { + assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); + assertThat((String) response.getHits().getAt(0).getSourceAsMap().get("field"), equalTo("value")); + }); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/stats/FieldUsageStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/FieldUsageStatsIT.java index 32f5e14b944a2..e3b5301cb1999 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/stats/FieldUsageStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/FieldUsageStatsIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.admin.indices.stats.FieldUsageShardResponse; import org.elasticsearch.action.admin.indices.stats.FieldUsageStatsAction; import org.elasticsearch.action.admin.indices.stats.FieldUsageStatsRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; @@ -30,6 +29,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; public class FieldUsageStatsIT extends ESIntegTestCase { @@ -73,16 +73,18 @@ public void testFieldUsageStats() throws ExecutionException, InterruptedExceptio assertFalse(stats.hasField("field2")); assertFalse(stats.hasField("date_field")); - SearchResponse searchResponse = prepareSearch().setSearchType(SearchType.DEFAULT) - .setQuery(QueryBuilders.termQuery("field", "value")) - .addAggregation(AggregationBuilders.terms("agg1").field("field.keyword")) - .addAggregation(AggregationBuilders.filter("agg2", QueryBuilders.spanTermQuery("field2", "value2"))) - .setSize(between(5, 100)) - .setPreference("fixed") - .get(); - - assertHitCount(searchResponse, 30); - assertAllSuccessful(searchResponse); + assertResponse( + prepareSearch().setSearchType(SearchType.DEFAULT) + 
.setQuery(QueryBuilders.termQuery("field", "value")) + .addAggregation(AggregationBuilders.terms("agg1").field("field.keyword")) + .addAggregation(AggregationBuilders.filter("agg2", QueryBuilders.spanTermQuery("field2", "value2"))) + .setSize(between(5, 100)) + .setPreference("fixed"), + response -> { + assertHitCount(response, 30); + assertAllSuccessful(response); + } + ); stats = aggregated(client().execute(FieldUsageStatsAction.INSTANCE, new FieldUsageStatsRequest()).get().getStats().get("test")); logger.info("Stats after first query: {}", stats); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java index 07e8c516eda41..81c776d0893c1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardIterator; @@ -39,7 +38,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -103,16 +103,22 @@ public void testSimpleStats() throws Exception { refresh(); int iters = scaledRandomIntBetween(100, 150); for (int i = 0; i < iters; i++) { - SearchResponse searchResponse = internalCluster().coordOnlyNodeClient() - .prepareSearch() - .setQuery(QueryBuilders.termQuery("field", "value")) - .setStats("group1", "group2") - .highlighter(new HighlightBuilder().field("field")) - .addScriptField("script1", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.field", Collections.emptyMap())) - .setSize(100) - .get(); - assertHitCount(searchResponse, docsTest1 + docsTest2); - assertAllSuccessful(searchResponse); + assertResponse( + internalCluster().coordOnlyNodeClient() + .prepareSearch() + .setQuery(QueryBuilders.termQuery("field", "value")) + .setStats("group1", "group2") + .highlighter(new HighlightBuilder().field("field")) + .addScriptField( + "script1", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.field", Collections.emptyMap()) + ) + .setSize(100), + response -> { + assertHitCount(response, docsTest1 + docsTest2); + assertAllSuccessful(response); + } + ); } IndicesStatsResponse indicesStats = indicesAdmin().prepareStats().get(); @@ -188,11 +194,15 @@ public void testOpenContexts() { assertThat(indicesStats.getTotal().getSearch().getOpenContexts(), equalTo(0L)); int size = scaledRandomIntBetween(1, docs); - SearchResponse searchResponse = 
prepareSearch().setQuery(matchAllQuery()) - .setSize(size) - .setScroll(TimeValue.timeValueMinutes(2)) - .get(); - assertNoFailures(searchResponse); + final String[] scroll = new String[1]; + final int[] total = new int[1]; + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()).setSize(size).setScroll(TimeValue.timeValueMinutes(2)), + response -> { + scroll[0] = response.getScrollId(); + total[0] = response.getHits().getHits().length; + } + ); // refresh the stats now that scroll contexts are opened indicesStats = indicesAdmin().prepareStats(index).get(); @@ -202,11 +212,14 @@ public void testOpenContexts() { int hits = 0; while (true) { - if (searchResponse.getHits().getHits().length == 0) { + if (total[0] == 0) { break; } - hits += searchResponse.getHits().getHits().length; - searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueMinutes(2)).get(); + hits += total[0]; + assertResponse(client().prepareSearchScroll(scroll[0]).setScroll(TimeValue.timeValueMinutes(2)), response -> { + scroll[0] = response.getScrollId(); + total[0] = response.getHits().getHits().length; + }); } long expected = 0; @@ -220,7 +233,7 @@ public void testOpenContexts() { assertEquals(hits, docs * numAssignedShards(index)); assertThat(stats.getQueryCount(), greaterThanOrEqualTo(expected)); - clearScroll(searchResponse.getScrollId()); + clearScroll(scroll[0]); indicesStats = indicesAdmin().prepareStats().get(); stats = indicesStats.getTotal().getSearch().getTotal(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index e188c11125c42..4b02e26815524 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -1166,7 +1166,7 @@ public void testDeleteIndexDuringSnapshot() throws Exception { final int concurrentLoops = randomIntBetween(2, 5); final List> futures = new ArrayList<>(concurrentLoops); for (int i = 0; i < concurrentLoops; i++) { - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); futures.add(future); startSnapshotDeleteLoop(repoName, indexName, "test-snap-" + i, future); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java index 32a1d6724e0fd..1f86d4cb39ea4 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java @@ -48,7 +48,6 @@ protected Collection> nodePlugins() { } /** Check that the reset method cleans up a feature */ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/97780") public void testResetSystemIndices() throws Exception { String systemIndex1 = ".test-system-idx-1"; String systemIndex2 = ".second-test-system-idx-1"; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java index 4721b1a186a99..7eaa49b27007d 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java @@ -55,6 +55,7 @@ import java.nio.file.Path; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.Comparator; import java.util.HashSet; @@ -496,6 +497,11 @@ private void restoreSnapshot(SnapshotInfo snapshotInfo, Releasable releasePrevio final String[] indicesToRestore = indicesToRestoreList.toArray(new String[0]); final String[] indicesToClose = indicesToCloseList.toArray(new String[0]); final String[] indicesToDelete = indicesToDeleteList.toArray(new String[0]); + final String indicesToRestoreDescription = (restoreSpecificIndices ? "" : "*=") + Arrays.toString(indicesToRestore); + + if (restoreSpecificIndices == false) { + assertEquals(Set.copyOf(snapshotInfo.indices()), Set.of(indicesToRestore)); + } final ListenableFuture closeIndicesStep = new ListenableFuture<>(); final ListenableFuture deleteIndicesStep = new ListenableFuture<>(); @@ -515,15 +521,17 @@ private void restoreSnapshot(SnapshotInfo snapshotInfo, Releasable releasePrevio ); logger.info( - "--> closing indices {} in preparation for restoring from [{}:{}]", - indicesToRestoreList, + "--> closing indices {} in preparation for restoring {} from [{}:{}]", + indicesToClose, + indicesToRestoreDescription, snapshotInfo.repository(), snapshotInfo.snapshotId().getName() ); indicesAdmin().prepareClose(indicesToClose).execute(mustSucceed(closeIndexResponse -> { logger.info( - "--> finished closing indices {} in preparation for restoring from [{}:{}]", - indicesToRestoreList, + "--> finished closing indices {} in preparation for restoring {} from [{}:{}]", + indicesToClose, + indicesToRestoreDescription, snapshotInfo.repository(), snapshotInfo.snapshotId().getName() ); @@ -538,15 +546,17 @@ private void restoreSnapshot(SnapshotInfo snapshotInfo, Releasable releasePrevio if (indicesToDelete.length > 0) { logger.info( - "--> deleting indices {} in preparation for restoring from [{}:{}]", - indicesToRestoreList, + "--> deleting indices {} in preparation for restoring {} from [{}:{}]", + indicesToDelete, + indicesToRestoreDescription, snapshotInfo.repository(), snapshotInfo.snapshotId().getName() ); indicesAdmin().prepareDelete(indicesToDelete).execute(mustSucceed(deleteIndicesResponse -> { logger.info( - "--> finished deleting indices {} in preparation for restoring from [{}:{}]", - indicesToRestoreList, + "--> finished deleting indices {} in preparation for restoring {} from [{}:{}]", + indicesToDelete, + indicesToRestoreDescription, snapshotInfo.repository(), snapshotInfo.snapshotId().getName() ); @@ -569,9 +579,8 @@ private void restoreSnapshot(SnapshotInfo snapshotInfo, Releasable releasePrevio } logger.info( - "--> restoring indices {}{} from [{}:{}]", - restoreSpecificIndices ?
"" : "*=", - indicesToRestoreList, + "--> restoring indices {} from [{}:{}]", + indicesToRestoreDescription, snapshotInfo.repository(), snapshotInfo.snapshotId().getName() ); @@ -579,7 +588,7 @@ private void restoreSnapshot(SnapshotInfo snapshotInfo, Releasable releasePrevio restoreSnapshotRequestBuilder.execute(mustSucceed(restoreSnapshotResponse -> { logger.info( "--> triggered restore of indices {} from [{}:{}], waiting for green health", - indicesToRestoreList, + indicesToRestoreDescription, snapshotInfo.repository(), snapshotInfo.snapshotId().getName() ); @@ -590,7 +599,7 @@ private void restoreSnapshot(SnapshotInfo snapshotInfo, Releasable releasePrevio logger.info( "--> indices {} successfully restored from [{}:{}]", - indicesToRestoreList, + indicesToRestoreDescription, snapshotInfo.repository(), snapshotInfo.snapshotId().getName() ); diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index 5898a9bdbfb53..0b899d3bafc3e 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -407,7 +407,9 @@ with org.elasticsearch.features.FeatureInfrastructureFeatures, org.elasticsearch.health.HealthFeatures, - org.elasticsearch.rest.RestFeatures; + org.elasticsearch.cluster.metadata.MetadataFeatures, + org.elasticsearch.rest.RestFeatures, + org.elasticsearch.indices.IndicesFeatures; uses org.elasticsearch.plugins.internal.SettingsExtension; uses RestExtension; @@ -418,4 +420,6 @@ org.elasticsearch.index.codec.bloomfilter.ES85BloomFilterPostingsFormat, org.elasticsearch.index.codec.bloomfilter.ES87BloomFilterPostingsFormat; provides org.apache.lucene.codecs.DocValuesFormat with ES87TSDBDocValuesFormat; + + exports org.elasticsearch.cluster.routing.allocation.shards to org.elasticsearch.shardhealth, org.elasticsearch.serverless.shardhealth; } diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index 4bbfe994f7f6d..5c5133e478ee1 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -1131,12 +1131,7 @@ private enum ElasticsearchExceptionHandle { UNKNOWN_VERSION_ADDED ), // 26 was BatchOperationException - SNAPSHOT_CREATION_EXCEPTION( - org.elasticsearch.snapshots.SnapshotCreationException.class, - org.elasticsearch.snapshots.SnapshotCreationException::new, - 27, - UNKNOWN_VERSION_ADDED - ), + // 27 was SnapshotCreationException // 28 was DeleteFailedEngineException, deprecated in 6.0, removed in 7.0 DOCUMENT_MISSING_EXCEPTION( org.elasticsearch.index.engine.DocumentMissingException.class, diff --git a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java index d625da5df9cc7..b67b59aeee076 100644 --- a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java +++ b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java @@ -264,8 +264,13 @@ public static void maybeDieOnAnotherThread(final Throwable throwable) { /** * Deduplicate the failures by exception message and index. 
+ * @param failures array to deduplicate + * @return deduplicated array; if failures is null or empty, it will be returned without modification */ public static ShardOperationFailedException[] groupBy(ShardOperationFailedException[] failures) { + if (failures == null || failures.length == 0) { + return failures; + } List uniqueFailures = new ArrayList<>(); Set reasons = new HashSet<>(); for (ShardOperationFailedException failure : failures) { diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 0c7145730e447..5bdc74b8f2545 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -161,6 +161,14 @@ static TransportVersion def(int id) { public static final TransportVersion UNDESIRED_SHARD_ALLOCATIONS_COUNT_ADDED = def(8_530_00_0); public static final TransportVersion ML_INFERENCE_TASK_SETTINGS_OPTIONAL_ADDED = def(8_531_00_0); public static final TransportVersion DEPRECATED_COMPONENT_TEMPLATES_ADDED = def(8_532_00_0); + public static final TransportVersion UPDATE_NON_DYNAMIC_SETTINGS_ADDED = def(8_533_00_0); + public static final TransportVersion REPO_ANALYSIS_REGISTER_OP_COUNT_ADDED = def(8_534_00_0); + public static final TransportVersion ML_TRAINED_MODEL_PREFIX_STRINGS_ADDED = def(8_535_00_0); + public static final TransportVersion COUNTED_KEYWORD_ADDED = def(8_536_00_0); + public static final TransportVersion SHAPE_VALUE_SERIALIZATION_ADDED = def(8_537_00_0); + public static final TransportVersion INFERENCE_MULTIPLE_INPUTS = def(8_538_00_0); + public static final TransportVersion ADDITIONAL_DESIRED_BALANCE_RECONCILIATION_STATS = def(8_539_00_0); + public static final TransportVersion ML_STATE_CHANGE_TIMESTAMPS = def(8_540_00_0); /* * STOP! READ THIS FIRST! No, really, @@ -202,15 +210,17 @@ static TransportVersion def(int id) { * If your git checkout has the expected minor-version-numbered branches and the expected release-version tags then you can find the * transport versions known by a particular release ... * - * git show v8.9.1:server/src/main/java/org/elasticsearch/TransportVersions.java | grep def + * git show v8.11.0:server/src/main/java/org/elasticsearch/TransportVersions.java | grep '= def' * * ... or by a particular branch ... * - * git show 8.10:server/src/main/java/org/elasticsearch/TransportVersions.java | grep def + * git show 8.11:server/src/main/java/org/elasticsearch/TransportVersions.java | grep '= def' * * ... and you can see which versions were added in between two versions too ... * - * git diff 8.10..main -- server/src/main/java/org/elasticsearch/TransportVersions.java + * git diff v8.11.0..main -- server/src/main/java/org/elasticsearch/TransportVersions.java + * + * In branches 8.7-8.10 see server/src/main/java/org/elasticsearch/TransportVersion.java for the equivalent definitions. 
*/ /** diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 928297397f15c..5dd9a3a055043 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -115,6 +115,7 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_7_17_13 = new Version(7_17_13_99); public static final Version V_7_17_14 = new Version(7_17_14_99); public static final Version V_7_17_15 = new Version(7_17_15_99); + public static final Version V_7_17_16 = new Version(7_17_16_99); public static final Version V_8_0_0 = new Version(8_00_00_99); public static final Version V_8_0_1 = new Version(8_00_01_99); public static final Version V_8_1_0 = new Version(8_01_00_99); @@ -153,8 +154,9 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_8_10_2 = new Version(8_10_02_99); public static final Version V_8_10_3 = new Version(8_10_03_99); public static final Version V_8_10_4 = new Version(8_10_04_99); - public static final Version V_8_10_5 = new Version(8_10_05_99); public static final Version V_8_11_0 = new Version(8_11_00_99); + public static final Version V_8_11_1 = new Version(8_11_01_99); + public static final Version V_8_11_2 = new Version(8_11_02_99); public static final Version V_8_12_0 = new Version(8_12_00_99); public static final Version CURRENT = V_8_12_0; diff --git a/server/src/main/java/org/elasticsearch/action/ActionListener.java b/server/src/main/java/org/elasticsearch/action/ActionListener.java index 30ad4fdeaf04f..b0e18d5ef9b55 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionListener.java +++ b/server/src/main/java/org/elasticsearch/action/ActionListener.java @@ -327,7 +327,12 @@ private void assertFirstRun() { @Override public void onResponse(Response response) { assertFirstRun(); - delegate.onResponse(response); + try { + delegate.onResponse(response); + } catch (Exception e) { + assert false : new AssertionError("listener [" + delegate + "] must handle its own exceptions", e); + throw e; + } } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java index 0120718361877..ef04198c7374b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java @@ -20,6 +20,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.desirednodes.DesiredNodesSettingsValidator; import org.elasticsearch.cluster.desirednodes.VersionConflictException; +import org.elasticsearch.cluster.metadata.DesiredNode; import org.elasticsearch.cluster.metadata.DesiredNodes; import org.elasticsearch.cluster.metadata.DesiredNodesMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -30,6 +31,7 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -41,6 +43,7 @@ public class 
TransportUpdateDesiredNodesAction extends TransportMasterNodeAction { private static final Logger logger = LogManager.getLogger(TransportUpdateDesiredNodesAction.class); + private final FeatureService featureService; private final DesiredNodesSettingsValidator settingsValidator; private final MasterServiceTaskQueue taskQueue; @@ -48,6 +51,7 @@ public class TransportUpdateDesiredNodesAction extends TransportMasterNodeAction public TransportUpdateDesiredNodesAction( TransportService transportService, ClusterService clusterService, + FeatureService featureService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, @@ -66,6 +70,7 @@ public TransportUpdateDesiredNodesAction( UpdateDesiredNodesResponse::new, EsExecutors.DIRECT_EXECUTOR_SERVICE ); + this.featureService = featureService; this.settingsValidator = settingsValidator; this.taskQueue = clusterService.createTaskQueue( "update-desired-nodes", @@ -94,13 +99,12 @@ protected void masterOperation( @Override protected void doExecute(Task task, UpdateDesiredNodesRequest request, ActionListener listener) { - final var minNodeVersion = clusterService.state().nodes().getMinNodeVersion(); - if (request.isCompatibleWithVersion(minNodeVersion) == false) { + if (request.clusterHasRequiredFeatures(nf -> featureService.clusterHasFeature(clusterService.state(), nf)) == false) { listener.onFailure( new IllegalArgumentException( "Unable to use processor ranges, floating-point (with greater precision) processors " - + "in mixed-clusters with nodes in version: " - + minNodeVersion + + "in mixed-clusters with nodes that do not support feature " + + DesiredNode.RANGE_FLOAT_PROCESSORS_SUPPORTED.id() ) ); return; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java index 4b33a12d68b1f..c7c2b9a290a2e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java @@ -10,13 +10,13 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ValidateActions; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.cluster.metadata.DesiredNode; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentParser; @@ -24,6 +24,7 @@ import java.io.IOException; import java.util.List; import java.util.Objects; +import java.util.function.Predicate; public class UpdateDesiredNodesRequest extends AcknowledgedRequest { private static final TransportVersion DRY_RUN_VERSION = TransportVersions.V_8_4_0; @@ -100,12 +101,9 @@ public boolean isDryRun() { return dryRun; } - public boolean isCompatibleWithVersion(Version version) { - if (version.onOrAfter(DesiredNode.RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION)) { - return true; - } - - return nodes.stream().allMatch(desiredNode -> desiredNode.isCompatibleWithVersion(version)); + public boolean 
clusterHasRequiredFeatures(Predicate clusterHasFeature) { + return clusterHasFeature.test(DesiredNode.RANGE_FLOAT_PROCESSORS_SUPPORTED) + || nodes.stream().allMatch(n -> n.clusterHasRequiredFeatures(clusterHasFeature)); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesAction.java index c05b19043e88b..a04c7c2c2af60 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesAction.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.admin.indices.alias.get; import org.elasticsearch.action.ActionType; +import org.elasticsearch.common.io.stream.Writeable; public class GetAliasesAction extends ActionType { @@ -16,6 +17,6 @@ public class GetAliasesAction extends ActionType { public static final String NAME = "indices:admin/aliases/get"; private GetAliasesAction() { - super(NAME, GetAliasesResponse::new); + super(NAME, Writeable.Reader.localOnly()); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java index d801b441fecea..ee6797ca58fb9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java @@ -10,12 +10,17 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.AliasesRequest; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.action.support.master.MasterNodeReadRequest; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; import java.io.IOException; +import java.util.Map; public class GetAliasesRequest extends MasterNodeReadRequest implements AliasesRequest { @@ -33,6 +38,11 @@ public GetAliasesRequest(String... aliases) { public GetAliasesRequest() {} + /** + * NB prior to 8.12 get-aliases was a TransportMasterNodeReadAction so for BwC we must remain able to read these requests until we no + * longer need to support {@link org.elasticsearch.TransportVersions#CLUSTER_FEATURES_ADDED} and earlier. Once we remove this we can + * also make this class a regular ActionRequest instead of a MasterNodeReadRequest. 
+ */ public GetAliasesRequest(StreamInput in) throws IOException { super(in); indices = in.readStringArray(); @@ -43,11 +53,7 @@ public GetAliasesRequest(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeStringArray(indices); - out.writeStringArray(aliases); - indicesOptions.writeIndicesOptions(out); - out.writeStringArray(originalAliases); + TransportAction.localOnly(); } @Override @@ -108,4 +114,9 @@ public ActionRequestValidationException validate() { public boolean includeDataStreams() { return true; } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new CancellableTask(id, type, action, "", parentTaskId, headers); + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java index 91c6f49101e85..c0e26b16585c4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.metadata.AliasMetadata; import org.elasticsearch.cluster.metadata.DataStreamAlias; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; @@ -29,12 +28,6 @@ public GetAliasesResponse(Map> aliases, Map i.readCollectionAsList(AliasMetadata::new)); - dataStreamAliases = in.readMap(in1 -> in1.readCollectionAsList(DataStreamAlias::new)); - } - public Map> getAliases() { return aliases; } @@ -43,6 +36,10 @@ public Map> getDataStreamAliases() { return dataStreamAliases; } + /** + * NB prior to 8.12 get-aliases was a TransportMasterNodeReadAction so for BwC we must remain able to write these responses until we no + * longer need to support {@link org.elasticsearch.TransportVersions#CLUSTER_FEATURES_ADDED} and earlier. 
+ */ @Override public void writeTo(StreamOutput out) throws IOException { out.writeMap(aliases, StreamOutput::writeCollection); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java index 456b2cc7b899f..e43d1a825c233 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java @@ -9,7 +9,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; +import org.elasticsearch.action.support.TransportLocalClusterStateAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -26,6 +26,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.indices.SystemIndices.SystemIndexAccessLevel; +import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -38,32 +39,37 @@ import java.util.Map; import java.util.function.Predicate; -public class TransportGetAliasesAction extends TransportMasterNodeReadAction { +/** + * NB prior to 8.12 this was a TransportMasterNodeReadAction so for BwC it must be registered with the TransportService (i.e. a + * HandledTransportAction) until we no longer need to support {@link org.elasticsearch.TransportVersions#CLUSTER_FEATURES_ADDED} and + * earlier. 
+ */ +public class TransportGetAliasesAction extends TransportLocalClusterStateAction { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(TransportGetAliasesAction.class); + private final IndexNameExpressionResolver indexNameExpressionResolver; private final SystemIndices systemIndices; + private final ThreadContext threadContext; @Inject public TransportGetAliasesAction( TransportService transportService, - ClusterService clusterService, - ThreadPool threadPool, ActionFilters actionFilters, + ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, SystemIndices systemIndices ) { super( GetAliasesAction.NAME, - transportService, clusterService, - threadPool, + transportService, actionFilters, GetAliasesRequest::new, - indexNameExpressionResolver, - GetAliasesResponse::new, - threadPool.executor(ThreadPool.Names.MANAGEMENT) + clusterService.threadPool().executor(ThreadPool.Names.MANAGEMENT) ); + this.indexNameExpressionResolver = indexNameExpressionResolver; this.systemIndices = systemIndices; + this.threadContext = clusterService.threadPool().getThreadContext(); } @Override @@ -77,15 +83,22 @@ protected ClusterBlockException checkBlock(GetAliasesRequest request, ClusterSta } @Override - protected void masterOperation(Task task, GetAliasesRequest request, ClusterState state, ActionListener listener) { + protected void localClusterStateOperation( + Task task, + GetAliasesRequest request, + ClusterState state, + ActionListener listener + ) { assert Transports.assertNotTransportThread("no need to avoid the context switch and may be expensive if there are many aliases"); + final var cancellableTask = (CancellableTask) task; // resolve all concrete indices upfront and warn/error later final String[] concreteIndices = indexNameExpressionResolver.concreteIndexNamesWithSystemIndexAccess(state, request); final SystemIndexAccessLevel systemIndexAccessLevel = indexNameExpressionResolver.getSystemIndexAccessLevel(); Map> aliases = state.metadata().findAliases(request.aliases(), concreteIndices); + cancellableTask.ensureNotCancelled(); listener.onResponse( new GetAliasesResponse( - postProcess(request, concreteIndices, aliases, state, systemIndexAccessLevel, threadPool.getThreadContext(), systemIndices), + postProcess(request, concreteIndices, aliases, state, systemIndexAccessLevel, threadContext, systemIndices), postProcess(indexNameExpressionResolver, request, state) ) ); @@ -122,7 +135,7 @@ static Map> postProcess( } final Map> finalResponse = Collections.unmodifiableMap(mapBuilder); if (systemIndexAccessLevel != SystemIndexAccessLevel.ALL) { - checkSystemIndexAccess(request, systemIndices, state, finalResponse, systemIndexAccessLevel, threadContext); + checkSystemIndexAccess(systemIndices, state, finalResponse, systemIndexAccessLevel, threadContext); } return finalResponse; } @@ -151,7 +164,6 @@ static Map> postProcess( } private static void checkSystemIndexAccess( - GetAliasesRequest request, SystemIndices systemIndices, ClusterState state, Map> aliasesMap, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java index 0bd51eba85ff9..c74981d475389 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java @@ -20,7 +20,6 @@ 
import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.core.Nullable; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; @@ -102,7 +101,6 @@ protected RecoveryRequest readRequestFrom(StreamInput in) throws IOException { protected void shardOperation(RecoveryRequest request, ShardRouting shardRouting, Task task, ActionListener listener) { ActionListener.completeWith(listener, () -> { assert task instanceof CancellableTask; - runOnShardOperation(); IndexService indexService = indicesService.indexServiceSafe(shardRouting.shardId().getIndex()); IndexShard indexShard = indexService.getShard(shardRouting.shardId().id()); return indexShard.recoveryState(); @@ -123,19 +121,4 @@ protected ClusterBlockException checkGlobalBlock(ClusterState state, RecoveryReq protected ClusterBlockException checkRequestBlock(ClusterState state, RecoveryRequest request, String[] concreteIndices) { return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, concreteIndices); } - - @Nullable // unless running tests that inject extra behaviour - private volatile Runnable onShardOperation; - - private void runOnShardOperation() { - final Runnable onShardOperation = this.onShardOperation; - if (onShardOperation != null) { - onShardOperation.run(); - } - } - - // exposed for tests: inject some extra behaviour that runs when shardOperation() is called - void setOnShardOperation(@Nullable Runnable onShardOperation) { - this.onShardOperation = onShardOperation; - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java index b613eab0d731c..19fa9c3d359fb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java @@ -126,6 +126,7 @@ protected void masterOperation( ) .settings(requestSettings) .setPreserveExisting(request.isPreserveExisting()) + .reopenShards(request.reopen()) .ackTimeout(request.timeout()) .masterNodeTimeout(request.masterNodeTimeout()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsClusterStateUpdateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsClusterStateUpdateRequest.java index f52c659ea55f4..99a43c6594c62 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsClusterStateUpdateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsClusterStateUpdateRequest.java @@ -22,6 +22,8 @@ public class UpdateSettingsClusterStateUpdateRequest extends IndicesClusterState private boolean preserveExisting = false; + private boolean reopenShards = false; + /** * Returns true iff the settings update should only add but not update settings. If the setting already exists * it should not be overwritten by this update. 
The default is false @@ -30,6 +32,20 @@ public boolean isPreserveExisting() { return preserveExisting; } + /** + * Returns true if non-dynamic setting updates should go through, by automatically unassigning shards in the same cluster + * state change as the setting update. The shards will be automatically reassigned after the cluster state update is made. The + * default is false. + */ + public boolean reopenShards() { + return reopenShards; + } + + public UpdateSettingsClusterStateUpdateRequest reopenShards(boolean reopenShards) { + this.reopenShards = reopenShards; + return this; + } + /** * Iff set to true this settings update will only add settings not already set on an index. Existing settings remain * unchanged. diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java index 4e31fbc2b5732..013e568eff7c9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java @@ -47,6 +47,7 @@ public class UpdateSettingsRequest extends AcknowledgedRequesttrue if non-dynamic setting updates should go through, by automatically unassigning shards in the same cluster + * state change as the setting update. The shards will be automatically reassigned after the cluster state update is made. The + * default is false. + */ + public boolean reopen() { + return reopen; + } + + public void reopen(boolean reopen) { + this.reopen = reopen; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -186,6 +203,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_12_0)) { out.writeString(origin); } + if (out.getTransportVersion().onOrAfter(TransportVersions.UPDATE_NON_DYNAMIC_SETTINGS_ADDED)) { + out.writeBoolean(reopen); + } } @Override @@ -243,12 +263,13 @@ public boolean equals(Object o) { && Objects.equals(settings, that.settings) && Objects.equals(indicesOptions, that.indicesOptions) && Objects.equals(preserveExisting, that.preserveExisting) + && Objects.equals(reopen, that.reopen) && Arrays.equals(indices, that.indices); } @Override public int hashCode() { - return Objects.hash(masterNodeTimeout, timeout, settings, indicesOptions, preserveExisting, Arrays.hashCode(indices)); + return Objects.hash(masterNodeTimeout, timeout, settings, indicesOptions, preserveExisting, reopen, Arrays.hashCode(indices)); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java index 4ab0b6bd221e9..af40637db6703 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java @@ -53,6 +53,7 @@ import java.util.stream.Collectors; import static java.util.Collections.emptyMap; +import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.isDataStreamsLifecycleOnlyMode; import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.findConflictingV1Templates; import static 
org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.findConflictingV2Templates; import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.findV2Template; @@ -69,6 +70,7 @@ public class TransportSimulateIndexTemplateAction extends TransportMasterNodeRea private final SystemIndices systemIndices; private final Set indexSettingProviders; private final ClusterSettings clusterSettings; + private final boolean isDslOnlyMode; @Inject public TransportSimulateIndexTemplateAction( @@ -100,6 +102,7 @@ public TransportSimulateIndexTemplateAction( this.systemIndices = systemIndices; this.indexSettingProviders = indexSettingProviders.getIndexSettingProviders(); this.clusterSettings = clusterService.getClusterSettings(); + this.isDslOnlyMode = isDataStreamsLifecycleOnlyMode(clusterService.getSettings()); } @Override @@ -146,6 +149,7 @@ protected void masterOperation( matchingTemplate, request.getIndexName(), stateWithTemplate, + isDslOnlyMode, xContentRegistry, indicesService, systemIndices, @@ -218,6 +222,7 @@ public static Template resolveTemplate( final String matchingTemplate, final String indexName, final ClusterState simulatedState, + final boolean isDslOnlyMode, final NamedXContentRegistry xContentRegistry, final IndicesService indicesService, final SystemIndices systemIndices, @@ -304,6 +309,9 @@ public static Template resolveTemplate( Settings settings = Settings.builder().put(templateSettings).put(additionalSettings.build()).build(); DataStreamLifecycle lifecycle = resolveLifecycle(simulatedState.metadata(), matchingTemplate); + if (template.getDataStreamTemplate() != null && lifecycle == null && isDslOnlyMode) { + lifecycle = DataStreamLifecycle.DEFAULT; + } return new Template(settings, mergedMapping, aliasesByName, lifecycle); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java index b99f436dd86f9..1f35d0b8a1268 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java @@ -39,6 +39,7 @@ import java.util.Map; import java.util.Set; +import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.isDataStreamsLifecycleOnlyMode; import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.findConflictingV1Templates; import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.findConflictingV2Templates; @@ -56,6 +57,7 @@ public class TransportSimulateTemplateAction extends TransportMasterNodeReadActi private final SystemIndices systemIndices; private final Set indexSettingProviders; private final ClusterSettings clusterSettings; + private final boolean isDslOnlyMode; @Inject public TransportSimulateTemplateAction( @@ -87,6 +89,7 @@ public TransportSimulateTemplateAction( this.systemIndices = systemIndices; this.indexSettingProviders = indexSettingProviders.getIndexSettingProviders(); this.clusterSettings = clusterService.getClusterSettings(); + this.isDslOnlyMode = isDataStreamsLifecycleOnlyMode(clusterService.getSettings()); } @Override @@ -162,6 +165,7 @@ protected void masterOperation( matchingTemplate, temporaryIndexName, stateWithTemplate, + isDslOnlyMode, xContentRegistry, indicesService, systemIndices, diff --git 
a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java index 3b6e69d16bae3..f1280587a0c55 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java @@ -8,7 +8,6 @@ package org.elasticsearch.action.bulk; -import org.elasticsearch.Version; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; @@ -19,6 +18,7 @@ import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.RestApiVersion; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.rest.action.document.RestBulkAction; @@ -430,32 +430,32 @@ public void parse( } } + @UpdateForV9 + // Warnings will need to be replaced with XContentEOFException from 9.x + private static void warnBulkActionNotProperlyClosed(String message) { + deprecationLogger.compatibleCritical(STRICT_ACTION_PARSING_WARNING_KEY, message); + } + private static void checkBulkActionIsProperlyClosed(XContentParser parser) throws IOException { XContentParser.Token token; try { token = parser.nextToken(); } catch (XContentEOFException ignore) { - assert Version.CURRENT.major == Version.V_7_17_0.major + 1; - deprecationLogger.compatibleCritical( - STRICT_ACTION_PARSING_WARNING_KEY, + warnBulkActionNotProperlyClosed( "A bulk action wasn't closed properly with the closing brace. Malformed objects are currently accepted but will be " + "rejected in a future version." ); return; } if (token != XContentParser.Token.END_OBJECT) { - assert Version.CURRENT.major == Version.V_7_17_0.major + 1; - deprecationLogger.compatibleCritical( - STRICT_ACTION_PARSING_WARNING_KEY, + warnBulkActionNotProperlyClosed( "A bulk action object contained multiple keys. Additional keys are currently ignored but will be rejected in a " + "future version." ); return; } if (parser.nextToken() != null) { - assert Version.CURRENT.major == Version.V_7_17_0.major + 1; - deprecationLogger.compatibleCritical( - STRICT_ACTION_PARSING_WARNING_KEY, + warnBulkActionNotProperlyClosed( "A bulk action contained trailing data after the closing brace. This is currently ignored but will be rejected in a " + "future version." 
); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/Retry.java b/server/src/main/java/org/elasticsearch/action/bulk/Retry.java index 6503c207e8290..33fb81a6520cb 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/Retry.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/Retry.java @@ -66,7 +66,7 @@ public PlainActionFuture withBackoff( BiConsumer> consumer, BulkRequest bulkRequest ) { - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); withBackoff(consumer, bulkRequest, future); return future; } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 13d10be86bd68..f11baec87de9b 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -15,7 +15,6 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.DocWriteRequest; @@ -272,7 +271,6 @@ protected void doInternalExecute(Task task, BulkRequest bulkRequest, String exec boolean hasIndexRequestsWithPipelines = false; final Metadata metadata = clusterService.state().getMetadata(); - final Version minNodeVersion = clusterService.state().getNodes().getMinNodeVersion(); for (DocWriteRequest actionRequest : bulkRequest.requests) { IndexRequest indexRequest = getIndexWriteRequest(actionRequest); if (indexRequest != null) { @@ -281,7 +279,6 @@ protected void doInternalExecute(Task task, BulkRequest bulkRequest, String exec } if (actionRequest instanceof IndexRequest ir) { - ir.checkAutoIdWithOpTypeCreateSupportedByVersion(minNodeVersion); if (ir.getAutoGeneratedTimestamp() != IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP) { throw new IllegalArgumentException("autoGeneratedTimestamp should not be set externally"); } diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index 2f202dd21ad7c..8b5e077fd85b8 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -676,35 +676,14 @@ public void reset() { autoGeneratedTimestamp = UNSET_AUTO_GENERATED_TIMESTAMP; } - public void checkAutoIdWithOpTypeCreateSupportedByVersion(TransportVersion version) { - if (id == null && opType == OpType.CREATE && version.before(TransportVersions.V_7_5_0)) { - throw new IllegalArgumentException( - "optype create not supported for indexing requests without explicit id below transport version 7500099, current version " - + version - ); - } - } - - public void checkAutoIdWithOpTypeCreateSupportedByVersion(Version version) { - if (id == null && opType == OpType.CREATE && version.before(Version.V_7_5_0)) { - throw new IllegalArgumentException( - "optype create not supported for indexing requests without explicit id until all nodes are on version 7.5.0 or higher," - + " current version " - + version - ); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { - checkAutoIdWithOpTypeCreateSupportedByVersion(out.getTransportVersion()); super.writeTo(out); writeBody(out); } 
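// (Illustrative aside, not part of the patch: the checkAutoIdWithOpTypeCreateSupportedByVersion
// guard deleted from writeTo/writeThin above only rejected requests bound for pre-7.5.0 nodes,
// and an 8.x node only exchanges the wire format with 7.17+ nodes, so the guard had become dead
// code. Newer serialization changes are gated per field on a TransportVersion instead; a sketch,
// where SOME_FLAG_ADDED and someFlag are hypothetical names:
//
//     if (out.getTransportVersion().onOrAfter(TransportVersions.SOME_FLAG_ADDED)) {
//         out.writeBoolean(someFlag);
//     }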
@Override public void writeThin(StreamOutput out) throws IOException { - checkAutoIdWithOpTypeCreateSupportedByVersion(out.getTransportVersion()); super.writeThin(out); writeBody(out); } diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 2f3266f9e0099..b56cb0ca5926c 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -228,27 +228,7 @@ public final void run() { skipShard(iterator); } if (shardsIts.size() > 0) { - assert request.allowPartialSearchResults() != null : "SearchRequest missing setting for allowPartialSearchResults"; - if (request.allowPartialSearchResults() == false) { - final StringBuilder missingShards = new StringBuilder(); - // Fail-fast verification of all shards being available - for (int index = 0; index < shardsIts.size(); index++) { - final SearchShardIterator shardRoutings = shardsIts.get(index); - if (shardRoutings.size() == 0) { - if (missingShards.length() > 0) { - missingShards.append(", "); - } - missingShards.append(shardRoutings.shardId()); - } - } - if (missingShards.length() > 0) { - // Status red - shard is missing all copies and would produce partial results for an index search - final String msg = "Search rejected due to missing shards [" - + missingShards - + "]. Consider using `allow_partial_search_results` setting to bypass this error."; - throw new SearchPhaseExecutionException(getName(), msg, null, ShardSearchFailure.EMPTY_ARRAY); - } - } + doCheckNoMissingShards(getName(), request, shardsIts); Version version = request.minCompatibleShardNode(); if (version != null && Version.CURRENT.minimumCompatibilityVersion().equals(version) == false) { if (checkMinimumVersion(shardsIts) == false) { @@ -434,7 +414,6 @@ public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPha logger.debug(() -> format("%s shards failed for phase: [%s]", numShardFailures, currentPhase.getName()), cause); } onPhaseFailure(currentPhase, "Partial shards failure", null); - return; } else { int discrepancy = getNumShards() - successfulOps.get(); assert discrepancy > 0 : "discrepancy: " + discrepancy; @@ -449,8 +428,8 @@ public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPha ); } onPhaseFailure(currentPhase, "Partial shards failure (" + discrepancy + " shards unavailable)", null); - return; } + return; } if (logger.isTraceEnabled()) { final String resultsFrom = results.getSuccessfulResults() @@ -840,7 +819,7 @@ void executeNext(Runnable runnable, Thread originalThread) { private static final class PendingExecutions { private final int permits; private int permitsTaken = 0; - private ArrayDeque queue = new ArrayDeque<>(); + private final ArrayDeque queue = new ArrayDeque<>(); PendingExecutions(int permits) { assert permits > 0 : "not enough permits: " + permits; diff --git a/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java index cef6bf92cc5e6..6e553f254ee8b 100644 --- a/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java @@ -31,7 +31,6 @@ import org.elasticsearch.threadpool.ThreadPool; import 
org.elasticsearch.transport.Transport; -import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; @@ -127,7 +126,7 @@ private static boolean assertSearchCoordinationThread() { } @Override - public void run() throws IOException { + public void run() { assert assertSearchCoordinationThread(); checkNoMissingShards(); Version version = request.minCompatibleShardNode(); @@ -159,9 +158,7 @@ private void runCoordinatorRewritePhase() { ); final ShardSearchRequest request = canMatchNodeRequest.createShardSearchRequest(buildShardLevelRequest(searchShardIterator)); if (searchShardIterator.prefiltered()) { - CanMatchShardResponse result = new CanMatchShardResponse(searchShardIterator.skip() == false, null); - result.setShardIndex(request.shardRequestIndex()); - results.consumeResult(result, () -> {}); + consumeResult(searchShardIterator.skip() == false, request); continue; } boolean canMatch = true; @@ -178,9 +175,7 @@ private void runCoordinatorRewritePhase() { if (canMatch) { matchedShardLevelRequests.add(searchShardIterator); } else { - CanMatchShardResponse result = new CanMatchShardResponse(canMatch, null); - result.setShardIndex(request.shardRequestIndex()); - results.consumeResult(result, () -> {}); + consumeResult(false, request); } } if (matchedShardLevelRequests.isEmpty()) { @@ -190,29 +185,15 @@ private void runCoordinatorRewritePhase() { } } + private void consumeResult(boolean canMatch, ShardSearchRequest request) { + CanMatchShardResponse result = new CanMatchShardResponse(canMatch, null); + result.setShardIndex(request.shardRequestIndex()); + results.consumeResult(result, () -> {}); + } + private void checkNoMissingShards() { assert assertSearchCoordinationThread(); - assert request.allowPartialSearchResults() != null : "SearchRequest missing setting for allowPartialSearchResults"; - if (request.allowPartialSearchResults() == false) { - final StringBuilder missingShards = new StringBuilder(); - // Fail-fast verification of all shards being available - for (int index = 0; index < shardsIts.size(); index++) { - final SearchShardIterator shardRoutings = shardsIts.get(index); - if (shardRoutings.size() == 0) { - if (missingShards.length() > 0) { - missingShards.append(", "); - } - missingShards.append(shardRoutings.shardId()); - } - } - if (missingShards.length() > 0) { - // Status red - shard is missing all copies and would produce partial results for an index search - final String msg = "Search rejected due to missing shards [" - + missingShards - + "]. 
Consider using `allow_partial_search_results` setting to bypass this error."; - throw new SearchPhaseExecutionException(getName(), msg, null, ShardSearchFailure.EMPTY_ARRAY); - } - } + doCheckNoMissingShards(getName(), request, shardsIts); } private Map> groupByNode(GroupShardsIterator shards) { @@ -425,7 +406,7 @@ public void onFailure(Exception e) { } @Override - protected void doRun() throws IOException { + protected void doRun() { CanMatchPreFilterSearchPhase.this.run(); } }); diff --git a/server/src/main/java/org/elasticsearch/action/search/ClearScrollResponse.java b/server/src/main/java/org/elasticsearch/action/search/ClearScrollResponse.java index 0a7b53ea8b9c4..8b1116951df82 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ClearScrollResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/ClearScrollResponse.java @@ -85,7 +85,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws /** * Parse the clear scroll response body into a new {@link ClearScrollResponse} object */ - public static ClosePointInTimeResponse fromXContent(XContentParser parser) throws IOException { + public static ClosePointInTimeResponse fromXContent(XContentParser parser) { return PARSER.apply(parser, null); } diff --git a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java index dca269f06a3d3..e010e840d3f2d 100644 --- a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java @@ -24,7 +24,6 @@ import org.elasticsearch.search.vectors.KnnScoreDocQueryBuilder; import org.elasticsearch.transport.Transport; -import java.io.IOException; import java.util.ArrayList; import java.util.Comparator; import java.util.List; @@ -71,7 +70,7 @@ final class DfsQueryPhase extends SearchPhase { } @Override - public void run() throws IOException { + public void run() { // TODO we can potentially also consume the actual per shard results from the initial phase here in the aggregateDfs // to free up memory early final CountedCollector counter = new CountedCollector<>( diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java index e7d6eca23498f..cadcd6ca57334 100644 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java @@ -51,7 +51,6 @@ */ public class MultiSearchRequest extends ActionRequest implements CompositeIndicesRequest { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestSearchAction.class); - public static final String TYPES_DEPRECATION_MESSAGE = "[types removal]" + " Specifying types in search requests is deprecated."; public static final String FIRST_LINE_EMPTY_DEPRECATION_MESSAGE = "support for empty first line before any action metadata in msearch API is deprecated " + "and will be removed in the next major version"; diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java index 6f1e8d429edab..57c536f3d371e 100644 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java @@ 
-63,11 +63,4 @@ public MultiSearchRequestBuilder setIndicesOptions(IndicesOptions indicesOptions return this; } - /** - * Sets how many search requests specified in this multi search requests are allowed to be ran concurrently. - */ - public MultiSearchRequestBuilder setMaxConcurrentSearchRequests(int maxConcurrentSearchRequests) { - request().maxConcurrentSearchRequests(maxConcurrentSearchRequests); - return this; - } } diff --git a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java index c6463bcb00f67..92a2a1503aefc 100644 --- a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java @@ -11,27 +11,16 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Objects; -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; - public final class OpenPointInTimeResponse extends ActionResponse implements ToXContentObject { private static final ParseField ID = new ParseField("id"); - private static final ConstructingObjectParser PARSER; - - static { - PARSER = new ConstructingObjectParser<>("open_point_in_time", true, a -> new OpenPointInTimeResponse((String) a[0])); - PARSER.declareField(constructorArg(), (parser, context) -> parser.text(), ID, ObjectParser.ValueType.STRING); - } private final String pointInTimeId; public OpenPointInTimeResponse(String pointInTimeId) { @@ -60,7 +49,4 @@ public String getPointInTimeId() { return pointInTimeId; } - public static OpenPointInTimeResponse fromXContent(XContentParser parser) throws IOException { - return PARSER.parse(parser, null); - } } diff --git a/server/src/main/java/org/elasticsearch/action/search/ParsedScrollId.java b/server/src/main/java/org/elasticsearch/action/search/ParsedScrollId.java index ca68b1865495d..a9f3502bfa631 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ParsedScrollId.java +++ b/server/src/main/java/org/elasticsearch/action/search/ParsedScrollId.java @@ -16,22 +16,15 @@ public class ParsedScrollId { public static final String QUERY_AND_FETCH_TYPE = "queryAndFetch"; - private final String source; - private final String type; private final SearchContextIdForNode[] context; - ParsedScrollId(String source, String type, SearchContextIdForNode[] context) { - this.source = source; + ParsedScrollId(String type, SearchContextIdForNode[] context) { this.type = type; this.context = context; } - public String getSource() { - return source; - } - public String getType() { return type; } diff --git a/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java b/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java index f78d5f4005755..73061298d8f7e 100644 --- a/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java +++ b/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java @@ -39,7 +39,6 @@ import 
java.util.function.Consumer; import java.util.function.Supplier; -import static java.util.stream.Collectors.toCollection; import static org.elasticsearch.action.search.SearchPhaseController.getTopDocsSize; import static org.elasticsearch.action.search.SearchPhaseController.mergeTopDocs; import static org.elasticsearch.action.search.SearchPhaseController.setShardIndex; @@ -106,7 +105,7 @@ public QueryPhaseResultConsumer( @Override public void close() { - Releasables.close(pendingMerges); + pendingMerges.close(); } @Override @@ -269,12 +268,9 @@ public synchronized void close() { assert circuitBreakerBytes >= 0; } - List toRelease = buffer.stream().map(b -> b::releaseAggs).collect(toCollection(ArrayList::new)); - toRelease.add(() -> { - circuitBreaker.addWithoutBreaking(-circuitBreakerBytes); - circuitBreakerBytes = 0; - }); - Releasables.close(toRelease); + releaseBuffer(); + circuitBreaker.addWithoutBreaking(-circuitBreakerBytes); + circuitBreakerBytes = 0; if (hasPendingMerges()) { // This is a theoretically unreachable exception. @@ -350,8 +346,7 @@ public void consume(QuerySearchResult result, Runnable next) { addEstimateAndMaybeBreak(aggsSize); } catch (Exception exc) { result.releaseAggs(); - buffer.forEach(QuerySearchResult::releaseAggs); - buffer.clear(); + releaseBuffer(); onMergeFailure(exc); next.run(); return; @@ -379,6 +374,11 @@ public void consume(QuerySearchResult result, Runnable next) { } } + private void releaseBuffer() { + buffer.forEach(QuerySearchResult::releaseAggs); + buffer.clear(); + } + private synchronized void onMergeFailure(Exception exc) { if (hasFailure()) { assert circuitBreakerBytes == 0; @@ -520,7 +520,7 @@ private record MergeResult( private static class MergeTask { private final List emptyResults; private QuerySearchResult[] buffer; - private long aggsBufferSize; + private final long aggsBufferSize; private Runnable next; private MergeTask(QuerySearchResult[] buffer, long aggsBufferSize, List emptyResults, Runnable next) { diff --git a/server/src/main/java/org/elasticsearch/action/search/RestOpenPointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/RestOpenPointInTimeAction.java index 5de59cc6ce878..815deac07dfcd 100644 --- a/server/src/main/java/org/elasticsearch/action/search/RestOpenPointInTimeAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/RestOpenPointInTimeAction.java @@ -18,7 +18,6 @@ import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; -import java.io.IOException; import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; @@ -37,7 +36,7 @@ public List routes() { } @Override - public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) { final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); final OpenPointInTimeRequest openRequest = new OpenPointInTimeRequest(indices); openRequest.indicesOptions(IndicesOptions.fromRequest(request, OpenPointInTimeRequest.DEFAULT_INDICES_OPTIONS)); diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java b/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java index 2b7105cffe2bb..f10650a6401d6 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java @@ 
-41,7 +41,7 @@ public final class SearchContextId { private final Map shards; private final Map aliasFilter; - private transient Set contextIds; + private final transient Set contextIds; SearchContextId(Map shards, Map aliasFilter) { this.shards = shards; diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java index 88da2fdfa3a9e..9d3eadcc42bf9 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java @@ -7,6 +7,7 @@ */ package org.elasticsearch.action.search; +import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.core.CheckedRunnable; import java.io.IOException; @@ -37,4 +38,28 @@ public void start() { throw new UncheckedIOException(e); } } + + static void doCheckNoMissingShards(String phaseName, SearchRequest request, GroupShardsIterator shardsIts) { + assert request.allowPartialSearchResults() != null : "SearchRequest missing setting for allowPartialSearchResults"; + if (request.allowPartialSearchResults() == false) { + final StringBuilder missingShards = new StringBuilder(); + // Fail-fast verification of all shards being available + for (int index = 0; index < shardsIts.size(); index++) { + final SearchShardIterator shardRoutings = shardsIts.get(index); + if (shardRoutings.size() == 0) { + if (missingShards.isEmpty() == false) { + missingShards.append(", "); + } + missingShards.append(shardRoutings.shardId()); + } + } + if (missingShards.isEmpty() == false) { + // Status red - shard is missing all copies and would produce partial results for an index search + final String msg = "Search rejected due to missing shards [" + + missingShards + + "]. Consider using `allow_partial_search_results` setting to bypass this error."; + throw new SearchPhaseExecutionException(phaseName, msg, null, ShardSearchFailure.EMPTY_ARRAY); + } + } + } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index fb554232503f2..5af5c4c2ec602 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -667,7 +667,7 @@ private static void validateMergeSortValueFormats(Collection statsGroups) { - sourceBuilder().stats(statsGroups); - return this; - } - /** * Indicates whether the response should contain the stored _source for every hit */ diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java index b6a9179b1e956..56b58cd8ced6c 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java @@ -144,10 +144,6 @@ public RestStatus status() { return RestStatus.status(successfulShards, totalShards, shardFailures); } - public SearchResponseSections getInternalResponse() { - return internalResponse; - } - /** * The search hits. 
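     * Returned from {@code getHits()}; callers iterate it to access each matching document.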
*/ @@ -387,7 +383,7 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE } } else if (token == Token.START_ARRAY) { if (RestActions.FAILURES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - while ((token = parser.nextToken()) != Token.END_ARRAY) { + while (parser.nextToken() != Token.END_ARRAY) { failures.add(ShardSearchFailure.fromXContent(parser)); } } else { @@ -479,7 +475,7 @@ public static final class Clusters implements ToXContentFragment, Writeable { private final Map clusterInfo; // not Writeable since it is only needed on the (primary) CCS coordinator - private transient Boolean ccsMinimizeRoundtrips; + private final transient Boolean ccsMinimizeRoundtrips; /** * For use with cross-cluster searches. @@ -985,7 +981,7 @@ public static class Builder { private List failures; private TimeValue took; private Boolean timedOut; - private Cluster original; + private final Cluster original; public Builder(Cluster copyFrom) { this.original = copyFrom; @@ -1167,7 +1163,7 @@ public static Cluster fromXContent(String clusterAlias, XContentParser parser) t } } else if (token == Token.START_ARRAY) { if (RestActions.FAILURES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - while ((token = parser.nextToken()) != Token.END_ARRAY) { + while (parser.nextToken() != Token.END_ARRAY) { failures.add(ShardSearchFailure.fromXContent(parser)); } } else { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java index 35aae0764e251..df16c107a2619 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java @@ -23,7 +23,6 @@ import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.Transport; -import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; @@ -230,7 +229,7 @@ protected SearchPhase sendResponsePhase( ) { return new SearchPhase("fetch") { @Override - public void run() throws IOException { + public void run() { sendResponse(queryPhase, fetchResults); } }; diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index d02958567a873..800ad7afbb8db 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -51,6 +51,7 @@ import org.elasticsearch.transport.TransportActionProxy; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportResponseHandler; @@ -366,7 +367,7 @@ public Map getPendingSearchRequests() { } static class ScrollFreeContextRequest extends TransportRequest { - private ShardSearchContextId contextId; + private final ShardSearchContextId contextId; ScrollFreeContextRequest(ShardSearchContextId contextId) { this.contextId = Objects.requireNonNull(contextId); @@ -390,7 +391,7 @@ public ShardSearchContextId id() { } static class SearchFreeContextRequest extends ScrollFreeContextRequest implements 
IndicesRequest { - private OriginalIndices originalIndices; + private final OriginalIndices originalIndices; SearchFreeContextRequest(OriginalIndices originalIndices, ShardSearchContextId id) { super(id); @@ -428,7 +429,7 @@ public IndicesOptions indicesOptions() { public static class SearchFreeContextResponse extends TransportResponse { - private boolean freed; + private final boolean freed; SearchFreeContextResponse(StreamInput in) throws IOException { freed = in.readBoolean(); @@ -541,13 +542,16 @@ public static void registerRequestHandler(TransportService transportService, Sea ); TransportActionProxy.registerProxyAction(transportService, QUERY_FETCH_SCROLL_ACTION_NAME, true, ScrollQueryFetchSearchResult::new); + TransportRequestHandler shardFetchHandler = (request, channel, task) -> searchService.executeFetchPhase( + request, + (SearchShardTask) task, + new ChannelActionListener<>(channel) + ); transportService.registerRequestHandler( FETCH_ID_SCROLL_ACTION_NAME, EsExecutors.DIRECT_EXECUTOR_SERVICE, ShardFetchRequest::new, - (request, channel, task) -> { - searchService.executeFetchPhase(request, (SearchShardTask) task, new ChannelActionListener<>(channel)); - } + shardFetchHandler ); TransportActionProxy.registerProxyAction(transportService, FETCH_ID_SCROLL_ACTION_NAME, true, FetchSearchResult::new); @@ -557,9 +561,7 @@ public static void registerRequestHandler(TransportService transportService, Sea true, true, ShardFetchSearchRequest::new, - (request, channel, task) -> { - searchService.executeFetchPhase(request, (SearchShardTask) task, new ChannelActionListener<>(channel)); - } + shardFetchHandler ); TransportActionProxy.registerProxyAction(transportService, FETCH_ID_ACTION_NAME, true, FetchSearchResult::new); diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchType.java b/server/src/main/java/org/elasticsearch/action/search/SearchType.java index 519f1ce98a7b6..8e6511db62136 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchType.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchType.java @@ -39,7 +39,7 @@ public enum SearchType { */ public static final SearchType[] CURRENTLY_SUPPORTED = { QUERY_THEN_FETCH, DFS_QUERY_THEN_FETCH }; - private byte id; + private final byte id; SearchType(byte id) { this.id = id; diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java index a4a35789db258..f51c700c8c8c9 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java @@ -8,6 +8,9 @@ package org.elasticsearch.action.search; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; @@ -16,7 +19,6 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -32,6 +34,7 @@ public class TransportMultiSearchAction extends 
HandledTransportAction { + private static final Logger logger = LogManager.getLogger(TransportMultiSearchAction.class); private final int allocatedProcessors; private final ThreadPool threadPool; private final ClusterService clusterService; @@ -47,13 +50,7 @@ public TransportMultiSearchAction( ActionFilters actionFilters, NodeClient client ) { - super( - MultiSearchAction.NAME, - transportService, - actionFilters, - (Writeable.Reader) MultiSearchRequest::new, - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + super(MultiSearchAction.NAME, transportService, actionFilters, MultiSearchRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.threadPool = threadPool; this.clusterService = clusterService; this.allocatedProcessors = EsExecutors.allocatedProcessors(settings); @@ -70,13 +67,7 @@ public TransportMultiSearchAction( LongSupplier relativeTimeProvider, NodeClient client ) { - super( - MultiSearchAction.NAME, - transportService, - actionFilters, - (Writeable.Reader) MultiSearchRequest::new, - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + super(MultiSearchAction.NAME, transportService, actionFilters, MultiSearchRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.threadPool = threadPool; this.clusterService = clusterService; this.allocatedProcessors = allocatedProcessors; @@ -168,6 +159,9 @@ public void onResponse(final SearchResponse searchResponse) { @Override public void onFailure(final Exception e) { + if (ExceptionsHelper.status(e).getStatus() >= 500 && ExceptionsHelper.isNodeOrShardUnavailableTypeException(e) == false) { + logger.warn("TransportMultiSearchAction failure", e); + } handleResponse(request.responseSlot, new MultiSearchResponse.Item(null, e)); } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java index aeb71a3b03d8f..ae3c735e079e9 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java @@ -268,7 +268,7 @@ public void writeTo(StreamOutput out) throws IOException { private class ShardOpenReaderRequestHandler implements TransportRequestHandler { @Override - public void messageReceived(ShardOpenReaderRequest request, TransportChannel channel, Task task) throws Exception { + public void messageReceived(ShardOpenReaderRequest request, TransportChannel channel, Task task) { searchService.openReaderContext( request.getShardId(), request.keepAlive, diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index a2739e2c2a85e..a2d01e226b4ed 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -10,11 +10,13 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.OriginalIndices; +import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsAction; import 
org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; @@ -39,7 +41,6 @@ import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Setting; @@ -159,13 +160,7 @@ public TransportSearchAction( NamedWriteableRegistry namedWriteableRegistry, ExecutorSelector executorSelector ) { - super( - SearchAction.NAME, - transportService, - actionFilters, - (Writeable.Reader) SearchRequest::new, - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + super(SearchAction.NAME, transportService, actionFilters, SearchRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.threadPool = threadPool; this.circuitBreaker = circuitBreakerService.getBreaker(CircuitBreaker.REQUEST); this.searchPhaseController = searchPhaseController; @@ -285,7 +280,24 @@ public long buildTookInMillis() { @Override protected void doExecute(Task task, SearchRequest searchRequest, ActionListener listener) { - executeRequest((SearchTask) task, searchRequest, listener, AsyncSearchActionProvider::new); + ActionListener loggingListener = listener.delegateFailureAndWrap((l, searchResponse) -> { + if (searchResponse.getShardFailures() != null && searchResponse.getShardFailures().length > 0) { + // Deduplicate failures by exception message and index + ShardOperationFailedException[] groupedFailures = ExceptionsHelper.groupBy(searchResponse.getShardFailures()); + for (ShardOperationFailedException f : groupedFailures) { + boolean causeHas500Status = false; + if (f.getCause() != null) { + causeHas500Status = ExceptionsHelper.status(f.getCause()).getStatus() >= 500; + } + if ((f.status().getStatus() >= 500 || causeHas500Status) + && ExceptionsHelper.isNodeOrShardUnavailableTypeException(f.getCause()) == false) { + logger.warn("TransportSearchAction shard failure (partial results response)", f); + } + } + } + l.onResponse(searchResponse); + }); + executeRequest((SearchTask) task, searchRequest, loggingListener, AsyncSearchActionProvider::new); } void executeRequest( @@ -514,7 +526,7 @@ static void ccsRemoteReduce( clusterAlias, remoteClientResponseExecutor ); - remoteClusterClient.search(ccsSearchRequest, new ActionListener() { + remoteClusterClient.search(ccsSearchRequest, new ActionListener<>() { @Override public void onResponse(SearchResponse searchResponse) { // TODO: in CCS fail fast ticket we may need to fail the query if the cluster is marked as FAILED @@ -749,14 +761,7 @@ private static ActionListener createCCSListener( SearchResponse.Clusters clusters, ActionListener originalListener ) { - return new CCSActionListener( - clusterAlias, - skipUnavailable, - countDown, - exceptions, - clusters, - originalListener - ) { + return new CCSActionListener<>(clusterAlias, skipUnavailable, countDown, exceptions, clusters, originalListener) { @Override void innerOnResponse(SearchResponse searchResponse) { // TODO: in CCS fail fast ticket we may need to fail the query if the cluster gets marked as FAILED @@ -1417,7 +1422,6 @@ abstract static class CCSActionListener implements Acti private final AtomicReference exceptions; protected final SearchResponse.Clusters clusters; private final ActionListener originalListener; - protected final long 
startTime; /** * Used by both minimize_roundtrips true and false @@ -1436,7 +1440,6 @@ abstract static class CCSActionListener implements Acti this.exceptions = exceptions; this.clusters = clusters; this.originalListener = originalListener; - this.startTime = System.currentTimeMillis(); } @Override @@ -1454,12 +1457,12 @@ public final void onFailure(Exception e) { SearchResponse.Cluster cluster = clusters.getCluster(clusterAlias); if (skipUnavailable) { if (cluster != null) { - ccsClusterInfoUpdate(f, clusters, clusterAlias, skipUnavailable); + ccsClusterInfoUpdate(f, clusters, clusterAlias, true); } // skippedClusters.incrementAndGet(); } else { if (cluster != null) { - ccsClusterInfoUpdate(f, clusters, clusterAlias, skipUnavailable); + ccsClusterInfoUpdate(f, clusters, clusterAlias, false); } Exception exception = e; if (RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.equals(clusterAlias) == false) { diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java index 632fbafa0536b..ffaecedb62bba 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java @@ -93,7 +93,7 @@ static ParsedScrollId parseScrollId(String scrollId) { if (in.available() > 0) { throw new IllegalArgumentException("Not all bytes were read"); } - return new ParsedScrollId(scrollId, type, context); + return new ParsedScrollId(type, context); } catch (Exception e) { throw new IllegalArgumentException("Cannot parse scroll id", e); } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java index d097b10b7162d..9874bcfb56c6a 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java @@ -8,7 +8,11 @@ package org.elasticsearch.action.search; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.cluster.service.ClusterService; @@ -22,7 +26,7 @@ import static org.elasticsearch.action.search.TransportSearchHelper.parseScrollId; public class TransportSearchScrollAction extends HandledTransportAction { - + private static final Logger logger = LogManager.getLogger(TransportSearchScrollAction.class); private final ClusterService clusterService; private final SearchTransportService searchTransportService; @@ -40,6 +44,19 @@ public TransportSearchScrollAction( @Override protected void doExecute(Task task, SearchScrollRequest request, ActionListener listener) { + ActionListener loggingListener = listener.delegateFailureAndWrap((l, searchResponse) -> { + if (searchResponse.getShardFailures() != null && searchResponse.getShardFailures().length > 0) { + ShardOperationFailedException[] groupedFailures = ExceptionsHelper.groupBy(searchResponse.getShardFailures()); + for (ShardOperationFailedException f : groupedFailures) { + Throwable cause = f.getCause() == null ? 
f : f.getCause(); + if (ExceptionsHelper.status(cause).getStatus() >= 500 + && ExceptionsHelper.isNodeOrShardUnavailableTypeException(cause) == false) { + logger.warn("TransportSearchScrollAction shard failure (partial results response)", f); + } + } + } + l.onResponse(searchResponse); + }); try { ParsedScrollId scrollId = parseScrollId(request.scrollId()); Runnable action = switch (scrollId.getType()) { @@ -50,7 +67,7 @@ protected void doExecute(Task task, SearchScrollRequest request, ActionListener< request, (SearchTask) task, scrollId, - listener + loggingListener ); case QUERY_AND_FETCH_TYPE -> // TODO can we get rid of this? new SearchScrollQueryAndFetchAsyncAction( @@ -60,7 +77,7 @@ protected void doExecute(Task task, SearchScrollRequest request, ActionListener< request, (SearchTask) task, scrollId, - listener + loggingListener ); default -> throw new IllegalArgumentException("Scroll id type [" + scrollId.getType() + "] unrecognized"); }; diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java index 4c8ade4d78ead..47b98270dfbfc 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java @@ -79,7 +79,6 @@ public TransportSearchShardsAction( @Override protected void doExecute(Task task, SearchShardsRequest searchShardsRequest, ActionListener listener) { - assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.SEARCH_COORDINATION); final long relativeStartNanos = System.nanoTime(); SearchRequest original = new SearchRequest(searchShardsRequest.indices()).indicesOptions(searchShardsRequest.indicesOptions()) .routing(searchShardsRequest.routing()) diff --git a/server/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java b/server/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java index b360443a396d1..721983b6af0e7 100644 --- a/server/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java +++ b/server/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java @@ -30,10 +30,6 @@ public class PlainActionFuture implements ActionFuture, ActionListener { - public static PlainActionFuture newFuture() { - return new PlainActionFuture<>(); - } - @Override public void onResponse(T result) { set(result); @@ -442,13 +438,13 @@ private static RuntimeException unwrapEsException(ElasticsearchException esEx) { } public static T get(CheckedConsumer, E> e) throws E { - PlainActionFuture fut = newFuture(); + PlainActionFuture fut = new PlainActionFuture<>(); e.accept(fut); return fut.actionGet(); } public static T get(CheckedConsumer, E> e, long timeout, TimeUnit unit) throws E { - PlainActionFuture fut = newFuture(); + PlainActionFuture fut = new PlainActionFuture<>(); e.accept(fut); return fut.actionGet(timeout, unit); } diff --git a/server/src/main/java/org/elasticsearch/action/support/RefCountingListener.java b/server/src/main/java/org/elasticsearch/action/support/RefCountingListener.java index f4d580a44621f..ff5c3115e569b 100644 --- a/server/src/main/java/org/elasticsearch/action/support/RefCountingListener.java +++ b/server/src/main/java/org/elasticsearch/action/support/RefCountingListener.java @@ -10,13 +10,13 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.core.CheckedConsumer; import 
org.elasticsearch.core.Releasable; import java.util.Objects; import java.util.concurrent.Semaphore; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Consumer; /** * A mechanism to complete a listener on the completion of some (dynamic) collection of other actions. Basic usage is as follows: @@ -176,7 +176,7 @@ public String toString() { * It is also invalid to complete the returned listener more than once. Doing so will trip an assertion if assertions are enabled, but * will be ignored otherwise. */ - public ActionListener acquire(Consumer consumer) { + public ActionListener acquire(CheckedConsumer consumer) { final var ref = refs.acquire(); final var consumerRef = new AtomicReference<>(Objects.requireNonNull(consumer)); return new ActionListener<>() { @@ -187,10 +187,12 @@ public void onResponse(Response response) { if (acquiredConsumer == null) { assert false : "already closed"; } else { - acquiredConsumer.accept(response); + try { + acquiredConsumer.accept(response); + } catch (Exception e) { + addException(e); + } } - } catch (Exception e) { - addException(e); } } diff --git a/server/src/main/java/org/elasticsearch/action/support/TransportLocalClusterStateAction.java b/server/src/main/java/org/elasticsearch/action/support/TransportLocalClusterStateAction.java new file mode 100644 index 0000000000000..6af5a3a1e8384 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/support/TransportLocalClusterStateAction.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.support; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.transport.TransportService; + +import java.util.concurrent.Executor; + +/** + * Analogue of {@link org.elasticsearch.action.support.master.TransportMasterNodeReadAction} except that it runs on the local node rather + * than delegating to the master. 
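+ * Subclasses implement {@code localClusterStateOperation} and run against the cluster state already applied on this
+ * node, so no request is forwarded to the elected master.
+ * <p>
+ * A minimal sketch of a subclass (the request, response and action names are illustrative only, not part of this change):
+ * <pre>{@code
+ * class TransportGetWidgetsAction extends TransportLocalClusterStateAction<GetWidgetsRequest, GetWidgetsResponse> {
+ *     // constructor wiring clusterService, transportService, actionFilters and an executor is omitted
+ *
+ *     @Override
+ *     protected ClusterBlockException checkBlock(GetWidgetsRequest request, ClusterState state) {
+ *         // fail fast if reads of cluster metadata are currently blocked
+ *         return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ);
+ *     }
+ *
+ *     @Override
+ *     protected void localClusterStateOperation(
+ *         Task task,
+ *         GetWidgetsRequest request,
+ *         ClusterState state,
+ *         ActionListener<GetWidgetsResponse> listener
+ *     ) {
+ *         // answer directly from the locally-applied cluster state
+ *         listener.onResponse(new GetWidgetsResponse(state.metadata()));
+ *     }
+ * }
+ * }</pre>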
+ */ +public abstract class TransportLocalClusterStateAction extends + HandledTransportAction { + + protected final ClusterService clusterService; + protected final Executor executor; + + protected TransportLocalClusterStateAction( + String actionName, + ClusterService clusterService, + TransportService transportService, + ActionFilters actionFilters, + Writeable.Reader requestReader, + Executor executor + ) { + // TODO replace DIRECT_EXECUTOR_SERVICE when removing workaround for https://github.com/elastic/elasticsearch/issues/97916 + super(actionName, transportService, actionFilters, requestReader, EsExecutors.DIRECT_EXECUTOR_SERVICE); + this.clusterService = clusterService; + this.executor = executor; + } + + protected abstract ClusterBlockException checkBlock(Request request, ClusterState state); + + @Override + protected final void doExecute(Task task, Request request, ActionListener listener) { + final var state = clusterService.state(); + final var clusterBlockException = checkBlock(request, state); + if (clusterBlockException != null) { + throw clusterBlockException; + } + + // Workaround for https://github.com/elastic/elasticsearch/issues/97916 - TODO remove this when we can + executor.execute(ActionRunnable.wrap(listener, l -> localClusterStateOperation(task, request, state, l))); + } + + protected abstract void localClusterStateOperation(Task task, Request request, ClusterState state, ActionListener listener) + throws Exception; +} diff --git a/server/src/main/java/org/elasticsearch/client/internal/Client.java b/server/src/main/java/org/elasticsearch/client/internal/Client.java index 89cb764549767..5ae3870338c35 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/Client.java +++ b/server/src/main/java/org/elasticsearch/client/internal/Client.java @@ -55,7 +55,6 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Nullable; -import org.elasticsearch.core.Releasable; import java.util.Map; import java.util.concurrent.Executor; @@ -71,7 +70,7 @@ * * @see org.elasticsearch.node.Node#client() */ -public interface Client extends ElasticsearchClient, Releasable { +public interface Client extends ElasticsearchClient { // Note: This setting is registered only for bwc. The value is never read. 
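    // It remains registered so existing configurations that still set client.type continue to parse.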
Setting CLIENT_TYPE_SETTING_S = new Setting<>("client.type", "node", (s) -> { diff --git a/server/src/main/java/org/elasticsearch/client/internal/FilterClient.java b/server/src/main/java/org/elasticsearch/client/internal/FilterClient.java index 235fc0a150066..53a8e2e189244 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/FilterClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/FilterClient.java @@ -45,11 +45,6 @@ protected FilterClient(Settings settings, ThreadPool threadPool, Client in) { this.in = in; } - @Override - public void close() { - in().close(); - } - @Override protected void doExecute( ActionType action, diff --git a/server/src/main/java/org/elasticsearch/client/internal/node/NodeClient.java b/server/src/main/java/org/elasticsearch/client/internal/node/NodeClient.java index b1dfc22cf27d3..f75997d92b678 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/node/NodeClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/node/NodeClient.java @@ -75,11 +75,6 @@ public List getActionNames() { return actions.keySet().stream().map(ActionType::name).toList(); } - @Override - public void close() { - // nothing really to do - } - @Override public void doExecute( ActionType action, @@ -115,7 +110,7 @@ public Task exe transportAction(action), request, localConnection, - new SafelyWrappedActionListener<>(listener) + ActionListener.assertOnce(listener) ); } @@ -153,27 +148,4 @@ public NamedWriteableRegistry getNamedWriteableRegistry() { return namedWriteableRegistry; } - private record SafelyWrappedActionListener(ActionListener listener) implements ActionListener { - - @Override - public void onResponse(Response response) { - try { - listener.onResponse(response); - } catch (Exception e) { - assert false : new AssertionError("callback must handle its own exceptions", e); - throw e; - } - } - - @Override - public void onFailure(Exception e) { - try { - listener.onFailure(e); - } catch (Exception ex) { - ex.addSuppressed(e); - assert false : new AssertionError("callback must handle its own exceptions", ex); - throw ex; - } - } - } } diff --git a/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java b/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java index 98bad4d3dd74c..c1946e33c157f 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java @@ -361,7 +361,7 @@ public final Ac ActionType action, Request request ) { - PlainActionFuture actionFuture = PlainActionFuture.newFuture(); + PlainActionFuture actionFuture = new PlainActionFuture<>(); execute(action, request, actionFuture); return actionFuture; } diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterFeatures.java b/server/src/main/java/org/elasticsearch/cluster/ClusterFeatures.java index ae68bfafdd6c5..95cc53376af59 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterFeatures.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterFeatures.java @@ -12,12 +12,14 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ChunkedToXContentObject; +import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xcontent.ToXContent; import java.io.IOException; import java.util.ArrayList; import 
java.util.Arrays; +import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.IdentityHashMap; @@ -50,24 +52,24 @@ public ClusterFeatures(Map> nodeFeatures) { .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, e -> Set.copyOf(e.getValue()))); } - private Set calculateAllNodeFeatures() { + public static Set calculateAllNodeFeatures(Collection> nodeFeatures) { if (nodeFeatures.isEmpty()) { return Set.of(); } Set allNodeFeatures = null; - for (Set featureSet : nodeFeatures.values()) { + for (Set featureSet : nodeFeatures) { if (allNodeFeatures == null) { allNodeFeatures = new HashSet<>(featureSet); } else { allNodeFeatures.retainAll(featureSet); } } - return Set.copyOf(allNodeFeatures); + return allNodeFeatures; } /** - * Returns the features reported by each node in the cluster. + * The features reported by each node in the cluster. *

    * NOTE: This should not be used directly. * Please use {@link org.elasticsearch.features.FeatureService#clusterHasFeature} instead. @@ -76,17 +78,28 @@ public Map> nodeFeatures() { return nodeFeatures; } + /** + * The features in all nodes in the cluster. + *

    + * NOTE: This should not be used directly. + * Please use {@link org.elasticsearch.features.FeatureService#clusterHasFeature} instead. + */ + public Set allNodeFeatures() { + if (allNodeFeatures == null) { + allNodeFeatures = Set.copyOf(calculateAllNodeFeatures(nodeFeatures.values())); + } + return allNodeFeatures; + } + /** * {@code true} if {@code feature} is present on all nodes in the cluster. *

    * NOTE: This should not be used directly, as it does not read historical features. * Please use {@link org.elasticsearch.features.FeatureService#clusterHasFeature} instead. */ + @SuppressForbidden(reason = "directly reading cluster features") public boolean clusterHasFeature(NodeFeature feature) { - if (allNodeFeatures == null) { - allNodeFeatures = calculateAllNodeFeatures(); - } - return allNodeFeatures.contains(feature.id()); + return allNodeFeatures().contains(feature.id()); } /** diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java index ee94008372dab..b109f67b7fa41 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -8,7 +8,6 @@ package org.elasticsearch.cluster; -import org.elasticsearch.Version; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.ComponentTemplateMetadata; @@ -65,6 +64,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.health.metadata.HealthMetadataService; import org.elasticsearch.health.node.selection.HealthNodeTaskExecutor; @@ -373,6 +373,7 @@ private static void addAllocationDecider(Map, AllocationDecider> decide } } + @UpdateForV9 // in v9 there is only one allocator private static ShardsAllocator createShardsAllocator( Settings settings, ClusterSettings clusterSettings, @@ -404,7 +405,6 @@ private static ShardsAllocator createShardsAllocator( }); } String allocatorName = SHARDS_ALLOCATOR_TYPE_SETTING.get(settings); - assert Version.CURRENT.major == Version.V_7_17_0.major + 1; // in v9 there is only one allocator Supplier allocatorSupplier = allocators.get(allocatorName); if (allocatorSupplier == null) { throw new IllegalArgumentException("Unknown ShardsAllocator [" + allocatorName + "]"); diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index 565e43455d8d7..884441aa41798 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -1047,7 +1047,7 @@ private static TransportVersion inferTransportVersion(DiscoveryNode node) { TransportVersion tv; if (node.getVersion().before(Version.V_8_8_0)) { // 1-to-1 mapping between Version and TransportVersion - tv = TransportVersion.fromId(node.getVersion().id); + tv = TransportVersion.fromId(node.getPre811VersionId().getAsInt()); } else { // use the lowest value it could be for now tv = INFERRED_TRANSPORT_VERSION; diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java index 113e8b0a7f388..402e170f1ea53 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java @@ -183,7 +183,7 @@ public Join handleStartJoin(StartJoinRequest startJoinRequest) { final String reason; if (electionWon == false) { reason = "failed election"; - } else if 
(startJoinRequest.getSourceNode().equals(localNode)) { + } else if (startJoinRequest.getMasterCandidateNode().equals(localNode)) { reason = "bumping term"; } else { reason = "standing down as leader"; @@ -200,7 +200,13 @@ public Join handleStartJoin(StartJoinRequest startJoinRequest) { joinVotes = new VoteCollection(); publishVotes = new VoteCollection(); - return new Join(localNode, startJoinRequest.getSourceNode(), getCurrentTerm(), getLastAcceptedTerm(), getLastAcceptedVersion()); + return new Join( + localNode, + startJoinRequest.getMasterCandidateNode(), + getCurrentTerm(), + getLastAcceptedTerm(), + getLastAcceptedVersion() + ); } /** @@ -211,12 +217,12 @@ public Join handleStartJoin(StartJoinRequest startJoinRequest) { * @throws CoordinationStateRejectedException if the arguments were incompatible with the current state of this object. */ public boolean handleJoin(Join join) { - assert join.targetMatches(localNode) : "handling join " + join + " for the wrong node " + localNode; + assert join.masterCandidateMatches(localNode) : "handling join " + join + " for the wrong node " + localNode; - if (join.getTerm() != getCurrentTerm()) { - logger.debug("handleJoin: ignored join due to term mismatch (expected: [{}], actual: [{}])", getCurrentTerm(), join.getTerm()); + if (join.term() != getCurrentTerm()) { + logger.debug("handleJoin: ignored join due to term mismatch (expected: [{}], actual: [{}])", getCurrentTerm(), join.term()); throw new CoordinationStateRejectedException( - "incoming term " + join.getTerm() + " does not match current term " + getCurrentTerm() + "incoming term " + join.term() + " does not match current term " + getCurrentTerm() ); } @@ -226,30 +232,30 @@ public boolean handleJoin(Join join) { } final long lastAcceptedTerm = getLastAcceptedTerm(); - if (join.getLastAcceptedTerm() > lastAcceptedTerm) { + if (join.lastAcceptedTerm() > lastAcceptedTerm) { logger.debug( "handleJoin: ignored join as joiner has a better last accepted term (expected: <=[{}], actual: [{}])", lastAcceptedTerm, - join.getLastAcceptedTerm() + join.lastAcceptedTerm() ); throw new CoordinationStateRejectedException( "incoming last accepted term " - + join.getLastAcceptedTerm() + + join.lastAcceptedTerm() + " of join higher than current last accepted term " + lastAcceptedTerm ); } - if (join.getLastAcceptedTerm() == lastAcceptedTerm && join.getLastAcceptedVersion() > getLastAcceptedVersion()) { + if (join.lastAcceptedTerm() == lastAcceptedTerm && join.lastAcceptedVersion() > getLastAcceptedVersion()) { logger.debug( "handleJoin: ignored join as joiner has a better last accepted version (expected: <=[{}], actual: [{}]) in term {}", getLastAcceptedVersion(), - join.getLastAcceptedVersion(), + join.lastAcceptedVersion(), lastAcceptedTerm ); throw new CoordinationStateRejectedException( "incoming last accepted version " - + join.getLastAcceptedVersion() + + join.lastAcceptedVersion() + " of join higher than current last accepted version " + getLastAcceptedVersion() + " in term " @@ -274,7 +280,7 @@ public boolean handleJoin(Join join) { logger.debug( "handleJoin: added join {} from [{}] for election, electionWon={} lastAcceptedTerm={} lastAcceptedVersion={}", join, - join.getSourceNode(), + join.votingNode(), electionWon, lastAcceptedTerm, getLastAcceptedVersion() @@ -592,7 +598,7 @@ public boolean addVote(DiscoveryNode sourceNode) { } public boolean addJoinVote(Join join) { - final boolean added = addVote(join.getSourceNode()); + final boolean added = addVote(join.votingNode()); if (added) { 
joins.add(join); } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 08e31e11ae256..c3c757bb335e4 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -63,6 +63,7 @@ import org.elasticsearch.discovery.SeedHostsProvider; import org.elasticsearch.discovery.SeedHostsResolver; import org.elasticsearch.discovery.TransportAddressConnector; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.monitor.NodeHealthService; import org.elasticsearch.monitor.StatusInfo; @@ -212,7 +213,7 @@ public Coordinator( LeaderHeartbeatService leaderHeartbeatService, PreVoteCollector.Factory preVoteCollectorFactory, CompatibilityVersions compatibilityVersions, - Set features + FeatureService featureService ) { this.settings = settings; this.transportService = transportService; @@ -238,7 +239,7 @@ public Coordinator( reconfigurator::maybeReconfigureAfterNewMasterIsElected, this::getLatestStoredStateAfterWinningAnElection, compatibilityVersions, - features + featureService ); this.joinValidationService = new JoinValidationService( settings, @@ -488,7 +489,7 @@ PublishWithJoinResponse handlePublishRequest(PublishRequest publishRequest) { } private static Optional joinWithDestination(Optional lastJoin, DiscoveryNode leader, long term) { - if (lastJoin.isPresent() && lastJoin.get().targetMatches(leader) && lastJoin.get().getTerm() == term) { + if (lastJoin.isPresent() && lastJoin.get().masterCandidateMatches(leader) && lastJoin.get().term() == term) { return lastJoin; } @@ -565,6 +566,10 @@ public void onFailure(Exception e) { }); } + /** + * Attempts to abdicate master position to a new master-eligible node in the cluster. + * Broadcasts {@link StartJoinRequest} for {@param newMaster} to each member of the cluster. 
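+     * Each recipient responds with a {@link Join} vote for {@code newMaster}, re-forming the cluster around it in the new term.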
+ */ private void abdicateTo(DiscoveryNode newMaster) { assert Thread.holdsLock(mutex); assert mode == Mode.LEADER : "expected to be leader on abdication but was " + mode; @@ -594,7 +599,7 @@ private Optional ensureTermAtLeast(DiscoveryNode sourceNode, long targetTe private Join joinLeaderInTerm(StartJoinRequest startJoinRequest) { synchronized (mutex) { - logger.debug("joinLeaderInTerm: for [{}] with term {}", startJoinRequest.getSourceNode(), startJoinRequest.getTerm()); + logger.debug("joinLeaderInTerm: for [{}] with term {}", startJoinRequest.getMasterCandidateNode(), startJoinRequest.getTerm()); final Join join = coordinationState.get().handleStartJoin(startJoinRequest); lastJoin = Optional.of(join); peerFinder.setCurrentTerm(getCurrentTerm()); @@ -775,7 +780,7 @@ private void processJoinRequest(JoinRequest joinRequest, ActionListener jo final CoordinationState coordState = coordinationState.get(); final boolean prevElectionWon = coordState.electionWon() - && optionalJoin.stream().allMatch(j -> j.getTerm() <= getCurrentTerm()); + && optionalJoin.stream().allMatch(j -> j.term() <= getCurrentTerm()); optionalJoin.ifPresent(this::handleJoin); joinAccumulator.handleJoinRequest( @@ -1394,7 +1399,7 @@ boolean missingJoinVoteFrom(DiscoveryNode node) { private void handleJoin(Join join) { synchronized (mutex) { - ensureTermAtLeast(getLocalNode(), join.getTerm()).ifPresent(this::handleJoin); + ensureTermAtLeast(getLocalNode(), join.term()).ifPresent(this::handleJoin); if (coordinationState.get().electionWon()) { // If we have already won the election then the actual join does not matter for election purposes, so swallow any exception @@ -2070,7 +2075,7 @@ private void cancelTimeoutHandlers() { } private void handleAssociatedJoin(Join join) { - if (join.getTerm() == getCurrentTerm() && missingJoinVoteFrom(join.getSourceNode())) { + if (join.term() == getCurrentTerm() && missingJoinVoteFrom(join.votingNode())) { logger.trace("handling {}", join); handleJoin(join); } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Join.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Join.java index aacbed61b095a..d1fe472278f12 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Join.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Join.java @@ -16,110 +16,44 @@ /** * Triggered by a {@link StartJoinRequest}, instances of this class represent join votes, - * and have a source and target node. The source node is the node that provides the vote, - * and the target node is the node for which this vote is cast. A node will only cast - * a single vote per term, and this for a unique target node. The vote also carries - * information about the current state of the node that provided the vote, so that - * the receiver of the vote can determine if it has a more up-to-date state than the - * source node. + * and have a voting and master-candidate node. The voting node is the node that provides + * the vote, and the master-candidate node is the node for which this vote is cast. A join + * vote is cast to reform the cluster around a particular master-eligible node, to elect + * that node as the new master in a new term. + * + * A voting node will only cast a single vote per term. The vote includes information about + * the current state of the node casting the vote, so that the candidate for the vote can + * determine whether it has a more up-to-date state than the voting node. 
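+ * A join only counts towards an election when its term matches the candidate's current term; see {@link CoordinationState#handleJoin}.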
+ * + * @param votingNode The node casting a vote for a master candidate. + * @param masterCandidateNode The master candidate node receiving the vote for election. + * @param term + * @param lastAcceptedTerm + * @param lastAcceptedVersion */ -public class Join implements Writeable { - private final DiscoveryNode sourceNode; - private final DiscoveryNode targetNode; - private final long term; - private final long lastAcceptedTerm; - private final long lastAcceptedVersion; - - public Join(DiscoveryNode sourceNode, DiscoveryNode targetNode, long term, long lastAcceptedTerm, long lastAcceptedVersion) { +public record Join(DiscoveryNode votingNode, DiscoveryNode masterCandidateNode, long term, long lastAcceptedTerm, long lastAcceptedVersion) + implements + Writeable { + public Join { assert term >= 0; assert lastAcceptedTerm >= 0; assert lastAcceptedVersion >= 0; - - this.sourceNode = sourceNode; - this.targetNode = targetNode; - this.term = term; - this.lastAcceptedTerm = lastAcceptedTerm; - this.lastAcceptedVersion = lastAcceptedVersion; } public Join(StreamInput in) throws IOException { - sourceNode = new DiscoveryNode(in); - targetNode = new DiscoveryNode(in); - term = in.readLong(); - lastAcceptedTerm = in.readLong(); - lastAcceptedVersion = in.readLong(); + this(new DiscoveryNode(in), new DiscoveryNode(in), in.readLong(), in.readLong(), in.readLong()); } @Override public void writeTo(StreamOutput out) throws IOException { - sourceNode.writeTo(out); - targetNode.writeTo(out); + votingNode.writeTo(out); + masterCandidateNode.writeTo(out); out.writeLong(term); out.writeLong(lastAcceptedTerm); out.writeLong(lastAcceptedVersion); } - public DiscoveryNode getSourceNode() { - return sourceNode; - } - - public DiscoveryNode getTargetNode() { - return targetNode; - } - - public boolean targetMatches(DiscoveryNode matchingNode) { - return targetNode.getId().equals(matchingNode.getId()); - } - - public long getLastAcceptedVersion() { - return lastAcceptedVersion; - } - - public long getTerm() { - return term; - } - - public long getLastAcceptedTerm() { - return lastAcceptedTerm; - } - - @Override - public String toString() { - return "Join{" - + "term=" - + term - + ", lastAcceptedTerm=" - + lastAcceptedTerm - + ", lastAcceptedVersion=" - + lastAcceptedVersion - + ", sourceNode=" - + sourceNode - + ", targetNode=" - + targetNode - + '}'; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - Join join = (Join) o; - - if (sourceNode.equals(join.sourceNode) == false) return false; - if (targetNode.equals(join.targetNode) == false) return false; - if (lastAcceptedVersion != join.lastAcceptedVersion) return false; - if (term != join.term) return false; - return lastAcceptedTerm == join.lastAcceptedTerm; - } - - @Override - public int hashCode() { - int result = (int) (lastAcceptedVersion ^ (lastAcceptedVersion >>> 32)); - result = 31 * result + sourceNode.hashCode(); - result = 31 * result + targetNode.hashCode(); - result = 31 * result + (int) (term ^ (term >>> 32)); - result = 31 * result + (int) (lastAcceptedTerm ^ (lastAcceptedTerm >>> 32)); - return result; + public boolean masterCandidateMatches(DiscoveryNode matchingNode) { + return masterCandidateNode.getId().equals(matchingNode.getId()); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java index e5dee6aeb67e2..d11d8ade2a036 100644 --- 
a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java @@ -33,6 +33,7 @@ import org.elasticsearch.core.Releasables; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.monitor.NodeHealthService; import org.elasticsearch.monitor.StatusInfo; @@ -62,6 +63,12 @@ import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.monitor.StatusInfo.Status.UNHEALTHY; +/** + * Handler for cluster join commands. A master-eligible node running for election will + * send a {@link StartJoinRequest} to each voting node in the cluster. A node that becomes + * aware of a new term and master will send a {@link Join} request to the new master, to + * re-form the cluster around the new master node. + */ public class JoinHelper { private static final Logger logger = LogManager.getLogger(JoinHelper.class); @@ -100,12 +107,12 @@ public class JoinHelper { Function maybeReconfigureAfterMasterElection, ObjLongConsumer> latestStoredStateSupplier, CompatibilityVersions compatibilityVersions, - Set features + FeatureService featureService ) { this.joinTaskQueue = masterService.createTaskQueue( "node-join", Priority.URGENT, - new NodeJoinExecutor(allocationService, rerouteService, maybeReconfigureAfterMasterElection) + new NodeJoinExecutor(allocationService, rerouteService, featureService, maybeReconfigureAfterMasterElection) ); this.clusterApplier = clusterApplier; this.transportService = transportService; @@ -115,7 +122,7 @@ public class JoinHelper { this.joinReasonService = joinReasonService; this.latestStoredStateSupplier = latestStoredStateSupplier; this.compatibilityVersions = compatibilityVersions; - this.features = features; + this.features = featureService.getNodeFeatures().keySet(); transportService.registerRequestHandler( JOIN_ACTION_NAME, @@ -136,7 +143,7 @@ public class JoinHelper { false, StartJoinRequest::new, (request, channel, task) -> { - final DiscoveryNode destination = request.getSourceNode(); + final DiscoveryNode destination = request.getMasterCandidateNode(); sendJoinRequest(destination, currentTermSupplier.getAsLong(), Optional.of(joinLeaderInTerm.apply(request))); channel.sendResponse(Empty.INSTANCE); } @@ -368,8 +375,8 @@ public void onFailure(Exception e) { } void sendStartJoinRequest(final StartJoinRequest startJoinRequest, final DiscoveryNode destination) { - assert startJoinRequest.getSourceNode().isMasterNode() - : "sending start-join request for master-ineligible " + startJoinRequest.getSourceNode(); + assert startJoinRequest.getMasterCandidateNode().isMasterNode() + : "sending start-join request for master-ineligible " + startJoinRequest.getMasterCandidateNode(); transportService.sendRequest(destination, START_JOIN_ACTION_NAME, startJoinRequest, new TransportResponseHandler.Empty() { @Override public Executor executor(ThreadPool threadPool) { diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinRequest.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinRequest.java index 2ba65873738a0..a6a2f454694ae 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinRequest.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinRequest.java @@ -60,7 +60,7 @@ public JoinRequest( long minimumTerm, Optional optionalJoin ) { - 
assert optionalJoin.isPresent() == false || optionalJoin.get().getSourceNode().equals(sourceNode); + assert optionalJoin.isPresent() == false || optionalJoin.get().votingNode().equals(sourceNode); this.sourceNode = sourceNode; this.compatibilityVersions = compatibilityVersions; this.features = features; @@ -76,7 +76,10 @@ public JoinRequest(StreamInput in) throws IOException { } else { // there's a 1-1 mapping from Version to TransportVersion before 8.8.0 // no known mapping versions here - compatibilityVersions = new CompatibilityVersions(TransportVersion.fromId(sourceNode.getVersion().id), Map.of()); + compatibilityVersions = new CompatibilityVersions( + TransportVersion.fromId(sourceNode.getPre811VersionId().getAsInt()), + Map.of() + ); } if (in.getTransportVersion().onOrAfter(TransportVersions.CLUSTER_FEATURES_ADDED)) { features = in.readCollectionAsSet(StreamInput::readString); @@ -121,7 +124,7 @@ public long getTerm() { // If the join is also present then its term will normally equal the corresponding term, but we do not require callers to // obtain the term and the join in a synchronized fashion so it's possible that they disagree. Also older nodes do not share the // minimum term, so for BWC we can take it from the join if present. - return Math.max(minimumTerm, optionalJoin.map(Join::getTerm).orElse(0L)); + return Math.max(minimumTerm, optionalJoin.map(Join::term).orElse(0L)); } public Optional getOptionalJoin() { diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java index 00086c42ed4ae..480f1d5503d61 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java @@ -12,6 +12,7 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.ClusterFeatures; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.NotMasterException; @@ -25,6 +26,7 @@ import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.Priority; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; @@ -34,6 +36,7 @@ import java.util.Collections; import java.util.Comparator; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; @@ -55,19 +58,22 @@ public class NodeJoinExecutor implements ClusterStateTaskExecutor { private final AllocationService allocationService; private final RerouteService rerouteService; + private final FeatureService featureService; private final Function maybeReconfigureAfterMasterElection; - public NodeJoinExecutor(AllocationService allocationService, RerouteService rerouteService) { - this(allocationService, rerouteService, Function.identity()); + public NodeJoinExecutor(AllocationService allocationService, RerouteService rerouteService, FeatureService featureService) { + this(allocationService, rerouteService, featureService, Function.identity()); } public NodeJoinExecutor( AllocationService allocationService, RerouteService rerouteService, 
+ FeatureService featureService, Function maybeReconfigureAfterMasterElection ) { this.allocationService = allocationService; this.rerouteService = rerouteService; + this.featureService = featureService; this.maybeReconfigureAfterMasterElection = maybeReconfigureAfterMasterElection; } @@ -123,6 +129,7 @@ public ClusterState execute(BatchExecutionContext batchExecutionContex DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(newState.nodes()); Map compatibilityVersionsMap = new HashMap<>(newState.compatibilityVersions()); Map> nodeFeatures = new HashMap<>(newState.nodeFeatures()); + Set allNodesFeatures = ClusterFeatures.calculateAllNodeFeatures(nodeFeatures.values()); assert nodesBuilder.isLocalNodeElectedMaster(); @@ -155,16 +162,17 @@ public ClusterState execute(BatchExecutionContext batchExecutionContex if (enforceVersionBarrier) { ensureVersionBarrier(node.getVersion(), minClusterNodeVersion); CompatibilityVersions.ensureVersionsCompatibility(compatibilityVersions, compatibilityVersionsMap.values()); - // TODO: enforce feature ratchet barrier } blockForbiddenVersions(compatibilityVersions.transportVersion()); ensureNodesCompatibility(node.getVersion(), minClusterNodeVersion, maxClusterNodeVersion); + enforceNodeFeatureBarrier(node.getId(), allNodesFeatures, features); // we do this validation quite late to prevent race conditions between nodes joining and importing dangling indices // we have to reject nodes that don't support all indices we have in this cluster ensureIndexCompatibility(node.getMinIndexVersion(), node.getMaxIndexVersion(), initialState.getMetadata()); nodesBuilder.add(node); compatibilityVersionsMap.put(node.getId(), compatibilityVersions); nodeFeatures.put(node.getId(), features); + allNodesFeatures.retainAll(features); nodesChanged = true; minClusterNodeVersion = Version.min(minClusterNodeVersion, node.getVersion()); maxClusterNodeVersion = Version.max(maxClusterNodeVersion, node.getVersion()); @@ -444,6 +452,16 @@ public static void ensureVersionBarrier(Version joiningNodeVersion, Version minC } } + private void enforceNodeFeatureBarrier(String nodeId, Set existingNodesFeatures, Set newNodeFeatures) { + // prevent join if it does not have one or more features that all other nodes have + Set missingFeatures = new HashSet<>(existingNodesFeatures); + missingFeatures.removeAll(newNodeFeatures); + + if (missingFeatures.isEmpty() == false) { + throw new IllegalStateException("Node " + nodeId + " is missing required features " + missingFeatures); + } + } + public static Collection> addBuiltInJoinValidators( Collection> onJoinValidators ) { diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java index 9e7383a4c3f14..6afb85bdf629e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java @@ -366,8 +366,8 @@ public void onResponse(PublishWithJoinResponse response) { if (response.getJoin().isPresent()) { final Join join = response.getJoin().get(); - assert discoveryNode.equals(join.getSourceNode()); - assert join.getTerm() == response.getPublishResponse().getTerm() : response; + assert discoveryNode.equals(join.votingNode()); + assert join.term() == response.getPublishResponse().getTerm() : response; logger.trace("handling join within publish response: {}", join); onJoin(join); } else { diff --git 
a/server/src/main/java/org/elasticsearch/cluster/coordination/StartJoinRequest.java b/server/src/main/java/org/elasticsearch/cluster/coordination/StartJoinRequest.java index df26646d154c6..cb492f39f9337 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/StartJoinRequest.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/StartJoinRequest.java @@ -16,34 +16,38 @@ /** * Represents the action of requesting a join vote (see {@link Join}) from a node. - * The source node represents the node that is asking for join votes. + * + * A {@link StartJoinRequest} is broadcast to each node in the cluster, requesting + * that each node join the new cluster formed around the master candidate node in a + * new term. The sender is either the new master candidate or the current master + * abdicating to another eligible node in the cluster. */ public class StartJoinRequest extends TransportRequest { - private final DiscoveryNode sourceNode; + private final DiscoveryNode masterCandidateNode; private final long term; - public StartJoinRequest(DiscoveryNode sourceNode, long term) { - this.sourceNode = sourceNode; + public StartJoinRequest(DiscoveryNode masterCandidateNode, long term) { + this.masterCandidateNode = masterCandidateNode; this.term = term; } public StartJoinRequest(StreamInput input) throws IOException { super(input); - this.sourceNode = new DiscoveryNode(input); + this.masterCandidateNode = new DiscoveryNode(input); this.term = input.readLong(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - sourceNode.writeTo(out); + masterCandidateNode.writeTo(out); out.writeLong(term); } - public DiscoveryNode getSourceNode() { - return sourceNode; + public DiscoveryNode getMasterCandidateNode() { + return masterCandidateNode; } public long getTerm() { @@ -52,7 +56,7 @@ public long getTerm() { @Override public String toString() { - return "StartJoinRequest{" + "term=" + term + ",node=" + sourceNode + "}"; + return "StartJoinRequest{" + "term=" + term + ",node=" + masterCandidateNode + "}"; } @Override @@ -63,12 +67,12 @@ public boolean equals(Object o) { StartJoinRequest that = (StartJoinRequest) o; if (term != that.term) return false; - return sourceNode.equals(that.sourceNode); + return masterCandidateNode.equals(that.masterCandidateNode); } @Override public int hashCode() { - int result = sourceNode.hashCode(); + int result = masterCandidateNode.hashCode(); result = 31 * result + (int) (term ^ (term >>> 32)); return result; } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java index 47ab1d099c037..4a97d79380013 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java @@ -107,68 +107,11 @@ public static ComposableIndexTemplate parse(XContentParser parser) throws IOExce return PARSER.parse(parser, null); } - public ComposableIndexTemplate( - List indexPatterns, - @Nullable Template template, - @Nullable List componentTemplates, - @Nullable Long priority, - @Nullable Long version, - @Nullable Map metadata - ) { - this(indexPatterns, template, componentTemplates, priority, version, metadata, null, null, null); + public static Builder builder() { + return new Builder(); } - public ComposableIndexTemplate( - List indexPatterns, - @Nullable Template template, - @Nullable 
List componentTemplates, - @Nullable Long priority, - @Nullable Long version, - @Nullable Map metadata, - @Nullable DataStreamTemplate dataStreamTemplate - ) { - this(indexPatterns, template, componentTemplates, priority, version, metadata, dataStreamTemplate, null, null); - } - - public ComposableIndexTemplate( - List indexPatterns, - @Nullable Template template, - @Nullable List componentTemplates, - @Nullable Long priority, - @Nullable Long version, - @Nullable Map metadata, - @Nullable DataStreamTemplate dataStreamTemplate, - @Nullable Boolean allowAutoCreate - ) { - this(indexPatterns, template, componentTemplates, priority, version, metadata, dataStreamTemplate, allowAutoCreate, null); - } - - ComposableIndexTemplate( - List indexPatterns, - @Nullable Template template, - @Nullable List componentTemplates, - @Nullable Long priority, - @Nullable Long version, - @Nullable Map metadata, - @Nullable DataStreamTemplate dataStreamTemplate, - @Nullable Boolean allowAutoCreate, - @Nullable List ignoreMissingComponentTemplates - ) { - this( - indexPatterns, - template, - componentTemplates, - priority, - version, - metadata, - dataStreamTemplate, - allowAutoCreate, - ignoreMissingComponentTemplates, - null - ); - } - - public ComposableIndexTemplate( + private ComposableIndexTemplate( List indexPatterns, @Nullable Template template, @Nullable List componentTemplates, @@ -287,10 +230,6 @@ public List getIgnoreMissingComponentTemplates() { return ignoreMissingComponentTemplates; } - public Boolean deprecated() { - return deprecated; - } - public boolean isDeprecated() { return Boolean.TRUE.equals(deprecated); } @@ -412,6 +351,10 @@ static boolean componentTemplatesEquals(List c1, List c2) { return false; } + public Builder toBuilder() { + return new Builder(this); + } + @Override public String toString() { return Strings.toString(this); @@ -535,8 +478,25 @@ public static class Builder { private List ignoreMissingComponentTemplates; private Boolean deprecated; + /** + * @deprecated use {@link ComposableIndexTemplate#builder()} + */ + @Deprecated(forRemoval = true) public Builder() {} + private Builder(ComposableIndexTemplate template) { + this.indexPatterns = template.indexPatterns; + this.template = template.template; + this.componentTemplates = template.componentTemplates; + this.priority = template.priority; + this.version = template.version; + this.metadata = template.metadata; + this.dataStreamTemplate = template.dataStreamTemplate; + this.allowAutoCreate = template.allowAutoCreate; + this.ignoreMissingComponentTemplates = template.ignoreMissingComponentTemplates; + this.deprecated = template.deprecated; + } + public Builder indexPatterns(List indexPatterns) { this.indexPatterns = indexPatterns; return this; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java index a3a6e34d445d2..8d7ce0525e943 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; @@ -49,6 
+50,19 @@ public class DataStreamLifecycle implements SimpleDiffable, // Versions over the wire public static final TransportVersion ADDED_ENABLED_FLAG_VERSION = TransportVersions.V_8_500_057; + public static final String DATA_STREAMS_LIFECYCLE_ONLY_SETTING_NAME = "data_streams.lifecycle_only.mode"; + + /** + * Check if {@link #DATA_STREAMS_LIFECYCLE_ONLY_SETTING_NAME} is present and set to {@code true}, indicating that + * we're running in a cluster configuration that is only expecting to use data streams lifecycles. + * + * @param settings the node settings + * @return true if {@link #DATA_STREAMS_LIFECYCLE_ONLY_SETTING_NAME} is present and set + */ + public static boolean isDataStreamsLifecycleOnlyMode(final Settings settings) { + return settings.getAsBoolean(DATA_STREAMS_LIFECYCLE_ONLY_SETTING_NAME, false); + } + public static final Setting CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING = new Setting<>( "cluster.lifecycle.default.rollover", "max_age=auto,max_primary_shard_size=50gb,min_docs=1,max_primary_shard_docs=200000000", @@ -57,6 +71,8 @@ public class DataStreamLifecycle implements SimpleDiffable, Setting.Property.NodeScope ); + public static final DataStreamLifecycle DEFAULT = new DataStreamLifecycle(); + public static final String DATA_STREAM_LIFECYCLE_ORIGIN = "data_stream_lifecycle"; public static final ParseField ENABLED_FIELD = new ParseField("enabled"); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java index de46e04ea6215..27beffe56b97a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java @@ -20,6 +20,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.Processors; import org.elasticsearch.core.Nullable; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; @@ -33,6 +34,7 @@ import java.util.Objects; import java.util.Set; import java.util.TreeSet; +import java.util.function.Predicate; import static java.lang.String.format; import static org.elasticsearch.node.Node.NODE_EXTERNAL_ID_SETTING; @@ -40,7 +42,10 @@ import static org.elasticsearch.node.NodeRoleSettings.NODE_ROLES_SETTING; public final class DesiredNode implements Writeable, ToXContentObject, Comparable { - public static final Version RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION = Version.V_8_3_0; + + public static final NodeFeature RANGE_FLOAT_PROCESSORS_SUPPORTED = new NodeFeature("desired_node.range_float_processors"); + public static final NodeFeature DOUBLE_PROCESSORS_SUPPORTED = new NodeFeature("desired_node.double_processors"); + public static final TransportVersion RANGE_FLOAT_PROCESSORS_SUPPORT_TRANSPORT_VERSION = TransportVersions.V_8_3_0; private static final ParseField SETTINGS_FIELD = new ParseField("settings"); @@ -299,12 +304,8 @@ public Set getRoles() { return roles; } - public boolean isCompatibleWithVersion(Version version) { - if (version.onOrAfter(RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION)) { - return true; - } - - return processorsRange == null && processors.isCompatibleWithVersion(version); + public boolean clusterHasRequiredFeatures(Predicate clusterHasFeature) { + return (processorsRange == null && processors.hasDecimals() == false) || clusterHasFeature.test(RANGE_FLOAT_PROCESSORS_SUPPORTED); } 
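The DesiredNode change above swaps a node-version comparison for a cluster-feature check. A minimal sketch of how a caller might consume it, assuming FeatureService exposes a clusterHasFeature(ClusterState, NodeFeature) lookup (the actual call sites are not part of this diff):

    import org.elasticsearch.cluster.ClusterState;
    import org.elasticsearch.cluster.metadata.DesiredNode;
    import org.elasticsearch.features.FeatureService;

    final class DesiredNodeChecks {
        // A DesiredNode that uses processor ranges (or fractional processors) is
        // only safe to accept once every node in the cluster advertises support
        // for the corresponding feature; clusterHasRequiredFeatures() also
        // returns true when the node spec avoids those constructs entirely.
        static boolean canAccept(DesiredNode node, ClusterState state, FeatureService featureService) {
            return node.clusterHasRequiredFeatures(feature -> featureService.clusterHasFeature(state, feature));
        }
    }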
@Override diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java index 52b522ec5ddaa..ca885632a08c4 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java @@ -46,6 +46,8 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; +import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.isDataStreamsLifecycleOnlyMode; + public class MetadataCreateDataStreamService { private static final Logger logger = LogManager.getLogger(MetadataCreateDataStreamService.class); @@ -53,6 +55,7 @@ public class MetadataCreateDataStreamService { private final ThreadPool threadPool; private final ClusterService clusterService; private final MetadataCreateIndexService metadataCreateIndexService; + private final boolean isDslOnlyMode; public MetadataCreateDataStreamService( ThreadPool threadPool, @@ -62,6 +65,7 @@ public MetadataCreateDataStreamService( this.threadPool = threadPool; this.clusterService = clusterService; this.metadataCreateIndexService = metadataCreateIndexService; + this.isDslOnlyMode = isDataStreamsLifecycleOnlyMode(clusterService.getSettings()); } public void createDataStream(CreateDataStreamClusterStateUpdateRequest request, ActionListener finalListener) { @@ -87,7 +91,13 @@ public void createDataStream(CreateDataStreamClusterStateUpdateRequest request, new AckedClusterStateUpdateTask(Priority.HIGH, request, delegate.clusterStateUpdate()) { @Override public ClusterState execute(ClusterState currentState) throws Exception { - ClusterState clusterState = createDataStream(metadataCreateIndexService, currentState, request, delegate.reroute()); + ClusterState clusterState = createDataStream( + metadataCreateIndexService, + currentState, + isDslOnlyMode, + request, + delegate.reroute() + ); firstBackingIndexRef.set(clusterState.metadata().dataStreams().get(request.name).getIndices().get(0).getName()); return clusterState; } @@ -105,7 +115,7 @@ public ClusterState createDataStream( ClusterState current, ActionListener listener ) throws Exception { - return createDataStream(metadataCreateIndexService, current, request, listener); + return createDataStream(metadataCreateIndexService, current, isDslOnlyMode, request, listener); } public static final class CreateDataStreamClusterStateUpdateRequest extends ClusterStateUpdateRequest< @@ -162,10 +172,11 @@ public SystemDataStreamDescriptor getSystemDataStreamDescriptor() { static ClusterState createDataStream( MetadataCreateIndexService metadataCreateIndexService, ClusterState currentState, + boolean isDslOnlyMode, CreateDataStreamClusterStateUpdateRequest request, ActionListener listener ) throws Exception { - return createDataStream(metadataCreateIndexService, currentState, request, List.of(), null, listener); + return createDataStream(metadataCreateIndexService, currentState, isDslOnlyMode, request, List.of(), null, listener); } /** @@ -181,6 +192,7 @@ static ClusterState createDataStream( static ClusterState createDataStream( MetadataCreateIndexService metadataCreateIndexService, ClusterState currentState, + boolean isDslOnlyMode, CreateDataStreamClusterStateUpdateRequest request, List backingIndices, IndexMetadata writeIndex, @@ -277,7 +289,7 @@ static ClusterState createDataStream( isSystem, 
template.getDataStreamTemplate().isAllowCustomRouting(), indexMode, - lifecycle + lifecycle == null && isDslOnlyMode ? DataStreamLifecycle.DEFAULT : lifecycle ); Metadata.Builder builder = Metadata.builder(currentState.metadata()).put(newDataStream); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataFeatures.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataFeatures.java new file mode 100644 index 0000000000000..b24e7fbcfefab --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataFeatures.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.Version; +import org.elasticsearch.features.FeatureSpecification; +import org.elasticsearch.features.NodeFeature; + +import java.util.Map; + +public class MetadataFeatures implements FeatureSpecification { + @Override + public Map getHistoricalFeatures() { + return Map.of( + DesiredNode.RANGE_FLOAT_PROCESSORS_SUPPORTED, + Version.V_8_3_0, + DesiredNode.DOUBLE_PROCESSORS_SUPPORTED, + Version.V_8_5_0 + ); + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java index e0dc1728eab6a..0c78d497d1194 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java @@ -615,18 +615,7 @@ public ClusterState addIndexTemplateV2( CompressedXContent mappings = innerTemplate.mappings(); CompressedXContent wrappedMappings = wrapMappingsIfNecessary(mappings, xContentRegistry); final Template finalTemplate = new Template(finalSettings, wrappedMappings, innerTemplate.aliases(), innerTemplate.lifecycle()); - finalIndexTemplate = new ComposableIndexTemplate( - template.indexPatterns(), - finalTemplate, - template.composedOf(), - template.priority(), - template.version(), - template.metadata(), - template.getDataStreamTemplate(), - template.getAllowAutoCreate(), - template.getIgnoreMissingComponentTemplates(), - template.deprecated() - ); + finalIndexTemplate = template.toBuilder().template(finalTemplate).build(); } if (finalIndexTemplate.equals(existing)) { @@ -713,23 +702,16 @@ private void validateIndexTemplateV2(String name, ComposableIndexTemplate indexT // Then apply settings resolved from templates: finalSettings.put(finalTemplate.map(Template::settings).orElse(Settings.EMPTY)); - var templateToValidate = new ComposableIndexTemplate( - indexTemplate.indexPatterns(), - new Template( - finalSettings.build(), - finalTemplate.map(Template::mappings).orElse(null), - finalTemplate.map(Template::aliases).orElse(null), - finalTemplate.map(Template::lifecycle).orElse(null) - ), - indexTemplate.composedOf(), - indexTemplate.priority(), - indexTemplate.version(), - indexTemplate.metadata(), - indexTemplate.getDataStreamTemplate(), - indexTemplate.getAllowAutoCreate(), - indexTemplate.getIgnoreMissingComponentTemplates(), - indexTemplate.deprecated() - ); + var templateToValidate = indexTemplate.toBuilder() + .template( + new 
Template( + finalSettings.build(), + finalTemplate.map(Template::mappings).orElse(null), + finalTemplate.map(Template::aliases).orElse(null), + finalTemplate.map(Template::lifecycle).orElse(null) + ) + ) + .build(); validate(name, templateToValidate); validateDataStreamsStillReferenced(currentState, name, templateToValidate); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java index b268be27e17ac..f7fa34d76498a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java @@ -41,6 +41,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; +import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.isDataStreamsLifecycleOnlyMode; import static org.elasticsearch.cluster.metadata.MetadataCreateDataStreamService.createDataStream; public class MetadataMigrateToDataStreamService { @@ -63,6 +64,7 @@ public class MetadataMigrateToDataStreamService { private final IndicesService indexServices; private final ThreadContext threadContext; private final MetadataCreateIndexService metadataCreateIndexService; + private final boolean isDslOnlyMode; public MetadataMigrateToDataStreamService( ThreadPool threadPool, @@ -74,6 +76,7 @@ public MetadataMigrateToDataStreamService( this.indexServices = indexServices; this.threadContext = threadPool.getThreadContext(); this.metadataCreateIndexService = metadataCreateIndexService; + this.isDslOnlyMode = isDataStreamsLifecycleOnlyMode(clusterService.getSettings()); } public void migrateToDataStream( @@ -104,7 +107,7 @@ public void migrateToDataStream( @Override public ClusterState execute(ClusterState currentState) throws Exception { - ClusterState clusterState = migrateToDataStream(currentState, indexMetadata -> { + ClusterState clusterState = migrateToDataStream(currentState, isDslOnlyMode, indexMetadata -> { try { return indexServices.createIndexMapperServiceForValidation(indexMetadata); } catch (IOException e) { @@ -125,6 +128,7 @@ private void submitUnbatchedTask(@SuppressWarnings("SameParameterValue") String static ClusterState migrateToDataStream( ClusterState currentState, + boolean isDslOnlyMode, Function mapperSupplier, MigrateToDataStreamClusterStateUpdateRequest request, MetadataCreateIndexService metadataCreateIndexService, @@ -155,6 +159,7 @@ static ClusterState migrateToDataStream( return createDataStream( metadataCreateIndexService, currentState, + isDslOnlyMode, req, backingIndices, currentState.metadata().index(writeIndex), diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java index 1310c0f7ec5c9..5891b953acfca 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java @@ -20,7 +20,11 @@ import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import 
org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.allocator.AllocationActionMultiListener; import org.elasticsearch.cluster.service.ClusterService; @@ -40,7 +44,9 @@ import java.io.IOException; import java.util.Arrays; import java.util.HashSet; +import java.util.List; import java.util.Locale; +import java.util.Objects; import java.util.Set; import java.util.function.BiFunction; @@ -192,9 +198,57 @@ ClusterState execute(ClusterState currentState) { } if (skippedSettings.isEmpty() == false && openIndices.isEmpty() == false) { - throw new IllegalArgumentException( - String.format(Locale.ROOT, "Can't update non dynamic settings [%s] for open indices %s", skippedSettings, openIndices) - ); + if (request.reopenShards()) { + // We have non-dynamic settings and open indices. We will unassign all of the shards in these indices so that the new + // changed settings are applied when the shards are re-assigned. + routingTableBuilder = RoutingTable.builder( + allocationService.getShardRoutingRoleStrategy(), + currentState.routingTable() + ); + for (Index index : openIndices) { + // We only want to take on the expense of reopening all shards for an index if the setting is really changing + Settings existingSettings = currentState.getMetadata().index(index).getSettings(); + boolean needToReopenIndex = false; + for (String setting : skippedSettings) { + String newValue = request.settings().get(setting); + if (Objects.equals(newValue, existingSettings.get(setting)) == false) { + needToReopenIndex = true; + break; + } + } + if (needToReopenIndex) { + List shardRoutingList = currentState.routingTable().allShards(index.getName()); + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index); + for (ShardRouting shardRouting : shardRoutingList) { + if (ShardRoutingState.UNASSIGNED.equals(shardRouting.state()) == false) { + indexRoutingTableBuilder.addShard( + shardRouting.moveToUnassigned( + new UnassignedInfo( + UnassignedInfo.Reason.INDEX_REOPENED, + "Unassigning shards to update static settings" + ) + ) + ); + } else { + indexRoutingTableBuilder.addShard(shardRouting); + } + } + routingTableBuilder.add(indexRoutingTableBuilder.build()); + openIndices.remove(index); + closedIndices.add(index); + } + } + } else { + throw new IllegalArgumentException( + String.format( + Locale.ROOT, + "Can't update non dynamic settings [%s] for open indices %s unless the `reopen` query parameter is set to " + + "true. Alternatively, close the indices, apply the settings changes, and reopen the indices", + skippedSettings, + openIndices + ) + ); + } } if (IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.exists(openSettings)) { @@ -209,10 +263,12 @@ ClusterState execute(ClusterState currentState) { * * TODO: should we update the in-sync allocation IDs once the data is deleted by the node? 
*/ - routingTableBuilder = RoutingTable.builder( - allocationService.getShardRoutingRoleStrategy(), - currentState.routingTable() - ); + if (routingTableBuilder == null) { + routingTableBuilder = RoutingTable.builder( + allocationService.getShardRoutingRoleStrategy(), + currentState.routingTable() + ); + } routingTableBuilder.updateNumberOfReplicas(updatedNumberOfReplicas, actualIndices); metadataBuilder.updateNumberOfReplicas(updatedNumberOfReplicas, actualIndices); logger.info("updating number_of_replicas to [{}] for indices {}", updatedNumberOfReplicas, actualIndices); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/ExpectedShardSizeEstimator.java b/server/src/main/java/org/elasticsearch/cluster/routing/ExpectedShardSizeEstimator.java new file mode 100644 index 0000000000000..05c0876669732 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/routing/ExpectedShardSizeEstimator.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.routing; + +import org.elasticsearch.cluster.ClusterInfo; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.snapshots.SnapshotShardSizeInfo; + +import java.util.Set; + +public class ExpectedShardSizeEstimator { + + public static long getExpectedShardSize(ShardRouting shardRouting, long defaultSize, RoutingAllocation allocation) { + return getExpectedShardSize( + shardRouting, + defaultSize, + allocation.clusterInfo(), + allocation.snapshotShardSizeInfo(), + allocation.metadata(), + allocation.routingTable() + ); + } + + /** + * Returns the expected shard size for the given shard, or the default value provided if not enough information is available + * to estimate the shard's size.
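The allocator changes later in this diff consume this estimator in a single call. A short usage sketch, with shard (a ShardRouting) and allocation (a RoutingAllocation) assumed to be in scope:

    // Estimate the shard's size before initializing it on a node, falling back
    // to the "size unknown" sentinel when neither ClusterInfo nor snapshot
    // shard-size info has an entry for this shard.
    long shardSize = ExpectedShardSizeEstimator.getExpectedShardSize(
        shard,
        ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE,
        allocation
    );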
+ */ + public static long getExpectedShardSize( + ShardRouting shard, + long defaultValue, + ClusterInfo clusterInfo, + SnapshotShardSizeInfo snapshotShardSizeInfo, + Metadata metadata, + RoutingTable routingTable + ) { + final IndexMetadata indexMetadata = metadata.getIndexSafe(shard.index()); + if (indexMetadata.getResizeSourceIndex() != null + && shard.active() == false + && shard.recoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS) { + return getExpectedSizeOfResizedShard(shard, defaultValue, indexMetadata, clusterInfo, metadata, routingTable); + } else if (shard.unassigned() && shard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT) { + return snapshotShardSizeInfo.getShardSize(shard, defaultValue); + } else { + return clusterInfo.getShardSize(shard, defaultValue); + } + } + + private static long getExpectedSizeOfResizedShard( + ShardRouting shard, + long defaultValue, + IndexMetadata indexMetadata, + ClusterInfo clusterInfo, + Metadata metadata, + RoutingTable routingTable + ) { + // in the shrink index case we sum up the source index shards since we basically make a copy of the shard in the worst case + long targetShardSize = 0; + final Index mergeSourceIndex = indexMetadata.getResizeSourceIndex(); + final IndexMetadata sourceIndexMetadata = metadata.index(mergeSourceIndex); + if (sourceIndexMetadata != null) { + final Set shardIds = IndexMetadata.selectRecoverFromShards( + shard.id(), + sourceIndexMetadata, + indexMetadata.getNumberOfShards() + ); + final IndexRoutingTable indexRoutingTable = routingTable.index(mergeSourceIndex.getName()); + for (int i = 0; i < indexRoutingTable.size(); i++) { + IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(i); + if (shardIds.contains(shardRoutingTable.shardId())) { + targetShardSize += clusterInfo.getShardSize(shardRoutingTable.primaryShard(), 0); + } + } + } + return targetShardSize == 0 ? 
defaultValue : targetShardSize; + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java index 5f477a9ca66df..ea0ee630ef073 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java @@ -8,7 +8,6 @@ package org.elasticsearch.cluster.routing.allocation; -import org.elasticsearch.Version; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -16,6 +15,7 @@ import org.elasticsearch.common.unit.RelativeByteSizeValue; import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.UpdateForV9; import java.util.Iterator; import java.util.List; @@ -152,7 +152,11 @@ public class DiskThresholdSettings { private volatile TimeValue rerouteInterval; static { - assert Version.CURRENT.major == Version.V_7_0_0.major + 1; // this check is unnecessary in v9 + checkAutoReleaseIndexEnabled(); + } + + @UpdateForV9 // this check is unnecessary in v9 + private static void checkAutoReleaseIndexEnabled() { final String AUTO_RELEASE_INDEX_ENABLED_KEY = "es.disk.auto_release_flood_stage_block"; final String property = System.getProperty(AUTO_RELEASE_INDEX_ENABLED_KEY); if (property != null) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 8d336d2147e11..438c81b5fbb98 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -31,7 +31,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type; -import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; @@ -57,6 +56,7 @@ import java.util.stream.StreamSupport; import static org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata.Type.REPLACE; +import static org.elasticsearch.cluster.routing.ExpectedShardSizeEstimator.getExpectedShardSize; import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; import static org.elasticsearch.common.settings.ClusterSettings.createBuiltInClusterSettings; @@ -1037,11 +1037,7 @@ private void allocateUnassigned() { logger.trace("Assigned shard [{}] to [{}]", shard, minNode.getNodeId()); } - final long shardSize = DiskThresholdDecider.getExpectedShardSize( - shard, - ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, - allocation - ); + final long shardSize = getExpectedShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, allocation); shard = routingNodes.initializeShard(shard, minNode.getNodeId(), null, shardSize, allocation.changes()); minNode.addShard(shard); if (shard.primary() == false) { @@ -1064,11 +1060,7 @@ private void allocateUnassigned() { if 
(minNode != null) { // throttle decision scenario assert allocationDecision.getAllocationStatus() == AllocationStatus.DECIDERS_THROTTLED; - final long shardSize = DiskThresholdDecider.getExpectedShardSize( - shard, - ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, - allocation - ); + final long shardSize = getExpectedShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, allocation); minNode.addShard(shard.initialize(minNode.getNodeId(), null, shardSize)); } else { if (logger.isTraceEnabled()) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java index 7d24872cf51dc..60a6ec2e49899 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java @@ -51,6 +51,7 @@ public class DesiredBalanceComputer { private final ThreadPool threadPool; private final ShardsAllocator delegateAllocator; + // stats protected final MeanMetric iterations = new MeanMetric(); public static final Setting PROGRESS_LOG_INTERVAL_SETTING = Setting.timeSetting( diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java index 625591ba8b90b..8df50196c5d4b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java @@ -21,7 +21,6 @@ import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.Decision; -import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; @@ -35,11 +34,13 @@ import java.util.Comparator; import java.util.Iterator; import java.util.Set; +import java.util.concurrent.atomic.AtomicLong; import java.util.function.BiFunction; import java.util.stream.Collectors; import java.util.stream.IntStream; import static org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata.Type.REPLACE; +import static org.elasticsearch.cluster.routing.ExpectedShardSizeEstimator.getExpectedShardSize; /** * Given the current allocation of shards and the desired balance, performs the next (legal) shard movements towards the goal. 
@@ -69,6 +70,20 @@ public class DesiredBalanceReconciler { private final NodeAllocationOrdering allocationOrdering = new NodeAllocationOrdering(); private final NodeAllocationOrdering moveOrdering = new NodeAllocationOrdering(); + // stats + /** + * Number of unassigned shards during the last reconciliation + */ + protected final AtomicLong unassignedShards = new AtomicLong(); + /** + * Total number of assigned shards during the last reconciliation + */ + protected final AtomicLong totalAllocations = new AtomicLong(); + /** + * Number of assigned shards during the last reconciliation that are not allocated on their desired node and need to be moved + */ + protected final AtomicLong undesiredAllocations = new AtomicLong(); + public DesiredBalanceReconciler(ClusterSettings clusterSettings, ThreadPool threadPool) { this.undesiredAllocationLogInterval = new FrequencyCappedAction(threadPool); clusterSettings.initializeAndWatch(UNDESIRED_ALLOCATIONS_LOG_INTERVAL_SETTING, this.undesiredAllocationLogInterval::setMinInterval); @@ -261,18 +276,17 @@ private void allocateUnassigned() { // desired node no longer exists continue; } + if (routingNode.getByShardId(shard.shardId()) != null) { + // The node already contains the same shard. + // Skipping it allows us to exclude the NO decision from SameShardAllocationDecider and to log only the more + // relevant NO or THROTTLE decisions that prevent the shard from starting on the assigned node + continue; + } final var decision = allocation.deciders().canAllocate(shard, routingNode, allocation); switch (decision.type()) { case YES -> { logger.debug("Assigning shard [{}] to {} [{}]", shard, nodeIdsIterator.source, nodeId); - final long shardSize = DiskThresholdDecider.getExpectedShardSize( - shard, - ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, - allocation.clusterInfo(), - allocation.snapshotShardSizeInfo(), - allocation.metadata(), - allocation.routingTable() - ); + long shardSize = getExpectedShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, allocation); routingNodes.initializeShard(shard, nodeId, null, shardSize, allocation.changes()); allocationOrdering.recordAllocation(nodeId); if (shard.primary() == false) { @@ -287,10 +301,10 @@ private void allocateUnassigned() { case THROTTLE -> { nodeIdsIterator.wasThrottled = true; unallocatedStatus = AllocationStatus.DECIDERS_THROTTLED; - logger.trace("Couldn't assign shard [{}] to [{}]: {}", shard.shardId(), nodeId, decision); + logger.debug("Couldn't assign shard [{}] to [{}]: {}", shard.shardId(), nodeId, decision); } case NO -> { - logger.trace("Couldn't assign shard [{}] to [{}]: {}", shard.shardId(), nodeId, decision); + logger.debug("Couldn't assign shard [{}] to [{}]: {}", shard.shardId(), nodeId, decision); } } } @@ -446,8 +460,9 @@ private void balance() { return; } - long allAllocations = 0; - long undesiredAllocations = 0; + int unassignedShards = routingNodes.unassigned().size() + routingNodes.unassigned().ignored().size(); + int totalAllocations = 0; + int undesiredAllocations = 0; // Iterate over all started shards and try to move any which are on undesired nodes.
In the presence of throttling shard // movements, the goal of this iteration order is to achieve a fairer movement of shards from the nodes that are offloading the @@ -455,7 +470,7 @@ private void balance() { for (final var iterator = OrderedShardsIterator.create(routingNodes, moveOrdering); iterator.hasNext();) { final var shardRouting = iterator.next(); - allAllocations++; + totalAllocations++; if (shardRouting.started() == false) { // can only rebalance started shards @@ -505,11 +520,18 @@ private void balance() { } } - maybeLogUndesiredAllocationsWarning(allAllocations, undesiredAllocations); + DesiredBalanceReconciler.this.unassignedShards.set(unassignedShards); + DesiredBalanceReconciler.this.undesiredAllocations.set(undesiredAllocations); + DesiredBalanceReconciler.this.totalAllocations.set(totalAllocations); + + maybeLogUndesiredAllocationsWarning(totalAllocations, undesiredAllocations, routingNodes.size()); } - private void maybeLogUndesiredAllocationsWarning(long allAllocations, long undesiredAllocations) { - if (allAllocations > 0 && undesiredAllocations > undesiredAllocationsLogThreshold * allAllocations) { + private void maybeLogUndesiredAllocationsWarning(int allAllocations, int undesiredAllocations, int nodeCount) { + // more shards than cluster can relocate with one reroute + final boolean nonEmptyRelocationBacklog = undesiredAllocations > 2L * nodeCount; + final boolean warningThresholdReached = undesiredAllocations > undesiredAllocationsLogThreshold * allAllocations; + if (allAllocations > 0 && nonEmptyRelocationBacklog && warningThresholdReached) { undesiredAllocationLogInterval.maybeExecute( () -> logger.warn( "[{}] of assigned shards ({}/{}) are not on their desired nodes, which exceeds the warn threshold of [{}]", diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java index ee95074b8a730..2319bcbef3383 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java @@ -264,7 +264,7 @@ public void resetDesiredBalance() { public DesiredBalanceStats getStats() { return new DesiredBalanceStats( - currentDesiredBalance.lastConvergedIndex(), + Math.max(currentDesiredBalance.lastConvergedIndex(), 0L), desiredBalanceComputation.isActive(), computationsSubmitted.count(), computationsExecuted.count(), @@ -272,7 +272,10 @@ public DesiredBalanceStats getStats() { desiredBalanceComputer.iterations.sum(), computedShardMovements.sum(), cumulativeComputationTime.count(), - cumulativeReconciliationTime.count() + cumulativeReconciliationTime.count(), + desiredBalanceReconciler.unassignedShards.get(), + desiredBalanceReconciler.totalAllocations.get(), + desiredBalanceReconciler.undesiredAllocations.get() ); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStats.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStats.java index c017d77362427..6a08b896136d2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStats.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStats.java @@ -19,6 +19,8 @@ import java.io.IOException; +import static 
org.elasticsearch.TransportVersions.ADDITIONAL_DESIRED_BALANCE_RECONCILIATION_STATS; + public record DesiredBalanceStats( long lastConvergedIndex, boolean computationActive, @@ -28,11 +30,21 @@ public record DesiredBalanceStats( long computationIterations, long computedShardMovements, long cumulativeComputationTime, - long cumulativeReconciliationTime + long cumulativeReconciliationTime, + long unassignedShards, + long totalAllocations, + long undesiredAllocations ) implements Writeable, ToXContentObject { private static final TransportVersion COMPUTED_SHARD_MOVEMENTS_VERSION = TransportVersions.V_8_8_0; + public DesiredBalanceStats { + if (lastConvergedIndex < 0) { + assert false : lastConvergedIndex; + throw new IllegalStateException("lastConvergedIndex must be nonnegative, but got [" + lastConvergedIndex + ']'); + } + } + public static DesiredBalanceStats readFrom(StreamInput in) throws IOException { return new DesiredBalanceStats( in.readVLong(), @@ -43,7 +55,10 @@ public static DesiredBalanceStats readFrom(StreamInput in) throws IOException { in.readVLong(), in.getTransportVersion().onOrAfter(COMPUTED_SHARD_MOVEMENTS_VERSION) ? in.readVLong() : -1, in.readVLong(), - in.readVLong() + in.readVLong(), + in.getTransportVersion().onOrAfter(ADDITIONAL_DESIRED_BALANCE_RECONCILIATION_STATS) ? in.readVLong() : -1, + in.getTransportVersion().onOrAfter(ADDITIONAL_DESIRED_BALANCE_RECONCILIATION_STATS) ? in.readVLong() : -1, + in.getTransportVersion().onOrAfter(ADDITIONAL_DESIRED_BALANCE_RECONCILIATION_STATS) ? in.readVLong() : -1 ); } @@ -60,6 +75,11 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeVLong(cumulativeComputationTime); out.writeVLong(cumulativeReconciliationTime); + if (out.getTransportVersion().onOrAfter(ADDITIONAL_DESIRED_BALANCE_RECONCILIATION_STATS)) { + out.writeVLong(unassignedShards); + out.writeVLong(totalAllocations); + out.writeVLong(undesiredAllocations); + } } @Override @@ -74,7 +94,21 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("computed_shard_movements", computedShardMovements); builder.humanReadableField("computation_time_in_millis", "computation_time", new TimeValue(cumulativeComputationTime)); builder.humanReadableField("reconciliation_time_in_millis", "reconciliation_time", new TimeValue(cumulativeReconciliationTime)); + builder.field("unassigned_shards", unassignedShards); + builder.field("total_allocations", totalAllocations); + builder.field("undesired_allocations", undesiredAllocations); + builder.field("undesired_allocations_fraction", undesiredAllocationsFraction()); builder.endObject(); return builder; } + + public double undesiredAllocationsFraction() { + if (unassignedShards == -1 || totalAllocations == -1 || undesiredAllocations == -1) { + return -1.0; + } else if (totalAllocations == 0) { + return 0.0; + } else { + return (double) undesiredAllocations / totalAllocations; + } + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index d916aa7638786..0e0d15a02d042 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -10,13 +10,10 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; 
import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.DiskUsage; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.cluster.routing.IndexRoutingTable; -import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingTable; @@ -29,12 +26,11 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.snapshots.SnapshotShardSizeInfo; +import org.elasticsearch.core.UpdateForV9; import java.util.Map; -import java.util.Set; + +import static org.elasticsearch.cluster.routing.ExpectedShardSizeEstimator.getExpectedShardSize; /** * The {@link DiskThresholdDecider} checks that the node a shard is potentially @@ -73,6 +69,7 @@ public class DiskThresholdDecider extends AllocationDecider { public static final String NAME = "disk_threshold"; + @UpdateForV9 public static final Setting ENABLE_FOR_SINGLE_DATA_NODE = Setting.boolSetting( "cluster.routing.allocation.disk.watermark.enable_for_single_data_node", true, @@ -102,7 +99,6 @@ public void validate(Boolean value) { public DiskThresholdDecider(Settings settings, ClusterSettings clusterSettings) { this.diskThresholdSettings = new DiskThresholdSettings(settings, clusterSettings); - assert Version.CURRENT.major < 9 : "remove enable_for_single_data_node in 9"; // get deprecation warnings. boolean enabledForSingleDataNode = ENABLE_FOR_SINGLE_DATA_NODE.get(settings); assert enabledForSingleDataNode; @@ -541,61 +537,6 @@ private Decision earlyTerminate(Map usages) { return null; } - public static long getExpectedShardSize(ShardRouting shardRouting, long defaultSize, RoutingAllocation allocation) { - return DiskThresholdDecider.getExpectedShardSize( - shardRouting, - defaultSize, - allocation.clusterInfo(), - allocation.snapshotShardSizeInfo(), - allocation.metadata(), - allocation.routingTable() - ); - } - - /** - * Returns the expected shard size for the given shard or the default value provided if not enough information are available - * to estimate the shards size. 
- */ - public static long getExpectedShardSize( - ShardRouting shard, - long defaultValue, - ClusterInfo clusterInfo, - SnapshotShardSizeInfo snapshotShardSizeInfo, - Metadata metadata, - RoutingTable routingTable - ) { - final IndexMetadata indexMetadata = metadata.getIndexSafe(shard.index()); - if (indexMetadata.getResizeSourceIndex() != null - && shard.active() == false - && shard.recoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS) { - // in the shrink index case we sum up the source index shards since we basically make a copy of the shard in - // the worst case - long targetShardSize = 0; - final Index mergeSourceIndex = indexMetadata.getResizeSourceIndex(); - final IndexMetadata sourceIndexMeta = metadata.index(mergeSourceIndex); - if (sourceIndexMeta != null) { - final Set shardIds = IndexMetadata.selectRecoverFromShards( - shard.id(), - sourceIndexMeta, - indexMetadata.getNumberOfShards() - ); - final IndexRoutingTable indexRoutingTable = routingTable.index(mergeSourceIndex.getName()); - for (int i = 0; i < indexRoutingTable.size(); i++) { - IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(i); - if (shardIds.contains(shardRoutingTable.shardId())) { - targetShardSize += clusterInfo.getShardSize(shardRoutingTable.primaryShard(), 0); - } - } - } - return targetShardSize == 0 ? defaultValue : targetShardSize; - } else { - if (shard.unassigned() && shard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT) { - return snapshotShardSizeInfo.getShardSize(shard, defaultValue); - } - return clusterInfo.getShardSize(shard, defaultValue); - } - } - record DiskUsageWithRelocations(DiskUsage diskUsage, long relocatingShardSize) { double getFreeDiskAsPercentage() { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorService.java similarity index 97% rename from server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorService.java rename to server/src/main/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorService.java index d17f3a297e805..76ca9f88b4b58 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorService.java @@ -6,7 +6,7 @@ * Side Public License, v 1. 
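The getExpectedShardSize helpers removed here now live in ExpectedShardSizeEstimator, imported statically above. The estimation rule is unchanged: a resize target sums the source index's shard sizes (worst case, the copy needs all of them), a snapshot restore asks the snapshot's recorded size, and everything else uses the size ClusterInfo reports, falling back to the caller's default. A simplified sketch of that decision tree; ShardInfo and the two suppliers are hypothetical stand-ins for ShardRouting, ClusterInfo and SnapshotShardSizeInfo:

    import java.util.List;
    import java.util.function.LongSupplier;

    // Simplified decision tree for estimating a shard's size before allocation.
    final class ShardSizeEstimator {

        enum Recovery { LOCAL_SHARDS, SNAPSHOT, OTHER }

        record ShardInfo(Recovery recovery, boolean active, List<Long> resizeSourceShardSizes) {}

        static long estimate(ShardInfo shard, long defaultValue, LongSupplier clusterInfoSize, LongSupplier snapshotSize) {
            if (shard.recovery() == Recovery.LOCAL_SHARDS && shard.active() == false) {
                // Resize target: worst case we copy every source shard, so sum them.
                long sum = shard.resizeSourceShardSizes().stream().mapToLong(Long::longValue).sum();
                return sum == 0 ? defaultValue : sum;
            }
            if (shard.recovery() == Recovery.SNAPSHOT && shard.active() == false) {
                return snapshotSize.getAsLong();   // size recorded in the snapshot
            }
            return clusterInfoSize.getAsLong();    // size currently reported by the node
        }
    }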
*/ -package org.elasticsearch.cluster.routing.allocation; +package org.elasticsearch.cluster.routing.allocation.shards; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -25,6 +25,13 @@ import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision; +import org.elasticsearch.cluster.routing.allocation.AllocationDecision; +import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.cluster.routing.allocation.DataTier; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationResult; +import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; @@ -127,7 +134,7 @@ public String name() { * @param metadata Metadata for the cluster * @return A new ShardAllocationStatus that has not yet been filled. */ - ShardAllocationStatus createNewStatus(Metadata metadata) { + public ShardAllocationStatus createNewStatus(Metadata metadata) { return new ShardAllocationStatus(metadata); } @@ -415,18 +422,18 @@ static void updateShardAllocationStatus( ) ); - class ShardAllocationCounts { + public class ShardAllocationCounts { int unassigned = 0; int unassigned_new = 0; int unassigned_restarting = 0; int initializing = 0; int started = 0; int relocating = 0; - final Set indicesWithUnavailableShards = new HashSet<>(); - final Set indicesWithAllShardsUnavailable = new HashSet<>(); + public final Set indicesWithUnavailableShards = new HashSet<>(); + public final Set indicesWithAllShardsUnavailable = new HashSet<>(); // We keep the searchable snapshots separately as long as the original index is still available // This is checked during the post-processing - SearchableSnapshotsState searchableSnapshotsState = new SearchableSnapshotsState(); + public SearchableSnapshotsState searchableSnapshotsState = new SearchableSnapshotsState(); final Map> diagnosisDefinitions = new HashMap<>(); public void increment(ShardRouting routing, ClusterState state, NodesShutdownMetadata shutdowns, boolean verbose) { @@ -675,7 +682,7 @@ List checkIsAllocationDisabled(IndexMetadata indexMetadata * @param clusterState the current cluster state. 
* @return A list of diagnoses for the provided unassigned shard */ - List checkDataTierRelatedIssues( + public List checkDataTierRelatedIssues( IndexMetadata indexMetadata, List nodeAllocationResults, ClusterState clusterState @@ -849,12 +856,12 @@ private static Optional checkNotEnoughNodesInDataTier( } } - class ShardAllocationStatus { - final ShardAllocationCounts primaries = new ShardAllocationCounts(); - final ShardAllocationCounts replicas = new ShardAllocationCounts(); - final Metadata clusterMetadata; + public class ShardAllocationStatus { + protected final ShardAllocationCounts primaries = new ShardAllocationCounts(); + protected final ShardAllocationCounts replicas = new ShardAllocationCounts(); + protected final Metadata clusterMetadata; - ShardAllocationStatus(Metadata clusterMetadata) { + public ShardAllocationStatus(Metadata clusterMetadata) { this.clusterMetadata = clusterMetadata; } @@ -1149,7 +1156,7 @@ static List getRestoreFromSnapshotAffectedResources( } } - static class SearchableSnapshotsState { + public static class SearchableSnapshotsState { private final Set searchableSnapshotWithUnavailableShard = new HashSet<>(); private final Set searchableSnapshotWithOriginalIndexAvailable = new HashSet<>(); @@ -1161,7 +1168,7 @@ void addSearchableSnapshotWithOriginalIndexAvailable(String indexName) { searchableSnapshotWithOriginalIndexAvailable.add(indexName); } - Set getRedSearchableSnapshots() { + public Set getRedSearchableSnapshots() { return Sets.difference(searchableSnapshotWithUnavailableShard, searchableSnapshotWithOriginalIndexAvailable); } diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/MasterNodeFileWatchingService.java b/server/src/main/java/org/elasticsearch/common/file/MasterNodeFileWatchingService.java similarity index 97% rename from server/src/main/java/org/elasticsearch/reservedstate/service/MasterNodeFileWatchingService.java rename to server/src/main/java/org/elasticsearch/common/file/MasterNodeFileWatchingService.java index 444cde45d6961..6da0845a7c7ba 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/MasterNodeFileWatchingService.java +++ b/server/src/main/java/org/elasticsearch/common/file/MasterNodeFileWatchingService.java @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -package org.elasticsearch.reservedstate.service; +package org.elasticsearch.common.file; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -15,7 +15,6 @@ import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.file.AbstractFileWatchingService; import java.io.IOException; import java.nio.file.Files; diff --git a/server/src/main/java/org/elasticsearch/common/inject/ModulesBuilder.java b/server/src/main/java/org/elasticsearch/common/inject/ModulesBuilder.java index 6b702f41e7c5d..60e6fa5fff22a 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/ModulesBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/inject/ModulesBuilder.java @@ -22,6 +22,11 @@ public ModulesBuilder add(Module... 
newModules) { return this; } + public T bindToInstance(Class cls, T instance) { + modules.add(b -> b.bind(cls).toInstance(instance)); + return instance; + } + @Override public Iterator iterator() { return modules.iterator(); diff --git a/server/src/main/java/org/elasticsearch/common/network/CloseableChannel.java b/server/src/main/java/org/elasticsearch/common/network/CloseableChannel.java index 3e184c41ef006..b0d1ec931b0be 100644 --- a/server/src/main/java/org/elasticsearch/common/network/CloseableChannel.java +++ b/server/src/main/java/org/elasticsearch/common/network/CloseableChannel.java @@ -83,7 +83,7 @@ static void closeChannels(List channels, boolean if (blocking) { ArrayList> futures = new ArrayList<>(channels.size()); for (final C channel : channels) { - PlainActionFuture closeFuture = PlainActionFuture.newFuture(); + PlainActionFuture closeFuture = new PlainActionFuture<>(); channel.addCloseListener(closeFuture); futures.add(closeFuture); } diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 69e61e7e70001..eb81fe01e57c2 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -80,6 +80,7 @@ import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexingPressure; +import org.elasticsearch.index.MergePolicyConfig; import org.elasticsearch.indices.IndexingMemoryController; import org.elasticsearch.indices.IndicesQueryCache; import org.elasticsearch.indices.IndicesRequestCache; @@ -577,6 +578,8 @@ public void apply(Settings value, Settings current, Settings previous) { IndicesClusterStateService.SHARD_LOCK_RETRY_TIMEOUT_SETTING, IngestSettings.GROK_WATCHDOG_INTERVAL, IngestSettings.GROK_WATCHDOG_MAX_EXECUTION_TIME, - TDigestExecutionHint.SETTING + TDigestExecutionHint.SETTING, + MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT_SETTING, + MergePolicyConfig.DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT_SETTING ).filter(Objects::nonNull).collect(Collectors.toSet()); } diff --git a/server/src/main/java/org/elasticsearch/common/unit/Processors.java b/server/src/main/java/org/elasticsearch/common/unit/Processors.java index d5421e1475655..c71005678fc0d 100644 --- a/server/src/main/java/org/elasticsearch/common/unit/Processors.java +++ b/server/src/main/java/org/elasticsearch/common/unit/Processors.java @@ -10,7 +10,6 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -31,9 +30,7 @@ public class Processors implements Writeable, Comparable, ToXContent public static final Processors ZERO = new Processors(0.0); public static final Processors MAX_PROCESSORS = new Processors(Double.MAX_VALUE); - public static final Version FLOAT_PROCESSORS_SUPPORT_VERSION = Version.V_8_3_0; public static final TransportVersion FLOAT_PROCESSORS_SUPPORT_TRANSPORT_VERSION = TransportVersions.V_8_3_0; - public static final Version DOUBLE_PROCESSORS_SUPPORT_VERSION = Version.V_8_5_0; public static final TransportVersion DOUBLE_PROCESSORS_SUPPORT_TRANSPORT_VERSION = TransportVersions.V_8_5_0; static final int NUMBER_OF_DECIMAL_PLACES = 5; private static final double MIN_REPRESENTABLE_PROCESSORS = 
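bindToInstance above registers a ready-made object with the injector and returns it, so construction code can bind a component and keep using it in the same expression. A minimal sketch of the idiom, with a hypothetical Binder in place of Guice's:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.Consumer;

    // Minimal sketch of the bindToInstance idiom: register a pre-built instance
    // and hand it back so the caller keeps the reference without a separate local.
    interface Binder {
        <T> void bindInstance(Class<T> cls, T instance);
    }

    final class MiniModules {
        private final List<Consumer<Binder>> modules = new ArrayList<>();

        <T> T bindToInstance(Class<T> cls, T instance) {
            modules.add(b -> b.bindInstance(cls, instance));
            return instance;
        }
    }

NodeConstruction later in this diff uses exactly this shape, e.g. modules.bindToInstance(ThreadPool.class, threadPool).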
1E-5; @@ -143,18 +140,10 @@ private static boolean validNumberOfProcessors(double processors) { return Double.isFinite(processors) && processors > 0.0; } - private boolean hasDecimals() { + public boolean hasDecimals() { return ((int) count) != Math.ceil(count); } - public boolean isCompatibleWithVersion(Version version) { - if (version.onOrAfter(FLOAT_PROCESSORS_SUPPORT_VERSION)) { - return true; - } - - return hasDecimals() == false; - } - @Override public int compareTo(Processors o) { return Double.compare(count, o.count); diff --git a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java index fabc10e336368..08a07241a9ebb 100644 --- a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java +++ b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.coordination.Coordinator; @@ -35,6 +34,8 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.core.UpdateForV9; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.gateway.GatewayMetaState; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.monitor.NodeHealthService; @@ -112,7 +113,7 @@ public DiscoveryModule( NodeHealthService nodeHealthService, CircuitBreakerService circuitBreakerService, CompatibilityVersions compatibilityVersions, - Set features + FeatureService featureService ) { final Collection> joinValidators = new ArrayList<>(); final Map> hostProviders = new HashMap<>(); @@ -172,19 +173,7 @@ public DiscoveryModule( throw new IllegalArgumentException("Unknown election strategy " + ELECTION_STRATEGY_SETTING.get(settings)); } - if (LEGACY_MULTI_NODE_DISCOVERY_TYPE.equals(discoveryType)) { - assert Version.CURRENT.major == Version.V_7_0_0.major + 1; - DeprecationLogger.getLogger(DiscoveryModule.class) - .critical( - DeprecationCategory.SETTINGS, - "legacy-discovery-type", - "Support for setting [{}] to [{}] is deprecated and will be removed in a future version. 
Set this setting to [{}] " - + "instead.", - DISCOVERY_TYPE_SETTING.getKey(), - LEGACY_MULTI_NODE_DISCOVERY_TYPE, - MULTI_NODE_DISCOVERY_TYPE - ); - } + checkLegacyMultiNodeDiscoveryType(discoveryType); this.reconfigurator = getReconfigurator(settings, clusterSettings, clusterCoordinationPlugins); var preVoteCollectorFactory = getPreVoteCollectorFactory(clusterCoordinationPlugins); @@ -215,7 +204,7 @@ public DiscoveryModule( leaderHeartbeatService, preVoteCollectorFactory, compatibilityVersions, - features + featureService ); } else { throw new IllegalArgumentException("Unknown discovery type [" + discoveryType + "]"); @@ -224,6 +213,22 @@ public DiscoveryModule( logger.info("using discovery type [{}] and seed hosts providers {}", discoveryType, seedProviderNames); } + @UpdateForV9 + private static void checkLegacyMultiNodeDiscoveryType(String discoveryType) { + if (LEGACY_MULTI_NODE_DISCOVERY_TYPE.equals(discoveryType)) { + DeprecationLogger.getLogger(DiscoveryModule.class) + .critical( + DeprecationCategory.SETTINGS, + "legacy-discovery-type", + "Support for setting [{}] to [{}] is deprecated and will be removed in a future version. Set this setting to [{}] " + + "instead.", + DISCOVERY_TYPE_SETTING.getKey(), + LEGACY_MULTI_NODE_DISCOVERY_TYPE, + MULTI_NODE_DISCOVERY_TYPE + ); + } + } + // visible for testing static Reconfigurator getReconfigurator( Settings settings, diff --git a/server/src/main/java/org/elasticsearch/env/NodeMetadata.java b/server/src/main/java/org/elasticsearch/env/NodeMetadata.java index 77415bbaea949..f810612bf4666 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeMetadata.java +++ b/server/src/main/java/org/elasticsearch/env/NodeMetadata.java @@ -10,6 +10,7 @@ import org.elasticsearch.Build; import org.elasticsearch.Version; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.gateway.MetadataStateFormat; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; @@ -162,15 +163,15 @@ public void setOldestIndexVersion(int oldestIndexVersion) { this.oldestIndexVersion = IndexVersion.fromId(oldestIndexVersion); } + private Version getVersionOrFallbackToEmpty() { + return Objects.requireNonNullElse(this.nodeVersion, Version.V_EMPTY); + } + public NodeMetadata build() { - final Version nodeVersion; + @UpdateForV9 // version is required in the node metadata from v9 onwards + final Version nodeVersion = getVersionOrFallbackToEmpty(); final IndexVersion oldestIndexVersion; - if (this.nodeVersion == null) { - assert Version.CURRENT.major <= Version.V_7_0_0.major + 1 : "version is required in the node metadata from v9 onwards"; - nodeVersion = Version.V_EMPTY; - } else { - nodeVersion = this.nodeVersion; - } + if (this.previousNodeVersion == null) { previousNodeVersion = nodeVersion; } diff --git a/server/src/main/java/org/elasticsearch/features/FeatureService.java b/server/src/main/java/org/elasticsearch/features/FeatureService.java index 5d7632a91b0b8..d88589ac1ede8 100644 --- a/server/src/main/java/org/elasticsearch/features/FeatureService.java +++ b/server/src/main/java/org/elasticsearch/features/FeatureService.java @@ -39,13 +39,13 @@ public class FeatureService { public static final Version CLUSTER_FEATURES_ADDED_VERSION = Version.V_8_12_0; private final NavigableMap> historicalFeatures; - private final Set nodeFeatures; + private final Map nodeFeatures; public FeatureService(List specs) { Map allFeatures = new HashMap<>(); NavigableMap> historicalFeatures = new TreeMap<>(); - Set nodeFeatures = new 
HashSet<>(); + Map nodeFeatures = new HashMap<>(); for (FeatureSpecification spec : specs) { for (var hfe : spec.getHistoricalFeatures().entrySet()) { FeatureSpecification existing = allFeatures.putIfAbsent(hfe.getKey().id(), spec); @@ -78,14 +78,14 @@ public FeatureService(List specs) { ); } - nodeFeatures.add(f.id()); + nodeFeatures.put(f.id(), f); } } this.historicalFeatures = consolidateHistoricalFeatures(historicalFeatures); - this.nodeFeatures = Set.copyOf(nodeFeatures); + this.nodeFeatures = Map.copyOf(nodeFeatures); - logger.info("Registered local node features {}", nodeFeatures.stream().sorted().toList()); + logger.info("Registered local node features {}", nodeFeatures.keySet().stream().sorted().toList()); } private static NavigableMap> consolidateHistoricalFeatures( @@ -104,7 +104,7 @@ private static NavigableMap> consolidateHistoricalFeatures( /** * The non-historical features supported by this node. */ - public Set getNodeFeatures() { + public Map getNodeFeatures() { return nodeFeatures; } diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index a7cf7299a8502..e7b8eadb3f771 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Tuple; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.env.NodeMetadata; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.node.Node; @@ -184,7 +185,7 @@ private PersistedState createOnDiskPersistedState( long currentTerm = onDiskState.currentTerm; if (onDiskState.empty()) { - assert Version.CURRENT.major <= Version.V_7_0_0.major + 1 : "legacy metadata loader is not needed anymore from v9 onwards"; + @UpdateForV9 // legacy metadata loader is not needed anymore from v9 onwards final Tuple legacyState = metaStateService.loadFullState(); if (legacyState.v1().isEmpty() == false) { metadata = legacyState.v2(); diff --git a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java index 4ba7c91d411f3..1db0ec7346a32 100644 --- a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java +++ b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java @@ -17,6 +17,7 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Tuple; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -54,6 +55,7 @@ public MetaStateService(NodeEnvironment nodeEnv, NamedXContentRegistry namedXCon * meta state with globalGeneration -1 and empty meta data is returned. * @throws IOException if some IOException when loading files occurs or there is no metadata referenced by manifest file. 
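Switching FeatureService from a set of feature ids to a map keyed by id keeps the membership check but also lets callers recover the NodeFeature object itself. A toy registry showing the two kinds of query (the real service additionally merges in historical features by version):

    import java.util.Map;
    import java.util.Optional;

    // Toy registry showing why a Map<String, Feature> beats a Set<String>:
    // lookups can return the feature object, not just a membership bit.
    record Feature(String id) {}

    final class FeatureRegistry {
        private final Map<String, Feature> features;

        FeatureRegistry(Map<String, Feature> features) {
            this.features = Map.copyOf(features);
        }

        boolean supports(String id) {           // what a Set<String> could answer
            return features.containsKey(id);
        }

        Optional<Feature> lookup(String id) {   // what only a Map can answer
            return Optional.ofNullable(features.get(id));
        }
    }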
*/ + @UpdateForV9 public Tuple loadFullState() throws IOException { final Manifest manifest = Manifest.FORMAT.loadLatestState(logger, namedXContentRegistry, nodeEnv.nodeDataPaths()); if (manifest == null) { diff --git a/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java b/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java index 4d50764aa0cc1..c7946a6e17bc6 100644 --- a/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java +++ b/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java @@ -41,6 +41,7 @@ */ public class HealthPeriodicLogger implements ClusterStateListener, Closeable, SchedulerEngine.Listener { public static final String HEALTH_FIELD_PREFIX = "elasticsearch.health"; + public static final String MESSAGE_FIELD = "message"; public static final Setting POLL_INTERVAL_SETTING = Setting.timeSetting( "health.periodic_logger.poll_interval", @@ -90,7 +91,18 @@ public class HealthPeriodicLogger implements ClusterStateListener, Closeable, Sc * @param client the client used to call the Health Service. * @param healthService the Health Service, where the actual Health API logic lives. */ - public HealthPeriodicLogger(Settings settings, ClusterService clusterService, Client client, HealthService healthService) { + public static HealthPeriodicLogger create( + Settings settings, + ClusterService clusterService, + Client client, + HealthService healthService + ) { + HealthPeriodicLogger logger = new HealthPeriodicLogger(settings, clusterService, client, healthService); + logger.registerListeners(); + return logger; + } + + private HealthPeriodicLogger(Settings settings, ClusterService clusterService, Client client, HealthService healthService) { this.settings = settings; this.clusterService = clusterService; this.client = client; @@ -100,11 +112,8 @@ public HealthPeriodicLogger(Settings settings, ClusterService clusterService, Cl this.enabled = ENABLED_SETTING.get(settings); } - /** - * Initializer method to avoid the publication of a self reference in the constructor. 
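HealthPeriodicLogger replaces its public constructor plus init() with a static create() so that `this` is never published before the object is fully constructed: the private constructor only assigns fields, and create() registers the listeners afterwards. A minimal sketch of the safe-publication idiom, with a hypothetical ListenerRegistry standing in for ClusterService:

    // Safe-publication idiom: never let `this` escape from a constructor.
    final class PeriodicLogger {

        interface ListenerRegistry {
            void addListener(PeriodicLogger listener);
        }

        static PeriodicLogger create(ListenerRegistry registry, boolean enabled) {
            PeriodicLogger logger = new PeriodicLogger(enabled);
            logger.register(registry);   // registration happens after construction completes
            return logger;
        }

        private final boolean enabled;

        private PeriodicLogger(boolean enabled) {
            this.enabled = enabled;      // no listener registration in here
        }

        private void register(ListenerRegistry registry) {
            if (enabled) {
                registry.addListener(this);
            }
        }
    }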
- */ - public void init() { - if (this.enabled) { + private void registerListeners() { + if (enabled) { clusterService.addListener(this); } clusterService.getClusterSettings().addSettingsUpdateConsumer(ENABLED_SETTING, this::enable); @@ -193,6 +202,7 @@ static Map convertToLoggedFields(List ind // overall status final HealthStatus status = HealthStatus.merge(indicatorResults.stream().map(HealthIndicatorResult::status)); result.put(String.format(Locale.ROOT, "%s.overall.status", HEALTH_FIELD_PREFIX), status.xContentValue()); + result.put(MESSAGE_FIELD, String.format(Locale.ROOT, "health=%s", status.xContentValue())); // top-level status for each indicator indicatorResults.forEach((indicatorResult) -> { diff --git a/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java b/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java index a6f6eb8750cac..177e4d471cf30 100644 --- a/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java +++ b/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java @@ -91,7 +91,7 @@ public abstract class AbstractHttpServerTransport extends AbstractLifecycleCompo private volatile BoundTransportAddress boundAddress; private final AtomicLong totalChannelsAccepted = new AtomicLong(); private final Map httpChannels = new ConcurrentHashMap<>(); - private final PlainActionFuture allClientsClosedListener = PlainActionFuture.newFuture(); + private final PlainActionFuture allClientsClosedListener = new PlainActionFuture<>(); private final RefCounted refCounted = AbstractRefCounted.of(() -> allClientsClosedListener.onResponse(null)); private final Set httpServerChannels = Collections.newSetFromMap(new ConcurrentHashMap<>()); private final long shutdownGracePeriodMillis; diff --git a/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java b/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java index 5e8fb556b2089..9991d42e013e3 100644 --- a/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java +++ b/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java @@ -9,13 +9,11 @@ package org.elasticsearch.index; import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; public abstract class AbstractIndexComponent { protected final Logger logger; - protected final DeprecationLogger deprecationLogger; protected final IndexSettings indexSettings; /** @@ -23,7 +21,6 @@ public abstract class AbstractIndexComponent { */ protected AbstractIndexComponent(IndexSettings indexSettings) { this.logger = Loggers.getLogger(getClass(), indexSettings.getIndex()); - this.deprecationLogger = DeprecationLogger.getLogger(getClass()); this.indexSettings = indexSettings; } diff --git a/server/src/main/java/org/elasticsearch/index/MergePolicyConfig.java b/server/src/main/java/org/elasticsearch/index/MergePolicyConfig.java index bd228db91c0e1..e6b2a861458d0 100644 --- a/server/src/main/java/org/elasticsearch/index/MergePolicyConfig.java +++ b/server/src/main/java/org/elasticsearch/index/MergePolicyConfig.java @@ -111,17 +111,33 @@ public final class MergePolicyConfig { private final Logger logger; private final boolean mergesEnabled; private volatile Type mergePolicyType; + private final ByteSizeValue defaultMaxMergedSegment; + private final ByteSizeValue defaultMaxTimeBasedMergedSegment; public static final double DEFAULT_EXPUNGE_DELETES_ALLOWED = 10d; public static 
final ByteSizeValue DEFAULT_FLOOR_SEGMENT = new ByteSizeValue(2, ByteSizeUnit.MB); public static final int DEFAULT_MAX_MERGE_AT_ONCE = 10; public static final ByteSizeValue DEFAULT_MAX_MERGED_SEGMENT = new ByteSizeValue(5, ByteSizeUnit.GB); + public static final Setting DEFAULT_MAX_MERGED_SEGMENT_SETTING = Setting.byteSizeSetting( + "indices.merge.policy.max_merged_segment", + DEFAULT_MAX_MERGED_SEGMENT, + ByteSizeValue.ofBytes(1L), + ByteSizeValue.ofBytes(Long.MAX_VALUE), + Setting.Property.NodeScope + ); /** * Time-based data generally gets rolled over, so there is not much value in enforcing a maximum segment size, which has the side effect * of merging fewer segments together than the merge factor, which in-turn increases write amplification. So we set an arbitrarily high * roof that serves as a protection that we expect to never hit. */ public static final ByteSizeValue DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT = new ByteSizeValue(100, ByteSizeUnit.GB); + public static final Setting DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT_SETTING = Setting.byteSizeSetting( + "indices.merge.policy.max_time_based_merged_segment", + DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT, + ByteSizeValue.ofBytes(1L), + ByteSizeValue.ofBytes(Long.MAX_VALUE), + Setting.Property.NodeScope + ); public static final double DEFAULT_SEGMENTS_PER_TIER = 10.0d; /** * A default value for {@link LogByteSizeMergePolicy}'s merge factor: 32. This default value differs from the Lucene default of 10 in @@ -262,8 +278,8 @@ MergePolicy getMergePolicy(MergePolicyConfig config, boolean isTimeBasedIndex) { double forceMergeDeletesPctAllowed = indexSettings.getValue(INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING); // percentage ByteSizeValue floorSegment = indexSettings.getValue(INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING); int maxMergeAtOnce = indexSettings.getValue(INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING); - // TODO is this really a good default number for max_merge_segment, what happens for large indices, - // won't they end up with many segments? + this.defaultMaxMergedSegment = DEFAULT_MAX_MERGED_SEGMENT_SETTING.get(indexSettings.getNodeSettings()); + this.defaultMaxTimeBasedMergedSegment = DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT_SETTING.get(indexSettings.getNodeSettings()); ByteSizeValue maxMergedSegment = indexSettings.getValue(INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING); double segmentsPerTier = indexSettings.getValue(INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING); int mergeFactor = indexSettings.getValue(INDEX_MERGE_POLICY_MERGE_FACTOR_SETTING); @@ -315,8 +331,8 @@ void setMergeFactor(int mergeFactor) { void setMaxMergedSegment(ByteSizeValue maxMergedSegment) { // We use 0 as a placeholder for "unset". 
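The two node-scope settings above only move the built-in ceilings into configuration; an explicit per-index index.merge.policy.max_merged_segment still takes precedence, and the 0 "unset" placeholder handled below falls back to the node-wide default. The precedence in one line, as a sketch with hypothetical names for the resolved values:

    final class MergeSegmentDefaults {
        // A per-index value wins; the 0 placeholder ("unset") falls back to the
        // node-wide default introduced by the new settings.
        static long effectiveMaxMergedSegmentBytes(long perIndexBytes, long nodeDefaultBytes) {
            return perIndexBytes == 0 ? nodeDefaultBytes : perIndexBytes;
        }
    }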
if (maxMergedSegment.getBytes() == 0) { - tieredMergePolicy.setMaxMergedSegmentMB(DEFAULT_MAX_MERGED_SEGMENT.getMbFrac()); - timeBasedMergePolicy.setMaxMergeMB(DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT.getMbFrac()); + tieredMergePolicy.setMaxMergedSegmentMB(defaultMaxMergedSegment.getMbFrac()); + timeBasedMergePolicy.setMaxMergeMB(defaultMaxTimeBasedMergedSegment.getMbFrac()); } else { tieredMergePolicy.setMaxMergedSegmentMB(maxMergedSegment.getMbFrac()); timeBasedMergePolicy.setMaxMergeMB(maxMergedSegment.getMbFrac()); diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index a496429cc3e2b..ed7fab325408e 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -1140,7 +1140,7 @@ public void externalRefresh(String source, ActionListener */ // TODO: Remove or rename for increased clarity public void flush(boolean force, boolean waitIfOngoing) throws EngineException { - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); flush(force, waitIfOngoing, future); future.actionGet(); } @@ -1167,7 +1167,7 @@ public void flush(boolean force, boolean waitIfOngoing) throws EngineException { * a lucene commit if nothing needs to be committed. */ public final void flush() throws EngineException { - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); flush(false, false, future); future.actionGet(); } diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 9419cd7e6ab5f..6cdd86ce6c9a7 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -759,13 +759,11 @@ private static String loadHistoryUUID(Map commitData) { private ExternalReaderManager createReaderManager(RefreshWarmerListener externalRefreshListener) throws EngineException { boolean success = false; + ElasticsearchDirectoryReader directoryReader = null; ElasticsearchReaderManager internalReaderManager = null; try { try { - final ElasticsearchDirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap( - DirectoryReader.open(indexWriter), - shardId - ); + directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter), shardId); lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); internalReaderManager = createInternalReaderManager(directoryReader); ExternalReaderManager externalReaderManager = new ExternalReaderManager(internalReaderManager, externalRefreshListener); @@ -782,7 +780,9 @@ private ExternalReaderManager createReaderManager(RefreshWarmerListener external } } finally { if (success == false) { // release everything we created on a failure - IOUtils.closeWhileHandlingException(internalReaderManager, indexWriter); + // make sure that we close the directory reader even if the internal reader manager has failed to initialize + var reader = internalReaderManager == null ? 
directoryReader : internalReaderManager; + IOUtils.closeWhileHandlingException(reader, indexWriter); } } } @@ -1039,11 +1039,19 @@ private VersionValue getVersionFromMap(BytesRef id) { // but we only need to do this once since the last operation per ID is to add to the version // map so once we pass this point we can safely lookup from the version map. if (versionMap.isUnsafe()) { - lastUnsafeSegmentGenerationForGets.set(lastCommittedSegmentInfos.getGeneration() + 1); refreshInternalSearcher(UNSAFE_VERSION_MAP_REFRESH_SOURCE, true); + // After the refresh, the doc that triggered it must now be part of the last commit. + // In rare cases, there could be other flush cycles completed in between the above line + // and the line below which push the last commit generation further. But that's OK. + // The invariant here is that the doc is available within the generations of commits up to + // lastUnsafeSegmentGenerationForGets (inclusive). Therefore it is ok for it to be larger, + // which means the search shard needs to wait for extra generations and these generations + // are guaranteed to happen since they are all committed. + lastUnsafeSegmentGenerationForGets.set(lastCommittedSegmentInfos.getGeneration()); } versionMap.enforceSafeAccess(); } + // The versionMap can still be unsafe at this point due to the archive being unsafe } return versionMap.getUnderLock(id); } @@ -2461,7 +2469,7 @@ public IndexCommitRef acquireLastIndexCommit(final boolean flushFirst) throws En if (flushFirst) { logger.trace("start flush for snapshot"); // TODO: Split acquireLastIndexCommit into two apis one with blocking flushes one without - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); flush(false, true, future); future.actionGet(); logger.trace("finish flush for snapshot"); diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java index addc6f33c9eba..a18ea0f90ec08 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java @@ -112,7 +112,7 @@ protected BinaryDocValues getBinaryDocValues(LeafReaderContext context, String f final BitSet rootDocs = nested.rootDocs(context); final DocIdSetIterator innerDocs = nested.innerDocs(context); final int maxChildren = nested.getNestedSort() != null ?
nested.getNestedSort().getMaxChildren() : Integer.MAX_VALUE; - selectedValues = sortMode.select(values, missingBytes, rootDocs, innerDocs, context.reader().maxDoc(), maxChildren); + selectedValues = sortMode.select(values, missingBytes, rootDocs, innerDocs, maxChildren); } return selectedValues; } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java index 827e1618adde2..e8d4363ca9932 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java @@ -90,7 +90,7 @@ private NumericDocValues getNumericDocValues(LeafReaderContext context, long mis final BitSet rootDocs = nested.rootDocs(context); final DocIdSetIterator innerDocs = nested.innerDocs(context); final int maxChildren = nested.getNestedSort() != null ? nested.getNestedSort().getMaxChildren() : Integer.MAX_VALUE; - return sortMode.select(values, missingValue, rootDocs, innerDocs, context.reader().maxDoc(), maxChildren); + return sortMode.select(values, missingValue, rootDocs, innerDocs, maxChildren); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 996c6243064e9..17af6259ca27c 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -613,8 +613,12 @@ private static void postProcessDynamicArrayMapping(DocumentParserContext context || context.isCopyToField(fullFieldName) || mappers.size() < MIN_DIMS_FOR_DYNAMIC_FLOAT_MAPPING || mappers.size() > MAX_DIMS_COUNT + // Anything that is NOT a number or anything that IS a number but not mapped to `float` should NOT be mapped to dense_vector || mappers.stream() - .allMatch(m -> m instanceof NumberFieldMapper.Builder nb && nb.type != NumberFieldMapper.NumberType.FLOAT)) { + .anyMatch( + m -> m instanceof NumberFieldMapper.Builder == false + || ((NumberFieldMapper.Builder) m).type != NumberFieldMapper.NumberType.FLOAT + )) { return; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java index 1949249b9be2d..5a0d9c7c0cf79 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java @@ -954,7 +954,7 @@ public BlockLoader blockLoader(BlockLoaderContext blContext) { throw new IllegalArgumentException( "fetching values from a text field [" + name() - + "] is supported because synthetic _source is enabled and we don't have a way to load the fields" + + "] is not supported because synthetic _source is enabled and we don't have a way to load the fields" ); } return BlockSourceReader.bytesRefs(SourceValueFetcher.toString(blContext.sourcePaths(name()))); @@ -1019,7 +1019,7 @@ protected BytesRef storedToBytesRef(Object stored) { throw new IllegalArgumentException( "fetching values from a text field [" + name() - + "] is supported because synthetic _source is enabled and we don't have a way to load the fields" + + "] is not supported because synthetic _source is enabled and we don't have a way to load the fields" ); } return new 
SourceValueFetcherSortedBinaryIndexFieldData.Builder( diff --git a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java index 2e39b13b34c78..0b3b15670ef78 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.gateway.WriteStateException; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersions; @@ -466,13 +467,13 @@ public RetentionLeases loadRetentionLeases(final Path path) throws IOException { synchronized (retentionLeasePersistenceLock) { retentionLeases = RetentionLeases.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, path); } + return emptyIfNull(retentionLeases); + } - // TODO after backporting we expect this never to happen in 8.x, so adjust this to throw an exception instead. - assert Version.CURRENT.major <= 8 : "throw an exception instead of returning EMPTY on null"; - if (retentionLeases == null) { - return RetentionLeases.EMPTY; - } - return retentionLeases; + @UpdateForV9 + private static RetentionLeases emptyIfNull(RetentionLeases retentionLeases) { + // we expect never to see a null in 8.x, so adjust this to throw an exception from v9 onwards. + return retentionLeases == null ? RetentionLeases.EMPTY : retentionLeases; } private final Object retentionLeasePersistenceLock = new Object(); diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index f4812f280f917..8b6f6afb72042 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -285,6 +285,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl private final LongSupplier relativeTimeInNanosSupplier; private volatile long startedRelativeTimeInNanos; private volatile long indexingTimeBeforeShardStartedInNanos; + private final SubscribableListener waitForEngineOrClosedShardListeners = new SubscribableListener<>(); // the translog keeps track of the GCP, but unpromotable shards have no translog so we need to track the GCP here instead private volatile long globalCheckPointIfUnpromotable; @@ -1403,7 +1404,7 @@ public boolean flush(FlushRequest request) { verifyNotClosed(); final long time = System.nanoTime(); // TODO: Transition this method to async to support async flush - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); getEngine().flush(force, waitIfOngoing, future); Engine.FlushResult flushResult = future.actionGet(); flushMetric.inc(System.nanoTime() - time); @@ -1658,6 +1659,7 @@ public void close(String reason, boolean flushEngine) throws IOException { synchronized (mutex) { changeState(IndexShardState.CLOSED, reason); } + checkAndCallWaitForEngineOrClosedShardListeners(); } finally { final Engine engine = this.currentEngineReference.getAndSet(null); try { @@ -2016,6 +2018,7 @@ private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier) t onSettingsChanged(); assert assertSequenceNumbersInCommit(); 
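The DocumentParser change above fixes an inverted guard: the early return should fire when any parsed value is not a float-mapped number, which is the De Morgan negation of "all values are float-mapped", not allMatch of the negated predicate. A small runnable demonstration of why the two differ:

    import java.util.List;

    // De Morgan check: the guard should fire when ANY element fails the "float" test,
    // i.e. the negation of "ALL elements pass", not "ALL elements fail".
    final class GuardDemo {
        public static void main(String[] args) {
            List<Object> values = List.of(1.0f, "not a number", 2.0f);

            boolean allAreNotFloats = values.stream().allMatch(v -> (v instanceof Float) == false); // false: floats exist
            boolean anyIsNotFloat = values.stream().anyMatch(v -> (v instanceof Float) == false);   // true: the String fails

            // Only the anyMatch form correctly rejects this mixed list.
            System.out.println(allAreNotFloats + " vs " + anyIsNotFloat); // prints "false vs true"
        }
    }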
recoveryState.validateCurrentStage(RecoveryState.Stage.TRANSLOG); + checkAndCallWaitForEngineOrClosedShardListeners(); } private boolean assertSequenceNumbersInCommit() throws IOException { @@ -3820,16 +3823,18 @@ && isSearchIdle() // lets skip this refresh since we are search idle and // don't necessarily need to refresh. the next searcher access will register a refreshListener and that will // cause the next schedule to refresh. + logger.trace("scheduledRefresh: search-idle, skipping refresh"); engine.maybePruneDeletes(); // try to prune the deletes in the engine if we accumulated some setRefreshPending(engine); l.onResponse(false); return; } else { - logger.trace("refresh with source [schedule]"); + logger.trace("scheduledRefresh: refresh with source [schedule]"); engine.maybeRefresh("schedule", l.map(Engine.RefreshResult::refreshed)); return; } } + logger.trace("scheduledRefresh: no refresh needed"); engine.maybePruneDeletes(); // try to prune the deletes in the engine if we accumulated some l.onResponse(false); }); @@ -3925,7 +3930,7 @@ public final void ensureShardSearchActive(Consumer listener) { // a refresh can be a costly operation, so we should fork to a refresh thread to be safe: threadPool.executor(ThreadPool.Names.REFRESH).execute(() -> { if (location == pendingRefreshLocation.get()) { - getEngine().maybeRefresh("ensure-shard-search-active", PlainActionFuture.newFuture()); + getEngine().maybeRefresh("ensure-shard-search-active", new PlainActionFuture<>()); } }); } @@ -4181,10 +4186,28 @@ public void waitForSegmentGeneration(long segmentGeneration, ActionListener listener) { + waitForEngineOrClosedShardListeners.addListener(listener); + } + /** * Registers a listener for an event when the shard advances to the provided primary term and segment generation */ public void waitForPrimaryTermAndGeneration(long primaryTerm, long segmentGeneration, ActionListener listener) { - getEngine().addPrimaryTermAndGenerationListener(primaryTerm, segmentGeneration, listener); + waitForEngineOrClosedShard( + listener.delegateFailureAndWrap( + (l, ignored) -> getEngine().addPrimaryTermAndGenerationListener(primaryTerm, segmentGeneration, l) + ) + ); } + } diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesFeatures.java b/server/src/main/java/org/elasticsearch/indices/IndicesFeatures.java new file mode 100644 index 0000000000000..7fb52bcd0be1c --- /dev/null +++ b/server/src/main/java/org/elasticsearch/indices/IndicesFeatures.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
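waitForEngineOrClosedShard parks callers on the new SubscribableListener, which is completed from exactly two places: innerOpenEngineAndTranslog once the engine is ready, and close once the shard shuts down. Either way, waiters such as waitForPrimaryTermAndGeneration resume instead of tripping over a missing engine. A rough sketch of the rendezvous using CompletableFuture in place of SubscribableListener:

    import java.util.concurrent.CompletableFuture;

    // Rendezvous sketch: listeners added before the event still fire once the
    // engine opens or the shard closes, whichever happens first.
    final class EngineGate {
        private final CompletableFuture<Void> engineReadyOrClosed = new CompletableFuture<>();

        void onEngineOpened() {               // called from engine startup
            engineReadyOrClosed.complete(null);
        }

        void onShardClosed() {                // called from shard close
            engineReadyOrClosed.complete(null);
        }

        void waitForEngineOrClosedShard(Runnable action) {
            // Completing twice is a no-op, so both triggers are safe.
            engineReadyOrClosed.thenRun(action);
        }
    }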
+ */ + +package org.elasticsearch.indices; + +import org.elasticsearch.Version; +import org.elasticsearch.features.FeatureSpecification; +import org.elasticsearch.features.NodeFeature; + +import java.util.Map; + +public class IndicesFeatures implements FeatureSpecification { + @Override + public Map getHistoricalFeatures() { + return Map.of(IndicesService.SUPPORTS_AUTO_PUT, Version.V_8_8_0); + } +} diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index bcd5b6015df51..305494697216f 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -17,7 +17,6 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ResourceAlreadyExistsException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.mapping.put.AutoPutMappingAction; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; @@ -75,6 +74,8 @@ import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.ShardLock; import org.elasticsearch.env.ShardLockObtainFailedException; +import org.elasticsearch.features.FeatureService; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.gateway.MetaStateService; import org.elasticsearch.gateway.MetadataStateFormat; import org.elasticsearch.index.Index; @@ -207,6 +208,8 @@ public class IndicesService extends AbstractLifecycleComponent Setting.Property.NodeScope ); + static final NodeFeature SUPPORTS_AUTO_PUT = new NodeFeature("indices.auto_put_supported"); + /** * The node's settings. */ @@ -226,6 +229,7 @@ public class IndicesService extends AbstractLifecycleComponent private final ScriptService scriptService; private final ClusterService clusterService; private final Client client; + private final FeatureService featureService; private volatile Map indices = Map.of(); private final Map> pendingDeletes = new HashMap<>(); private final AtomicInteger numUncompletedDeletes = new AtomicInteger(); @@ -284,6 +288,7 @@ public IndicesService( ScriptService scriptService, ClusterService clusterService, Client client, + FeatureService featureService, MetaStateService metaStateService, Collection>> engineFactoryProviders, Map directoryFactories, @@ -321,6 +326,7 @@ public IndicesService( this.scriptService = scriptService; this.clusterService = clusterService; this.client = client; + this.featureService = featureService; this.idFieldDataEnabled = INDICES_ID_FIELD_DATA_ENABLED_SETTING.get(clusterService.getSettings()); clusterService.getClusterSettings().addSettingsUpdateConsumer(INDICES_ID_FIELD_DATA_ENABLED_SETTING, this::setIdFieldDataEnabled); this.indicesFieldDataCache = new IndicesFieldDataCache(settings, new IndexFieldDataCache.Listener() { @@ -903,7 +909,7 @@ public void createShard( assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS : "mapping update consumer only required by local shards recovery"; client.execute( - clusterService.state().nodes().getMinNodeVersion().onOrAfter(Version.V_8_8_0) + featureService.clusterHasFeature(clusterService.state(), SUPPORTS_AUTO_PUT) ? 
AutoPutMappingAction.INSTANCE : PutMappingAction.INSTANCE, new PutMappingRequest().setConcreteIndex(shardRouting.index()) diff --git a/server/src/main/java/org/elasticsearch/indices/SystemIndexMappingUpdateService.java b/server/src/main/java/org/elasticsearch/indices/SystemIndexMappingUpdateService.java index a0667db91daf6..6f272d29efee2 100644 --- a/server/src/main/java/org/elasticsearch/indices/SystemIndexMappingUpdateService.java +++ b/server/src/main/java/org/elasticsearch/indices/SystemIndexMappingUpdateService.java @@ -92,7 +92,7 @@ public void clusterChanged(ClusterChangedEvent event) { } // if we're in a mixed-version cluster, exit - if (state.hasMixedSystemIndexVersions()) { + if (state.nodes().getMaxNodeVersion().after(state.nodes().getSmallestNonClientNodeVersion())) { logger.debug("Skipping system indices up-to-date check as cluster has mixed versions"); return; } diff --git a/server/src/main/java/org/elasticsearch/indices/SystemIndices.java b/server/src/main/java/org/elasticsearch/indices/SystemIndices.java index 56b0a07fcbc71..1da3e2dac261a 100644 --- a/server/src/main/java/org/elasticsearch/indices/SystemIndices.java +++ b/server/src/main/java/org/elasticsearch/indices/SystemIndices.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Booleans; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; @@ -49,7 +50,6 @@ import java.util.Map.Entry; import java.util.Optional; import java.util.Set; -import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Predicate; import java.util.stream.Collectors; @@ -929,7 +929,7 @@ public static void cleanUpFeature( Metadata metadata = clusterService.state().getMetadata(); final List exceptions = new ArrayList<>(); - final Consumer handleResponse = resetFeatureStateStatus -> { + final CheckedConsumer handleResponse = resetFeatureStateStatus -> { if (resetFeatureStateStatus.getStatus() == ResetFeatureStateStatus.Status.FAILURE) { synchronized (exceptions) { exceptions.add(resetFeatureStateStatus.getException()); diff --git a/server/src/main/java/org/elasticsearch/inference/InferenceService.java b/server/src/main/java/org/elasticsearch/inference/InferenceService.java index 2d7ee9f210e64..634b8af38a317 100644 --- a/server/src/main/java/org/elasticsearch/inference/InferenceService.java +++ b/server/src/main/java/org/elasticsearch/inference/InferenceService.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.ActionListener; import java.io.Closeable; +import java.util.List; import java.util.Map; import java.util.Set; @@ -61,7 +62,12 @@ public interface InferenceService extends Closeable { * @param taskSettings Settings in the request to override the model's defaults * @param listener Inference result listener */ - void infer(Model model, String input, Map taskSettings, ActionListener listener); + void infer( + Model model, + List input, + Map taskSettings, + ActionListener> listener + ); /** * Start or prepare the model for use. 
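Registering indices.auto_put_supported as a historical feature pinned to 8.8.0 lets createShard ask whether the whole cluster has the capability instead of comparing the minimum node version; for historical features, nodes at or above the pinned version are credited with the feature even though they never advertised it. A sketch of the cluster-wide check with a hypothetical node model:

    import java.util.List;
    import java.util.Set;

    // Sketch: a capability check expressed as "all nodes advertise the feature"
    // rather than "minimum node version >= X". NodeView is a hypothetical stand-in
    // for the per-node feature sets carried in cluster state.
    final class ClusterFeatureCheck {
        record NodeView(Set<String> features) {}

        static boolean clusterHasFeature(List<NodeView> nodes, String featureId) {
            return nodes.stream().allMatch(n -> n.features().contains(featureId));
        }
    }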
diff --git a/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/CustomPassageFormatter.java b/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/CustomPassageFormatter.java index eb87dc982543f..6ae2f53a94ad8 100644 --- a/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/CustomPassageFormatter.java +++ b/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/CustomPassageFormatter.java @@ -48,7 +48,7 @@ public Snippet[] format(Passage[] passages, String content) { assert end > start; // Look ahead to expand 'end' past all overlapping: while (i + 1 < passage.getNumMatches() && passage.getMatchStarts()[i + 1] < end) { - end = passage.getMatchEnds()[++i]; + end = Math.max(passage.getMatchEnds()[++i], end); } end = Math.min(end, passage.getEndOffset()); // in case match straddles past passage diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 5f8f35ad3cd2b..1c1b9745befe8 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -43,7 +43,6 @@ import org.elasticsearch.core.Assertions; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.PathUtils; -import org.elasticsearch.core.Releasables; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; import org.elasticsearch.env.Environment; @@ -508,8 +507,6 @@ public synchronized void close() throws IOException { toClose.add(injector.getInstance(SnapshotsService.class)); toClose.add(injector.getInstance(SnapshotShardsService.class)); toClose.add(injector.getInstance(RepositoriesService.class)); - toClose.add(() -> stopWatch.stop().start("client")); - Releasables.close(injector.getInstance(Client.class)); toClose.add(() -> stopWatch.stop().start("indices_cluster")); toClose.add(injector.getInstance(IndicesClusterStateService.class)); toClose.add(() -> stopWatch.stop().start("indices")); diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index 20c8529935867..8f557b4e9db5c 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -64,6 +64,7 @@ import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.inject.Injector; import org.elasticsearch.common.inject.Key; +import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.inject.ModulesBuilder; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.logging.DeprecationCategory; @@ -204,7 +205,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; @@ -245,7 +245,17 @@ static NodeConstruction prepareConstruction( List closeables = new ArrayList<>(); try { NodeConstruction constructor = new NodeConstruction(closeables); - constructor.construct(initialEnvironment, serviceProvider, forbidPrivateIndexSettings); + + Settings settings = constructor.createEnvironment(initialEnvironment, serviceProvider); + + ThreadPool threadPool = constructor.createThreadPool(settings); + SettingsModule settingsModule = constructor.validateSettings(initialEnvironment.settings(), settings, threadPool); + + SearchModule searchModule = 
constructor.createSearchModule(settingsModule.getSettings(), threadPool); + constructor.createClientAndRegistries(settingsModule.getSettings(), threadPool, searchModule); + + constructor.construct(threadPool, settingsModule, searchModule, serviceProvider, forbidPrivateIndexSettings); + return constructor; } catch (IOException e) { IOUtils.closeWhileHandlingException(closeables); @@ -263,6 +273,7 @@ static NodeConstruction prepareConstruction( private final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(Node.class); private final List resourcesToClose; + private final ModulesBuilder modules = new ModulesBuilder(); /* * References for storing in a Node */ @@ -330,7 +341,7 @@ private Optional getSinglePlugin(Class pluginClass) { return getSinglePlugin(pluginsService.filterPlugins(pluginClass), pluginClass); } - private Optional getSinglePlugin(Stream plugins, Class pluginClass) { + private static Optional getSinglePlugin(Stream plugins, Class pluginClass) { var it = plugins.iterator(); if (it.hasNext() == false) { return Optional.empty(); @@ -340,18 +351,17 @@ private Optional getSinglePlugin(Stream plugins, Class pluginClass) List allPlugins = new ArrayList<>(); allPlugins.add(plugin); it.forEachRemaining(allPlugins::add); - throw new IllegalStateException("A single " + pluginClass.getName() + " was expected but got :" + allPlugins); + throw new IllegalStateException("A single " + pluginClass.getName() + " was expected but got " + allPlugins); } return Optional.of(plugin); } - private void construct(Environment initialEnvironment, NodeServiceProvider serviceProvider, boolean forbidPrivateIndexSettings) - throws IOException { + private Settings createEnvironment(Environment initialEnvironment, NodeServiceProvider serviceProvider) { // Pass the node settings to the DeprecationLogger class so that it can have the deprecation.skip_deprecated_settings setting: - DeprecationLogger.initialize(initialEnvironment.settings()); - Settings environmentSettings = initialEnvironment.settings(); + Settings envSettings = initialEnvironment.settings(); + DeprecationLogger.initialize(envSettings); - final JvmInfo jvmInfo = JvmInfo.jvmInfo(); + JvmInfo jvmInfo = JvmInfo.jvmInfo(); logger.info( "version[{}], pid[{}], build[{}/{}/{}], OS[{}/{}/{}], JVM[{}/{}/{}/{}]", Build.current().qualifiedVersion(), @@ -375,7 +385,7 @@ private void construct(Environment initialEnvironment, NodeServiceProvider servi Build.current().qualifiedVersion() ); } - if (Environment.PATH_SHARED_DATA_SETTING.exists(environmentSettings)) { + if (Environment.PATH_SHARED_DATA_SETTING.exists(envSettings)) { // NOTE: this must be done with an explicit check here because the deprecation property on a path setting will // cause ES to fail to start since logging is not yet initialized on first read of the setting deprecationLogger.warn( @@ -394,7 +404,7 @@ private void construct(Environment initialEnvironment, NodeServiceProvider servi + "multiple disks. This feature will be removed in a future release." 
); } - if (Environment.dataPathUsesList(environmentSettings)) { + if (Environment.dataPathUsesList(envSettings)) { // already checked for multiple values above, so if this is a list it is a single valued list deprecationLogger.warn( DeprecationCategory.SETTINGS, @@ -418,8 +428,9 @@ private void construct(Environment initialEnvironment, NodeServiceProvider servi (e, apmConfig) -> logger.error("failed to delete temporary APM config file [{}], reason: [{}]", apmConfig, e.getMessage()) ); - pluginsService = serviceProvider.newPluginService(initialEnvironment, environmentSettings); - final Settings settings = Node.mergePluginSettings(pluginsService.pluginMap(), environmentSettings); + pluginsService = serviceProvider.newPluginService(initialEnvironment, envSettings); + modules.bindToInstance(PluginsService.class, pluginsService); + Settings settings = Node.mergePluginSettings(pluginsService.pluginMap(), envSettings); /* * Create the environment based on the finalized view of the settings. This is to ensure that components get the same setting @@ -427,54 +438,37 @@ private void construct(Environment initialEnvironment, NodeServiceProvider servi */ environment = new Environment(settings, initialEnvironment.configFile()); Environment.assertEquivalent(initialEnvironment, environment); + modules.bindToInstance(Environment.class, environment); - final List> executorBuilders = pluginsService.flatMap(p -> p.getExecutorBuilders(settings)).toList(); + return settings; + } - final ThreadPool threadPool = new ThreadPool(settings, executorBuilders.toArray(new ExecutorBuilder[0])); + private ThreadPool createThreadPool(Settings settings) throws IOException { + ThreadPool threadPool = new ThreadPool( + settings, + pluginsService.flatMap(p -> p.getExecutorBuilders(settings)).toArray(ExecutorBuilder[]::new) + ); resourcesToClose.add(() -> ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS)); - final ResourceWatcherService resourceWatcherService = new ResourceWatcherService(settings, threadPool); - resourcesToClose.add(resourceWatcherService); + modules.bindToInstance(ThreadPool.class, threadPool); + // adds the context to the DeprecationLogger so that it does not need to be injected everywhere HeaderWarning.setThreadContext(threadPool.getThreadContext()); resourcesToClose.add(() -> HeaderWarning.removeThreadContext(threadPool.getThreadContext())); - final Set taskHeaders = Stream.concat( - pluginsService.filterPlugins(ActionPlugin.class).flatMap(p -> p.getTaskHeaders().stream()), - Task.HEADERS_TO_COPY.stream() - ).collect(Collectors.toSet()); - - final TelemetryProvider telemetryProvider = getSinglePlugin(TelemetryPlugin.class).map(p -> p.getTelemetryProvider(settings)) - .orElse(TelemetryProvider.NOOP); - - final Tracer tracer = telemetryProvider.getTracer(); - - final TaskManager taskManager = new TaskManager(settings, threadPool, taskHeaders, tracer); + return threadPool; + } + private SettingsModule validateSettings(Settings envSettings, Settings settings, ThreadPool threadPool) throws IOException { // register the node.data, node.ingest, node.master, node.remote_cluster_client settings here so we can mark them private - final List> additionalSettings = new ArrayList<>(pluginsService.flatMap(Plugin::getSettings).toList()); + List> additionalSettings = new ArrayList<>(pluginsService.flatMap(Plugin::getSettings).toList()); for (final ExecutorBuilder builder : threadPool.builders()) { additionalSettings.addAll(builder.getRegisteredSettings()); } SettingsExtension.load().forEach(e -> 
additionalSettings.addAll(e.getSettings())); - client = new NodeClient(settings, threadPool); - final ScriptModule scriptModule = new ScriptModule(settings, pluginsService.filterPlugins(ScriptPlugin.class).toList()); - final ScriptService scriptService = serviceProvider.newScriptService( - pluginsService, - settings, - scriptModule.engines, - scriptModule.contexts, - threadPool::absoluteTimeInMillis - ); - AnalysisModule analysisModule = new AnalysisModule( - environment, - pluginsService.filterPlugins(AnalysisPlugin.class).toList(), - pluginsService.getStablePluginRegistry() - ); - // this is as early as we can validate settings at this point. we already pass them to ScriptModule as well as ThreadPool + // this is as early as we can validate settings at this point. we already pass them to ThreadPool // so we might be late here already - - final SettingsModule settingsModule = new SettingsModule( + SettingsModule settingsModule = new SettingsModule( settings, additionalSettings, pluginsService.flatMap(Plugin::getSettingsFilter).toList() @@ -482,20 +476,114 @@ private void construct(Environment initialEnvironment, NodeServiceProvider servi // creating `NodeEnvironment` breaks the ability to rollback to 7.x on an 8.0 upgrade (`upgradeLegacyNodeFolders`) so do this // after settings validation. - nodeEnvironment = new NodeEnvironment(environmentSettings, environment); + nodeEnvironment = new NodeEnvironment(envSettings, environment); logger.info( "node name [{}], node ID [{}], cluster name [{}], roles {}", - Node.NODE_NAME_SETTING.get(environmentSettings), + Node.NODE_NAME_SETTING.get(envSettings), nodeEnvironment.nodeId(), - ClusterName.CLUSTER_NAME_SETTING.get(environmentSettings).value(), + ClusterName.CLUSTER_NAME_SETTING.get(envSettings).value(), DiscoveryNode.getRolesFromSettings(settings) .stream() .map(DiscoveryNodeRole::roleName) .collect(Collectors.toCollection(LinkedHashSet::new)) ); resourcesToClose.add(nodeEnvironment); + modules.bindToInstance(NodeEnvironment.class, nodeEnvironment); + + return settingsModule; + } + + private SearchModule createSearchModule(Settings settings, ThreadPool threadPool) { + IndexSearcher.setMaxClauseCount(SearchUtils.calculateMaxClauseValue(threadPool)); + return new SearchModule(settings, pluginsService.filterPlugins(SearchPlugin.class).toList()); + } + + /** + * Create various objects that are stored as member variables. This is so they are accessible as soon as possible. 
+ */ + private void createClientAndRegistries(Settings settings, ThreadPool threadPool, SearchModule searchModule) { + client = new NodeClient(settings, threadPool); + modules.add(b -> { + b.bind(Client.class).toInstance(client); + b.bind(NodeClient.class).toInstance(client); + }); + localNodeFactory = new Node.LocalNodeFactory(settings, nodeEnvironment.nodeId()); + InferenceServiceRegistry inferenceServiceRegistry = new InferenceServiceRegistry( + pluginsService.filterPlugins(InferenceServicePlugin.class).toList(), + new InferenceServicePlugin.InferenceServiceFactoryContext(client) + ); + resourcesToClose.add(inferenceServiceRegistry); + modules.bindToInstance(InferenceServiceRegistry.class, inferenceServiceRegistry); + + namedWriteableRegistry = new NamedWriteableRegistry( + Stream.of( + NetworkModule.getNamedWriteables().stream(), + IndicesModule.getNamedWriteables().stream(), + searchModule.getNamedWriteables().stream(), + pluginsService.flatMap(Plugin::getNamedWriteables), + ClusterModule.getNamedWriteables().stream(), + SystemIndexMigrationExecutor.getNamedWriteables().stream(), + inferenceServiceRegistry.getNamedWriteables().stream() + ).flatMap(Function.identity()).toList() + ); + xContentRegistry = new NamedXContentRegistry( + Stream.of( + NetworkModule.getNamedXContents().stream(), + IndicesModule.getNamedXContents().stream(), + searchModule.getNamedXContents().stream(), + pluginsService.flatMap(Plugin::getNamedXContent), + ClusterModule.getNamedXWriteables().stream(), + SystemIndexMigrationExecutor.getNamedXContentParsers().stream(), + HealthNodeTaskExecutor.getNamedXContentParsers().stream() + ).flatMap(Function.identity()).toList() + ); + modules.add(b -> { + b.bind(NamedWriteableRegistry.class).toInstance(namedWriteableRegistry); + b.bind(NamedXContentRegistry.class).toInstance(xContentRegistry); + }); + } + + private void construct( + ThreadPool threadPool, + SettingsModule settingsModule, + SearchModule searchModule, + NodeServiceProvider serviceProvider, + boolean forbidPrivateIndexSettings + ) throws IOException { + + Settings settings = settingsModule.getSettings(); + + final ResourceWatcherService resourceWatcherService = new ResourceWatcherService(settings, threadPool); + resourcesToClose.add(resourceWatcherService); + + final Set taskHeaders = Stream.concat( + pluginsService.filterPlugins(ActionPlugin.class).flatMap(p -> p.getTaskHeaders().stream()), + Task.HEADERS_TO_COPY.stream() + ).collect(Collectors.toSet()); + + final TelemetryProvider telemetryProvider = getSinglePlugin(TelemetryPlugin.class).map(p -> p.getTelemetryProvider(settings)) + .orElse(TelemetryProvider.NOOP); + + final Tracer tracer = telemetryProvider.getTracer(); + + final TaskManager taskManager = new TaskManager(settings, threadPool, taskHeaders, tracer); + + final ScriptModule scriptModule = new ScriptModule(settings, pluginsService.filterPlugins(ScriptPlugin.class).toList()); + final ScriptService scriptService = serviceProvider.newScriptService( + pluginsService, + settings, + scriptModule.engines, + scriptModule.contexts, + threadPool::absoluteTimeInMillis + ); + AnalysisModule analysisModule = new AnalysisModule( + environment, + pluginsService.filterPlugins(AnalysisPlugin.class).toList(), + pluginsService.getStablePluginRegistry() + ); + ScriptModule.registerClusterSettingsListeners(scriptService, settingsModule.getClusterSettings()); final NetworkService networkService = new NetworkService( pluginsService.filterPlugins(DiscoveryPlugin.class) @@ -518,12 +606,6 @@ private void 
construct(Environment initialEnvironment, NodeServiceProvider servi Supplier documentParsingObserverSupplier = getDocumentParsingObserverSupplier(); - var factoryContext = new InferenceServicePlugin.InferenceServiceFactoryContext(client); - final InferenceServiceRegistry inferenceServiceRegistry = new InferenceServiceRegistry( - pluginsService.filterPlugins(InferenceServicePlugin.class).toList(), - factoryContext - ); - final IngestService ingestService = new IngestService( clusterService, threadPool, @@ -545,30 +627,6 @@ private void construct(Environment initialEnvironment, NodeServiceProvider servi ); final UsageService usageService = new UsageService(); - SearchModule searchModule = new SearchModule(settings, pluginsService.filterPlugins(SearchPlugin.class).toList()); - IndexSearcher.setMaxClauseCount(SearchUtils.calculateMaxClauseValue(threadPool)); - final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry( - Stream.of( - NetworkModule.getNamedWriteables().stream(), - IndicesModule.getNamedWriteables().stream(), - searchModule.getNamedWriteables().stream(), - pluginsService.flatMap(Plugin::getNamedWriteables), - ClusterModule.getNamedWriteables().stream(), - SystemIndexMigrationExecutor.getNamedWriteables().stream(), - inferenceServiceRegistry.getNamedWriteables().stream() - ).flatMap(Function.identity()).toList() - ); - NamedXContentRegistry xContentRegistry = new NamedXContentRegistry( - Stream.of( - NetworkModule.getNamedXContents().stream(), - IndicesModule.getNamedXContents().stream(), - searchModule.getNamedXContents().stream(), - pluginsService.flatMap(Plugin::getNamedXContent), - ClusterModule.getNamedXWriteables().stream(), - SystemIndexMigrationExecutor.getNamedXContentParsers().stream(), - HealthNodeTaskExecutor.getNamedXContentParsers().stream() - ).flatMap(Function.identity()).toList() - ); final List features = pluginsService.filterPlugins(SystemIndexPlugin.class).map(plugin -> { SystemIndices.validateFeatureName(plugin.getFeatureName(), plugin.getClass().getCanonicalName()); return SystemIndices.Feature.fromSystemIndexPlugin(plugin, settings); @@ -576,7 +634,6 @@ private void construct(Environment initialEnvironment, NodeServiceProvider servi final SystemIndices systemIndices = new SystemIndices(features); final ExecutorSelector executorSelector = systemIndices.getExecutorSelector(); - ModulesBuilder modules = new ModulesBuilder(); final MonitorService monitorService = new MonitorService(settings, nodeEnvironment, threadPool); final FsHealthService fsHealthService = new FsHealthService( settings, @@ -673,6 +730,8 @@ private void construct(Environment initialEnvironment, NodeServiceProvider servi rerouteServiceReference.set(rerouteService); clusterService.setRerouteService(rerouteService); + FeatureService featureService = new FeatureService(pluginsService.loadServiceProviders(FeatureSpecification.class)); + final IndicesService indicesService = new IndicesService( settings, pluginsService, @@ -689,6 +748,7 @@ private void construct(Environment initialEnvironment, NodeServiceProvider servi scriptService, clusterService, client, + featureService, metaStateService, engineFactoryProviders, indexStoreFactories, @@ -737,8 +797,6 @@ private void construct(Environment initialEnvironment, NodeServiceProvider servi threadPool ); - FeatureService featureService = new FeatureService(pluginsService.loadServiceProviders(FeatureSpecification.class)); - record PluginServiceInstances( Client client, ClusterService clusterService, @@ -956,9 +1014,9 @@ record 
PluginServiceInstances( fsHealthService, circuitBreakerService, compatibilityVersions, - featureService.getNodeFeatures() + featureService ); - this.nodeService = new NodeService( + nodeService = new NodeService( settings, threadPool, monitorService, @@ -1037,43 +1095,43 @@ record PluginServiceInstances( ); clusterService.addListener(pluginShutdownService); - final RecoveryPlannerService recoveryPlannerService = getRecoveryPlannerService(threadPool, clusterService, repositoryService); - final DesiredNodesSettingsValidator desiredNodesSettingsValidator = new DesiredNodesSettingsValidator(); + List reloadablePlugins = pluginsService.filterPlugins(ReloadablePlugin.class).toList(); + pluginsService.filterPlugins(ReloadAwarePlugin.class).forEach(p -> p.setReloadCallback(wrapPlugins(reloadablePlugins))); - final MasterHistoryService masterHistoryService = new MasterHistoryService(transportService, threadPool, clusterService); - final CoordinationDiagnosticsService coordinationDiagnosticsService = new CoordinationDiagnosticsService( - clusterService, - transportService, - discoveryModule.getCoordinator(), - masterHistoryService - ); - final HealthService healthService = createHealthService(clusterService, coordinationDiagnosticsService, threadPool); - HealthPeriodicLogger healthPeriodicLogger = createHealthPeriodicLogger(clusterService, settings, client, healthService); - healthPeriodicLogger.init(); - HealthMetadataService healthMetadataService = HealthMetadataService.create(clusterService, featureService, settings); - LocalHealthMonitor localHealthMonitor = LocalHealthMonitor.create( - settings, - clusterService, - nodeService, - threadPool, - client, - featureService + modules.add( + loadDiagnosticServices(settings, discoveryModule.getCoordinator(), clusterService, transportService, featureService, threadPool) ); - HealthInfoCache nodeHealthOverview = HealthInfoCache.create(clusterService); - HealthApiStats healthApiStats = new HealthApiStats(); - List reloadablePlugins = pluginsService.filterPlugins(ReloadablePlugin.class).toList(); - pluginsService.filterPlugins(ReloadAwarePlugin.class).forEach(p -> p.setReloadCallback(wrapPlugins(reloadablePlugins))); + RecoveryPlannerService recoveryPlannerService = getRecoveryPlannerService(threadPool, clusterService, repositoryService); + modules.add(b -> { + serviceProvider.processRecoverySettings(pluginsService, settingsModule.getClusterSettings(), recoverySettings); + SnapshotFilesProvider snapshotFilesProvider = new SnapshotFilesProvider(repositoryService); + var peerRecovery = new PeerRecoverySourceService( + transportService, + indicesService, + clusterService, + recoverySettings, + recoveryPlannerService + ); + resourcesToClose.add(peerRecovery); + b.bind(PeerRecoverySourceService.class).toInstance(peerRecovery); + b.bind(PeerRecoveryTargetService.class) + .toInstance( + new PeerRecoveryTargetService( + client, + threadPool, + transportService, + recoverySettings, + clusterService, + snapshotFilesProvider + ) + ); + }); + + modules.add(loadPluginComponents(pluginComponents)); modules.add(b -> { b.bind(NodeService.class).toInstance(nodeService); - b.bind(NamedXContentRegistry.class).toInstance(xContentRegistry); - b.bind(PluginsService.class).toInstance(pluginsService); - b.bind(Client.class).toInstance(client); - b.bind(NodeClient.class).toInstance(client); - b.bind(Environment.class).toInstance(environment); - b.bind(ThreadPool.class).toInstance(threadPool); - b.bind(NodeEnvironment.class).toInstance(nodeEnvironment); 
b.bind(ResourceWatcherService.class).toInstance(resourceWatcherService); b.bind(CircuitBreakerService.class).toInstance(circuitBreakerService); b.bind(BigArrays.class).toInstance(bigArrays); @@ -1084,7 +1142,6 @@ record PluginServiceInstances( b.bind(IndexingPressure.class).toInstance(indexingLimits); b.bind(UsageService.class).toInstance(usageService); b.bind(AggregationUsageService.class).toInstance(searchModule.getValuesSourceRegistry().getUsageService()); - b.bind(NamedWriteableRegistry.class).toInstance(namedWriteableRegistry); b.bind(MetadataUpgrader.class).toInstance(metadataUpgrader); b.bind(MetaStateService.class).toInstance(metaStateService); b.bind(PersistedClusterStateService.class).toInstance(persistedClusterStateService); @@ -1107,44 +1164,7 @@ record PluginServiceInstances( b.bind(FeatureService.class).toInstance(featureService); b.bind(Coordinator.class).toInstance(discoveryModule.getCoordinator()); b.bind(Reconfigurator.class).toInstance(discoveryModule.getReconfigurator()); - { - serviceProvider.processRecoverySettings(pluginsService, settingsModule.getClusterSettings(), recoverySettings); - final SnapshotFilesProvider snapshotFilesProvider = new SnapshotFilesProvider(repositoryService); - b.bind(PeerRecoverySourceService.class) - .toInstance( - new PeerRecoverySourceService( - transportService, - indicesService, - clusterService, - recoverySettings, - recoveryPlannerService - ) - ); - b.bind(PeerRecoveryTargetService.class) - .toInstance( - new PeerRecoveryTargetService( - client, - threadPool, - transportService, - recoverySettings, - clusterService, - snapshotFilesProvider - ) - ); - } b.bind(HttpServerTransport.class).toInstance(httpServerTransport); - pluginComponents.forEach(p -> { - if (p instanceof PluginComponentBinding pcb) { - @SuppressWarnings("unchecked") - Class clazz = (Class) pcb.inter(); - b.bind(clazz).toInstance(pcb.impl()); - - } else { - @SuppressWarnings("unchecked") - Class clazz = (Class) p.getClass(); - b.bind(clazz).toInstance(p); - } - }); b.bind(PersistentTasksService.class).toInstance(persistentTasksService); b.bind(PersistentTasksClusterService.class).toInstance(persistentTasksClusterService); b.bind(PersistentTasksExecutorRegistry.class).toInstance(registry); @@ -1159,39 +1179,81 @@ record PluginServiceInstances( b.bind(PluginShutdownService.class).toInstance(pluginShutdownService); b.bind(ExecutorSelector.class).toInstance(executorSelector); b.bind(IndexSettingProviders.class).toInstance(indexSettingProviders); - b.bind(DesiredNodesSettingsValidator.class).toInstance(desiredNodesSettingsValidator); - b.bind(HealthService.class).toInstance(healthService); - b.bind(MasterHistoryService.class).toInstance(masterHistoryService); - b.bind(CoordinationDiagnosticsService.class).toInstance(coordinationDiagnosticsService); + b.bind(DesiredNodesSettingsValidator.class).toInstance(new DesiredNodesSettingsValidator()); b.bind(HealthNodeTaskExecutor.class).toInstance(healthNodeTaskExecutor); - b.bind(HealthMetadataService.class).toInstance(healthMetadataService); - b.bind(LocalHealthMonitor.class).toInstance(localHealthMonitor); - b.bind(HealthInfoCache.class).toInstance(nodeHealthOverview); - b.bind(HealthApiStats.class).toInstance(healthApiStats); b.bind(Tracer.class).toInstance(tracer); b.bind(FileSettingsService.class).toInstance(fileSettingsService); b.bind(WriteLoadForecaster.class).toInstance(writeLoadForecaster); - b.bind(HealthPeriodicLogger.class).toInstance(healthPeriodicLogger); 
b.bind(CompatibilityVersions.class).toInstance(compatibilityVersions); - b.bind(InferenceServiceRegistry.class).toInstance(inferenceServiceRegistry); }); if (ReadinessService.enabled(environment)) { - modules.add( - b -> b.bind(ReadinessService.class) - .toInstance(serviceProvider.newReadinessService(pluginsService, clusterService, environment)) + modules.bindToInstance( + ReadinessService.class, + serviceProvider.newReadinessService(pluginsService, clusterService, environment) ); } injector = modules.createInjector(); - // We allocate copies of existing shards by looking for a viable copy of the shard in the cluster and assigning the shard there. - // The search for viable copies is triggered by an allocation attempt (i.e. a reroute) and is performed asynchronously. When it - // completes we trigger another reroute to try the allocation again. This means there is a circular dependency: the allocation - // service needs access to the existing shards allocators (e.g. the GatewayAllocator) which need to be able to trigger a - // reroute, which needs to call into the allocation service. We close the loop here: - clusterModule.setExistingShardsAllocators(injector.getInstance(GatewayAllocator.class)); + postInjection(clusterModule, actionModule, clusterService, transportService, featureService); + } + + private Module loadDiagnosticServices( + Settings settings, + Coordinator coordinator, + ClusterService clusterService, + TransportService transportService, + FeatureService featureService, + ThreadPool threadPool + ) { + + MasterHistoryService masterHistoryService = new MasterHistoryService(transportService, threadPool, clusterService); + CoordinationDiagnosticsService coordinationDiagnosticsService = new CoordinationDiagnosticsService( + clusterService, + transportService, + coordinator, + masterHistoryService + ); + + var serverHealthIndicatorServices = Stream.of( + new StableMasterHealthIndicatorService(coordinationDiagnosticsService, clusterService), + new RepositoryIntegrityHealthIndicatorService(clusterService), + new DiskHealthIndicatorService(clusterService), + new ShardsCapacityHealthIndicatorService(clusterService) + ); + var pluginHealthIndicatorServices = pluginsService.filterPlugins(HealthPlugin.class) + .flatMap(plugin -> plugin.getHealthIndicatorServices().stream()); + + HealthService healthService = new HealthService( + Stream.concat(serverHealthIndicatorServices, pluginHealthIndicatorServices).toList(), + threadPool + ); + HealthPeriodicLogger healthPeriodicLogger = HealthPeriodicLogger.create(settings, clusterService, client, healthService); + HealthMetadataService healthMetadataService = HealthMetadataService.create(clusterService, featureService, settings); + LocalHealthMonitor localHealthMonitor = LocalHealthMonitor.create( + settings, + clusterService, + nodeService, + threadPool, + client, + featureService + ); + HealthInfoCache nodeHealthOverview = HealthInfoCache.create(clusterService); + + return b -> { + b.bind(HealthService.class).toInstance(healthService); + b.bind(MasterHistoryService.class).toInstance(masterHistoryService); + b.bind(CoordinationDiagnosticsService.class).toInstance(coordinationDiagnosticsService); + b.bind(HealthMetadataService.class).toInstance(healthMetadataService); + b.bind(LocalHealthMonitor.class).toInstance(localHealthMonitor); + b.bind(HealthInfoCache.class).toInstance(nodeHealthOverview); + b.bind(HealthApiStats.class).toInstance(new HealthApiStats()); + b.bind(HealthPeriodicLogger.class).toInstance(healthPeriodicLogger); + }; + } + 
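The refactor above keeps repeating one pattern: a service is constructed eagerly (so failures surface during node construction and close-ables can be registered in resourcesToClose right away), and is then handed to the injector through a Module lambda that only binds the finished instance. A minimal, self-contained sketch of that idea follows; MiniRegistry and MiniBinder are hypothetical names standing in for the real ModulesBuilder/Binder types and are not part of the Elasticsearch codebase.

import java.util.HashMap;
import java.util.Map;
import java.util.function.Consumer;

// Hypothetical sketch: a "module" is just a Consumer<MiniBinder> that registers
// pre-built singletons, mirroring modules.add(b -> b.bind(X.class).toInstance(x)).
final class MiniRegistry {

    interface MiniBinder {
        <T> void bind(Class<T> type, T instance);
    }

    private final Map<Class<?>, Object> singletons = new HashMap<>();

    // Collect the bindings contributed by one module lambda.
    void add(Consumer<MiniBinder> module) {
        module.accept(new MiniBinder() {
            @Override
            public <T> void bind(Class<T> type, T instance) {
                singletons.put(type, instance);
            }
        });
    }

    // Look up a bound instance, as injector.getInstance(...) does in the diff.
    <T> T getInstance(Class<T> type) {
        return type.cast(singletons.get(type));
    }

    public static void main(String[] args) {
        MiniRegistry modules = new MiniRegistry();
        StringBuilder service = new StringBuilder("health-periodic-logger"); // stand-in for a real service
        modules.add(b -> b.bind(StringBuilder.class, service));
        System.out.println(modules.getInstance(StringBuilder.class));
    }
}

Binding instances rather than classes keeps the injector a plain lookup table: there is no reflective construction, and the creation order stays explicit in prepareConstruction.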
private Module loadPluginComponents(Collection pluginComponents) { List pluginLifecycleComponents = pluginComponents.stream().map(p -> { if (p instanceof PluginComponentBinding pcb) { return pcb.impl(); @@ -1199,8 +1261,34 @@ record PluginServiceInstances( return p; }).filter(p -> p instanceof LifecycleComponent).map(p -> (LifecycleComponent) p).toList(); resourcesToClose.addAll(pluginLifecycleComponents); - resourcesToClose.add(injector.getInstance(PeerRecoverySourceService.class)); - this.pluginLifecycleComponents = Collections.unmodifiableList(pluginLifecycleComponents); + this.pluginLifecycleComponents = pluginLifecycleComponents; + + return b -> pluginComponents.forEach(p -> { + if (p instanceof PluginComponentBinding pcb) { + @SuppressWarnings("unchecked") + Class clazz = (Class) pcb.inter(); + b.bind(clazz).toInstance(pcb.impl()); + } else { + @SuppressWarnings("unchecked") + Class clazz = (Class) p.getClass(); + b.bind(clazz).toInstance(p); + } + }); + } + + private void postInjection( + ClusterModule clusterModule, + ActionModule actionModule, + ClusterService clusterService, + TransportService transportService, + FeatureService featureService + ) { + // We allocate copies of existing shards by looking for a viable copy of the shard in the cluster and assigning the shard there. + // The search for viable copies is triggered by an allocation attempt (i.e. a reroute) and is performed asynchronously. When it + // completes we trigger another reroute to try the allocation again. This means there is a circular dependency: the allocation + // service needs access to the existing shards allocators (e.g. the GatewayAllocator) which need to be able to trigger a + // reroute, which needs to call into the allocation service. We close the loop here: + clusterModule.setExistingShardsAllocators(injector.getInstance(GatewayAllocator.class)); // Due to Java's type erasure with generics, the injector can't give us exactly what we need, and we have // to resort to some evil casting. 
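The comment carried into postInjection above describes a construction cycle: the allocation service needs the existing-shards allocators (such as the GatewayAllocator), which must be able to trigger a reroute, which in turn calls back into the allocation service. The diff closes that loop after injection with a setter. Below is a self-contained sketch of that wiring under illustrative names; these classes are stand-ins for the real Elasticsearch types, not the actual implementation.

// Hypothetical sketch of closing a construction cycle with a post-construction
// setter, mirroring clusterModule.setExistingShardsAllocators(...) above.
final class CycleDemo {

    interface Allocator {
        void allocate();
    }

    static final class RerouteService {
        private Allocator allocator; // filled in later to break the cycle

        void setAllocator(Allocator allocator) {
            this.allocator = allocator;
        }

        void reroute() {
            allocator.allocate(); // safe once the loop has been closed
        }
    }

    static final class ExistingShardsAllocator implements Allocator {
        private final RerouteService rerouteService;

        ExistingShardsAllocator(RerouteService rerouteService) {
            this.rerouteService = rerouteService;
        }

        @Override
        public void allocate() {
            // a real allocator would search for viable shard copies here
        }

        void onAsyncFetchCompleted() {
            rerouteService.reroute(); // triggers another allocation attempt
        }
    }

    public static void main(String[] args) {
        RerouteService reroute = new RerouteService();
        ExistingShardsAllocator allocator = new ExistingShardsAllocator(reroute);
        reroute.setAllocator(allocator); // close the loop, as postInjection does
        allocator.onAsyncFetchCompleted();
    }
}

The setter is the one piece of mutable wiring; everything else stays constructor-injected, which keeps the cycle visible in a single place instead of being hidden behind lazy lookups.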
@@ -1217,8 +1305,6 @@ record PluginServiceInstances( transportService.getRemoteClusterService(), namedWriteableRegistry ); - this.namedWriteableRegistry = namedWriteableRegistry; - this.xContentRegistry = xContentRegistry; logger.debug("initializing HTTP handlers ..."); actionModule.initRestHandlers(() -> clusterService.state().nodesIfRecovered(), f -> { @@ -1268,31 +1354,6 @@ private static ReloadablePlugin wrapPlugins(List reloadablePlu }; } - private HealthService createHealthService( - ClusterService clusterService, - CoordinationDiagnosticsService coordinationDiagnosticsService, - ThreadPool threadPool - ) { - var serverHealthIndicatorServices = Stream.of( - new StableMasterHealthIndicatorService(coordinationDiagnosticsService, clusterService), - new RepositoryIntegrityHealthIndicatorService(clusterService), - new DiskHealthIndicatorService(clusterService), - new ShardsCapacityHealthIndicatorService(clusterService) - ); - var pluginHealthIndicatorServices = pluginsService.filterPlugins(HealthPlugin.class) - .flatMap(plugin -> plugin.getHealthIndicatorServices().stream()); - return new HealthService(Stream.concat(serverHealthIndicatorServices, pluginHealthIndicatorServices).toList(), threadPool); - } - - private static HealthPeriodicLogger createHealthPeriodicLogger( - ClusterService clusterService, - Settings settings, - NodeClient client, - HealthService healthService - ) { - return new HealthPeriodicLogger(settings, clusterService, client, healthService); - } - private RecoveryPlannerService getRecoveryPlannerService( ThreadPool threadPool, ClusterService clusterService, diff --git a/server/src/main/java/org/elasticsearch/persistent/UpdatePersistentTaskStatusAction.java b/server/src/main/java/org/elasticsearch/persistent/UpdatePersistentTaskStatusAction.java index 6074cc0e4ea35..6c6573852700c 100644 --- a/server/src/main/java/org/elasticsearch/persistent/UpdatePersistentTaskStatusAction.java +++ b/server/src/main/java/org/elasticsearch/persistent/UpdatePersistentTaskStatusAction.java @@ -66,14 +66,26 @@ public void setTaskId(String taskId) { this.taskId = taskId; } + public String getTaskId() { + return taskId; + } + public void setAllocationId(long allocationId) { this.allocationId = allocationId; } + public long getAllocationId() { + return allocationId; + } + public void setState(PersistentTaskState state) { this.state = state; } + public PersistentTaskState getState() { + return state; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginDescriptor.java b/server/src/main/java/org/elasticsearch/plugins/PluginDescriptor.java index 13baae5950d6c..e0ee229fe1f98 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginDescriptor.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginDescriptor.java @@ -399,8 +399,8 @@ public String getVersion() { * * @return an Elasticsearch version */ - public Version getElasticsearchVersion() { - return Version.fromString(elasticsearchVersion); + public String getElasticsearchVersion() { + return elasticsearchVersion; } /** diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginsUtils.java b/server/src/main/java/org/elasticsearch/plugins/PluginsUtils.java index 4d30ca1f1a261..0533f535a19f1 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginsUtils.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginsUtils.java @@ -11,7 +11,7 @@ import org.apache.logging.log4j.LogManager; import 
org.apache.logging.log4j.Logger; import org.elasticsearch.Build; -import org.elasticsearch.Version; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.jdk.JarHell; @@ -30,6 +30,8 @@ import java.util.Map; import java.util.Set; import java.util.function.Function; +import java.util.regex.Matcher; +import java.util.regex.Pattern; import java.util.stream.Collectors; /** @@ -77,43 +79,111 @@ public static List findPluginDirs(final Path rootPath) throws IOException * Verify the given plugin is compatible with the current Elasticsearch installation. */ public static void verifyCompatibility(PluginDescriptor info) { - if (info.isStable()) { - if (info.getElasticsearchVersion().major != Version.CURRENT.major) { - throw new IllegalArgumentException( - "Stable Plugin [" - + info.getName() - + "] was built for Elasticsearch major version " - + info.getElasticsearchVersion().major - + " but version " - + Version.CURRENT - + " is running" + final String currentVersion = Build.current().version(); + Matcher buildVersionMatcher = SemanticVersion.semanticPattern.matcher(currentVersion); + // If we're not on a semantic version, assume plugins are compatible + if (buildVersionMatcher.matches()) { + SemanticVersion currentElasticsearchSemanticVersion; + try { + currentElasticsearchSemanticVersion = new SemanticVersion( + Integer.parseInt(buildVersionMatcher.group(1)), + Integer.parseInt(buildVersionMatcher.group(2)), + Integer.parseInt(buildVersionMatcher.group(3)) ); + } catch (NumberFormatException e) { + throw new IllegalArgumentException("Couldn't parse integers from build version [" + currentVersion + "]", e); } - if (info.getElasticsearchVersion().after(Version.CURRENT)) { + if (info.isStable()) { + Matcher pluginEsVersionMatcher = SemanticVersion.semanticPattern.matcher(info.getElasticsearchVersion()); + if (pluginEsVersionMatcher.matches() == false) { + throw new IllegalArgumentException( + "Expected semantic version for plugin [" + info.getName() + "] but was [" + info.getElasticsearchVersion() + "]" + ); + } + SemanticVersion pluginElasticsearchSemanticVersion; + try { + pluginElasticsearchSemanticVersion = new SemanticVersion( + Integer.parseInt(pluginEsVersionMatcher.group(1)), + Integer.parseInt(pluginEsVersionMatcher.group(2)), + Integer.parseInt(pluginEsVersionMatcher.group(3)) + ); + } catch (NumberFormatException e) { + throw new IllegalArgumentException( + "Expected integer version for plugin [" + info.getName() + "] but found [" + info.getElasticsearchVersion() + "]", + e + ); + } + + // case: Major version mismatch + if (pluginElasticsearchSemanticVersion.major != currentElasticsearchSemanticVersion.major) { + throw new IllegalArgumentException( + "Stable Plugin [" + + info.getName() + + "] was built for Elasticsearch major version " + + pluginElasticsearchSemanticVersion.major + + " but version " + + currentVersion + + " is running" + ); + } + + // case: stable plugin from the future + if (pluginElasticsearchSemanticVersion.after(currentElasticsearchSemanticVersion)) { + throw new IllegalArgumentException( + "Stable Plugin [" + + info.getName() + + "] was built for Elasticsearch version " + + info.getElasticsearchVersion() + + " but earlier version " + + currentVersion + + " is running" + ); + } + } else if (info.getElasticsearchVersion().equals(currentVersion) == false) { throw new IllegalArgumentException( - "Stable Plugin [" + "Plugin [" + info.getName() + "] was built for Elasticsearch version " + 
info.getElasticsearchVersion() - + " but earlier version " - + Version.CURRENT + + " but version " + + currentVersion + " is running" ); } - } else if (info.getElasticsearchVersion().equals(Version.CURRENT) == false) { - throw new IllegalArgumentException( - "Plugin [" - + info.getName() - + "] was built for Elasticsearch version " - + info.getElasticsearchVersion() - + " but version " - + Version.CURRENT - + " is running" - ); } JarHell.checkJavaVersion(info.getName(), info.getJavaVersion()); } + private record SemanticVersion(int major, int minor, int bugfix) { + + static final Pattern semanticPattern = Pattern.compile("^(\\d+)\\.(\\d+)\\.(\\d+)$"); + + // does not compare anything after the semantic version + boolean after(SemanticVersion other) { + // major + if (this.major < other.major) { + return false; + } + if (this.major > other.major) { + return true; + } + // minor + if (this.minor < other.minor) { + return false; + } + if (this.minor > other.minor) { + return true; + } + // bugfix + return this.bugfix > other.bugfix; + } + + @Override + public String toString() { + return Strings.format("%d.%d.%d", this.major, this.minor, this.bugfix); + } + } + /** * Check for the existence of a marker file that indicates any plugins are in a garbage state from a failed attempt to remove the * plugin. diff --git a/server/src/main/java/org/elasticsearch/repositories/ShardGenerations.java b/server/src/main/java/org/elasticsearch/repositories/ShardGenerations.java index 6e8ae1956f945..e42552d3e5f3c 100644 --- a/server/src/main/java/org/elasticsearch/repositories/ShardGenerations.java +++ b/server/src/main/java/org/elasticsearch/repositories/ShardGenerations.java @@ -9,6 +9,7 @@ package org.elasticsearch.repositories; import org.elasticsearch.cluster.SnapshotsInProgress; +import org.elasticsearch.common.Strings; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; @@ -219,12 +220,22 @@ public Builder put(IndexId indexId, int shardId, SnapshotsInProgress.ShardSnapsh } public Builder put(IndexId indexId, int shardId, ShardGeneration generation) { + assert noDuplicateIndicesWithSameName(indexId); ShardGeneration existingGeneration = generations.computeIfAbsent(indexId, i -> new HashMap<>()).put(shardId, generation); assert generation != null || existingGeneration == null : "must not overwrite existing generation with null generation [" + existingGeneration + "]"; return this; } + private boolean noDuplicateIndicesWithSameName(IndexId newId) { + for (IndexId id : generations.keySet()) { + if (id.getName().equals(newId.getName()) && id.equals(newId) == false) { + assert false : Strings.format("Unable to add: %s. 
There's another index id with the same name: %s", newId, id); + } + } + return true; + } + public ShardGenerations build() { return new ShardGenerations(generations.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, entry -> { final Set shardIds = entry.getValue().keySet(); diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 4167717e09006..a53674882c84d 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -70,14 +70,15 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.common.util.concurrent.ListenableFuture; import org.elasticsearch.common.util.concurrent.ThrottledIterator; import org.elasticsearch.common.util.concurrent.ThrottledTaskRunner; import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.Tuple; @@ -455,6 +456,7 @@ protected void doStop() {} @Override protected void doClose() { + activityRefs.decRef(); BlobStore store; // to close blobStore if blobStore initialization is started during close synchronized (lock) { @@ -469,28 +471,14 @@ protected void doClose() { } } - // listeners to invoke when a restore completes and there are no more restores running - @Nullable - private List> emptyListeners; + private final SubscribableListener closedAndIdleListeners = new SubscribableListener<>(); - // Set of shard ids that this repository is currently restoring - private final Set ongoingRestores = new HashSet<>(); + private final RefCounted activityRefs = AbstractRefCounted.of(() -> closedAndIdleListeners.onResponse(null)); @Override public void awaitIdle() { - assert lifecycle.stoppedOrClosed(); - final PlainActionFuture future; - synchronized (ongoingRestores) { - if (ongoingRestores.isEmpty()) { - return; - } - future = new PlainActionFuture<>(); - if (emptyListeners == null) { - emptyListeners = new ArrayList<>(); - } - emptyListeners.add(future); - } - FutureUtils.get(future); + assert lifecycle.closed(); + PlainActionFuture.get(closedAndIdleListeners::addListener); } @SuppressForbidden(reason = "legacy usage of unbatched task") // TODO add support for batching here @@ -3305,30 +3293,19 @@ public void restoreShard( ); final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT); final BlobContainer container = shardContainer(indexId, snapshotShardId); - synchronized (ongoingRestores) { - if (store.isClosing()) { - restoreListener.onFailure(new AlreadyClosedException("store is closing")); - return; - } - if (lifecycle.started() == false) { - restoreListener.onFailure(new AlreadyClosedException("repository [" + metadata.name() + "] closed")); - return; - } - final boolean added = ongoingRestores.add(shardId); - assert added : "add restore for [" + shardId + "] that already has an 
existing restore"; + if (store.isClosing()) { + restoreListener.onFailure(new AlreadyClosedException("store is closing")); + return; } - executor.execute(ActionRunnable.wrap(ActionListener.runBefore(restoreListener, () -> { - final List> onEmptyListeners; - synchronized (ongoingRestores) { - if (ongoingRestores.remove(shardId) && ongoingRestores.isEmpty() && emptyListeners != null) { - onEmptyListeners = emptyListeners; - emptyListeners = null; - } else { - return; - } - } - ActionListener.onResponse(onEmptyListeners, null); - }), l -> { + if (lifecycle.started() == false) { + restoreListener.onFailure(new AlreadyClosedException("repository [" + metadata.name() + "] closed")); + return; + } + if (activityRefs.tryIncRef() == false) { + restoreListener.onFailure(new AlreadyClosedException("repository [" + metadata.name() + "] closing")); + return; + } + executor.execute(ActionRunnable.wrap(ActionListener.runBefore(restoreListener, activityRefs::decRef), l -> { final BlobStoreIndexShardSnapshot snapshot = loadShardSnapshot(container, snapshotId); final SnapshotFiles snapshotFiles = new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles(), null); new FileRestoreContext(metadata.name(), shardId, snapshotId, recoveryState) { diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java b/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java index 2d13af0248a73..56c975e148ab5 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java @@ -16,6 +16,7 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.ReservedStateMetadata; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.file.MasterNodeFileWatchingService; import org.elasticsearch.env.Environment; import org.elasticsearch.xcontent.XContentParserConfiguration; @@ -112,7 +113,7 @@ protected boolean shouldRefreshFileState(ClusterState clusterState) { */ @Override protected void processFileChanges() throws ExecutionException, InterruptedException, IOException { - PlainActionFuture completion = PlainActionFuture.newFuture(); + PlainActionFuture completion = new PlainActionFuture<>(); logger.info("processing path [{}] for [{}]", watchedFile(), NAMESPACE); try ( var fis = Files.newInputStream(watchedFile()); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java index 661ecf38c8523..b6e1240a3f85a 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java @@ -16,8 +16,11 @@ import org.elasticsearch.cluster.metadata.DataStreamAlias; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.logging.DeprecationCategory; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; @@ -25,6 +28,7 @@ import org.elasticsearch.rest.Scope; import 
org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.action.RestCancellableNodeClient; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -46,6 +50,8 @@ @ServerlessScope(Scope.PUBLIC) public class RestGetAliasesAction extends BaseRestHandler { + private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(RestGetAliasesAction.class); + @Override public List routes() { return List.of( @@ -201,16 +207,30 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); getAliasesRequest.indices(indices); getAliasesRequest.indicesOptions(IndicesOptions.fromRequest(request, getAliasesRequest.indicesOptions())); - getAliasesRequest.local(request.paramAsBoolean("local", getAliasesRequest.local())); + + if (request.hasParam("local")) { + // consume this param just for validation + final var localParam = request.paramAsBoolean("local", false); + if (request.getRestApiVersion() != RestApiVersion.V_7) { + DEPRECATION_LOGGER.critical( + DeprecationCategory.API, + "get-aliases-local", + "the [?local={}] query parameter to get-aliases requests has no effect and will be removed in a future version", + localParam + ); + } + } // we may want to move this logic to TransportGetAliasesAction but it is based on the original provided aliases, which will // not always be available there (they may get replaced so retrieving request.aliases is not quite the same). - return channel -> client.admin().indices().getAliases(getAliasesRequest, new RestBuilderListener<>(channel) { - @Override - public RestResponse buildResponse(GetAliasesResponse response, XContentBuilder builder) throws Exception { - return buildRestResponse(namesProvided, aliases, response.getAliases(), response.getDataStreamAliases(), builder); - } - }); + return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin() + .indices() + .getAliases(getAliasesRequest, new RestBuilderListener<>(channel) { + @Override + public RestResponse buildResponse(GetAliasesResponse response, XContentBuilder builder) throws Exception { + return buildRestResponse(namesProvided, aliases, response.getAliases(), response.getDataStreamAliases(), builder); + } + }); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java index 2f7468ee544bb..74eddca033398 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java @@ -46,6 +46,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC updateSettingsRequest.setPreserveExisting(request.paramAsBoolean("preserve_existing", updateSettingsRequest.isPreserveExisting())); updateSettingsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", updateSettingsRequest.masterNodeTimeout())); updateSettingsRequest.indicesOptions(IndicesOptions.fromRequest(request, updateSettingsRequest.indicesOptions())); + updateSettingsRequest.reopen(request.paramAsBoolean("reopen", false)); updateSettingsRequest.fromXContent(request.contentParser()); return channel -> client.admin().indices().updateSettings(updateSettingsRequest, new 
RestToXContentListener<>(channel)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java index b8f083115614f..dc99b970864b2 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java @@ -14,10 +14,14 @@ import org.elasticsearch.cluster.metadata.AliasMetadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.Table; +import org.elasticsearch.common.logging.DeprecationCategory; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; +import org.elasticsearch.rest.action.RestCancellableNodeClient; import org.elasticsearch.rest.action.RestResponseListener; import java.util.List; @@ -28,6 +32,8 @@ @ServerlessScope(Scope.PUBLIC) public class RestAliasAction extends AbstractCatAction { + private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(RestAliasAction.class); + @Override public List routes() { return List.of(new Route(GET, "/_cat/aliases"), new Route(GET, "/_cat/aliases/{alias}")); @@ -49,15 +55,29 @@ protected RestChannelConsumer doCatRequest(final RestRequest request, final Node ? new GetAliasesRequest(Strings.commaDelimitedListToStringArray(request.param("alias"))) : new GetAliasesRequest(); getAliasesRequest.indicesOptions(IndicesOptions.fromRequest(request, getAliasesRequest.indicesOptions())); - getAliasesRequest.local(request.paramAsBoolean("local", getAliasesRequest.local())); - return channel -> client.admin().indices().getAliases(getAliasesRequest, new RestResponseListener(channel) { - @Override - public RestResponse buildResponse(GetAliasesResponse response) throws Exception { - Table tab = buildTable(request, response); - return RestTable.buildResponse(tab, channel); + if (request.hasParam("local")) { + // consume this param just for validation + final var localParam = request.paramAsBoolean("local", false); + if (request.getRestApiVersion() != RestApiVersion.V_7) { + DEPRECATION_LOGGER.critical( + DeprecationCategory.API, + "cat-aliases-local", + "the [?local={}] query parameter to cat-aliases requests has no effect and will be removed in a future version", + localParam + ); } - }); + } + + return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin() + .indices() + .getAliases(getAliasesRequest, new RestResponseListener<>(channel) { + @Override + public RestResponse buildResponse(GetAliasesResponse response) throws Exception { + Table tab = buildTable(request, response); + return RestTable.buildResponse(tab, channel); + } + }); } @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java index e2767e45f4858..2dc657582a0a1 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java @@ -92,7 +92,7 @@ protected Table getTableWithHeader(final RestRequest request) { table.addCell("host", "alias:h;desc:host of node"); table.addCell("ip", "desc:ip of node"); table.addCell("node", "alias:n;desc:name of node"); - 
table.addCell("node.role", "default:false;alias:r,role,nodeRole;desc:node roles"); + table.addCell("node.role", "alias:r,role,nodeRole;desc:node roles"); table.endHeaders(); return table; } diff --git a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java index 4c1df376ebf63..a4f641fd6f071 100644 --- a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java @@ -468,11 +468,6 @@ public boolean sourceRequested() { return fetchSourceContext != null && fetchSourceContext.fetchSource(); } - @Override - public boolean hasFetchSourceContext() { - return fetchSourceContext != null; - } - @Override public FetchSourceContext fetchSourceContext() { return this.fetchSourceContext; diff --git a/server/src/main/java/org/elasticsearch/search/MultiValueMode.java b/server/src/main/java/org/elasticsearch/search/MultiValueMode.java index 9137d5c97248d..2b5d9cb17b4f4 100644 --- a/server/src/main/java/org/elasticsearch/search/MultiValueMode.java +++ b/server/src/main/java/org/elasticsearch/search/MultiValueMode.java @@ -543,7 +543,7 @@ public int docID() { } @Override - public long longValue() throws IOException { + public long longValue() { return value; } }; @@ -571,7 +571,6 @@ public NumericDocValues select( final long missingValue, final BitSet parentDocs, final DocIdSetIterator childDocs, - int maxDoc, int maxChildren ) throws IOException { if (parentDocs == null || childDocs == null) { @@ -654,7 +653,7 @@ public boolean advanceExact(int target) throws IOException { } @Override - public double doubleValue() throws IOException { + public double doubleValue() { return this.value; } }; @@ -804,7 +803,6 @@ public BinaryDocValues select( final BytesRef missingValue, final BitSet parentDocs, final DocIdSetIterator childDocs, - int maxDoc, int maxChildren ) throws IOException { if (parentDocs == null || childDocs == null) { diff --git a/server/src/main/java/org/elasticsearch/search/SearchHit.java b/server/src/main/java/org/elasticsearch/search/SearchHit.java index 9fa99bb4a773f..6c04f6feddc96 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchHit.java +++ b/server/src/main/java/org/elasticsearch/search/SearchHit.java @@ -1001,7 +1001,7 @@ private static Map parseHighlightFields(XContentParser p Map highlightFields = new HashMap<>(); while ((parser.nextToken()) != XContentParser.Token.END_OBJECT) { HighlightField highlightField = HighlightField.fromXContent(parser); - highlightFields.put(highlightField.getName(), highlightField); + highlightFields.put(highlightField.name(), highlightField); } return highlightFields; } diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 44a8f641fae91..73350d60b256c 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -739,7 +739,7 @@ public void executeQueryPhase( SearchOperationListenerExecutor executor = new SearchOperationListenerExecutor(searchContext) ) { searchContext.searcher().setAggregatedDfs(readerContext.getAggregatedDfs(null)); - processScroll(request, readerContext, searchContext); + processScroll(request, searchContext); QueryPhase.execute(searchContext); executor.success(); readerContext.setRescoreDocIds(searchContext.rescoreDocIds()); @@ -772,7 +772,8 @@ public void 
executeQueryPhase(QuerySearchRequest request, SearchShardTask task, ) { searchContext.searcher().setAggregatedDfs(request.dfs()); QueryPhase.execute(searchContext); - if (searchContext.queryResult().hasSearchContext() == false && readerContext.singleSession()) { + final QuerySearchResult queryResult = searchContext.queryResult(); + if (queryResult.hasSearchContext() == false && readerContext.singleSession()) { // no hits, we can release the context since there will be no fetch phase freeReaderContext(readerContext.id()); } @@ -781,10 +782,10 @@ public void executeQueryPhase(QuerySearchRequest request, SearchShardTask task, // and receive them back in the fetch phase. // We also pass the rescoreDocIds to the LegacyReaderContext in case the search state needs to stay in the data node. final RescoreDocIds rescoreDocIds = searchContext.rescoreDocIds(); - searchContext.queryResult().setRescoreDocIds(rescoreDocIds); + queryResult.setRescoreDocIds(rescoreDocIds); readerContext.setRescoreDocIds(rescoreDocIds); - searchContext.queryResult().incRef(); - return searchContext.queryResult(); + queryResult.incRef(); + return queryResult; } catch (Exception e) { assert TransportActions.isShardNotAvailableException(e) == false : new AssertionError(e); logger.trace("Query phase failed", e); @@ -830,7 +831,7 @@ public void executeFetchPhase( ) { searchContext.assignRescoreDocIds(readerContext.getRescoreDocIds(null)); searchContext.searcher().setAggregatedDfs(readerContext.getAggregatedDfs(null)); - processScroll(request, readerContext, searchContext); + processScroll(request, searchContext); searchContext.addQueryResult(); QueryPhase.execute(searchContext); final long afterQueryTime = executor.success(); @@ -1321,11 +1322,9 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc context.addQuerySearchResultReleasable(aggContext); try { final AggregatorFactories factories = source.aggregations().build(aggContext, null); - final Supplier supplier = () -> aggReduceContextBuilder( - context::isCancelled, - source.aggregations() + context.aggregations( + new SearchContextAggregations(factories, () -> aggReduceContextBuilder(context::isCancelled, source.aggregations())) ); - context.aggregations(new SearchContextAggregations(factories, supplier)); } catch (IOException e) { throw new AggregationInitializationException("Failed to create aggregators", e); } @@ -1506,7 +1505,7 @@ private static void shortcutDocIdsToLoad(SearchContext context) { context.docIdsToLoad(docIdsToLoad); } - private static void processScroll(InternalScrollSearchRequest request, ReaderContext reader, SearchContext context) { + private static void processScroll(InternalScrollSearchRequest request, SearchContext context) { // process scroll context.from(context.from() + context.size()); context.scrollContext().scroll = request.scroll(); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AdaptingAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/AdaptingAggregator.java index 0be4e7f729bbf..f96b732b9464f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AdaptingAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AdaptingAggregator.java @@ -130,10 +130,6 @@ public void collectDebugInfo(BiConsumer add) { add.accept("delegate_debug", delegateDebug); } - public Aggregator delegate() { - return delegate; - } - @Override public String toString() { return name(); diff --git 
a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilders.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilders.java index 3f998bffd1860..d78567f3effdb 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilders.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilders.java @@ -75,7 +75,6 @@ import org.elasticsearch.search.aggregations.metrics.TopHitsAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.ValueCount; import org.elasticsearch.search.aggregations.metrics.ValueCountAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.WeightedAvgAggregationBuilder; import java.util.List; @@ -100,13 +99,6 @@ public static AvgAggregationBuilder avg(String name) { return new AvgAggregationBuilder(name); } - /** - * Create a new {@link Avg} aggregation with the given name. - */ - public static WeightedAvgAggregationBuilder weightedAvg(String name) { - return new WeightedAvgAggregationBuilder(name); - } - /** * Create a new {@link Max} aggregation with the given name. */ diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java index 98c131213b3fe..0738303020de5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java @@ -366,7 +366,6 @@ public Builder addPipelineAggregator(PipelineAggregationBuilder pipelineAggregat public ActionRequestValidationException validate(ActionRequestValidationException e) { PipelineAggregationBuilder.ValidationContext context = PipelineAggregationBuilder.ValidationContext.forTreeRoot( aggregationBuilders, - pipelineAggregatorBuilders, e ); validatePipelines(context); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java index bf74494e872bb..15b5c29589227 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java @@ -64,8 +64,6 @@ public String name() { return name; } - public void doValidate() {} - protected abstract Aggregator createInternal(Aggregator parent, CardinalityUpperBound cardinality, Map metadata) throws IOException; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java b/server/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java index c9f937b489a73..ff1ca58d351e3 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java @@ -26,6 +26,18 @@ public abstract class InternalMultiBucketAggregation< A extends InternalMultiBucketAggregation, B extends InternalMultiBucketAggregation.InternalBucket> extends InternalAggregation implements MultiBucketsAggregation { + /** + * When we pre-count the empty buckets we report them periodically + * because you can configure the date_histogram to create an astounding + * number of buckets. It'd take a while to count that high only to abort. + * So we report every couple thousand buckets. 
It'd be simpler to report + every single bucket we plan to allocate one at a time but that'd cause + needless overhead on the circuit breakers. Counting a couple thousand + buckets is plenty fast to fail this quickly in pathological cases and + plenty large to keep the overhead minimal. + */ + protected static final int REPORT_EMPTY_EVERY = 10_000; + public InternalMultiBucketAggregation(String name, Map metadata) { super(name, metadata); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/PipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/PipelineAggregationBuilder.java index c43eb252a5364..6ce5b32864f18 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/PipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/PipelineAggregationBuilder.java @@ -74,10 +74,9 @@ public abstract static class ValidationContext { */ public static ValidationContext forTreeRoot( Collection siblingAggregations, - Collection siblingPipelineAggregations, ActionRequestValidationException validationFailuresSoFar ) { - return new ForTreeRoot(siblingAggregations, siblingPipelineAggregations, validationFailuresSoFar); + return new ForTreeRoot(siblingAggregations, validationFailuresSoFar); } /** @@ -95,16 +94,10 @@ private ValidationContext(ActionRequestValidationException validationFailuresSoF private static class ForTreeRoot extends ValidationContext { private final Collection siblingAggregations; - private final Collection siblingPipelineAggregations; - ForTreeRoot( - Collection siblingAggregations, - Collection siblingPipelineAggregations, - ActionRequestValidationException validationFailuresSoFar - ) { + ForTreeRoot(Collection siblingAggregations, ActionRequestValidationException validationFailuresSoFar) { super(validationFailuresSoFar); this.siblingAggregations = Objects.requireNonNull(siblingAggregations); - this.siblingPipelineAggregations = Objects.requireNonNull(siblingPipelineAggregations); } @Override @@ -112,11 +105,6 @@ public Collection getSiblingAggregations() { return siblingAggregations; } - @Override - public Collection getSiblingPipelineAggregations() { - return siblingPipelineAggregations; - } - @Override public void validateHasParent(String type, String name) { addValidationError(type + " aggregation [" + name + "] must be declared inside of another aggregation"); @@ -155,11 +143,6 @@ public Collection getSiblingAggregations() { return parent.getSubAggregations(); } - @Override - public Collection getSiblingPipelineAggregations() { - return parent.getPipelineAggregations(); - } - @Override public void validateHasParent(String type, String name) { // There is a parent inside the tree. @@ -181,11 +164,6 @@ public void validateParentAggSequentiallyOrderedWithoutSkips(String type, String */ public abstract Collection getSiblingAggregations(); - /** - * Pipeline aggregations that are siblings to the aggregation being validated. - */ - public abstract Collection getSiblingPipelineAggregations(); - /** * Add a validation error to this context.
All validation errors * are accumulated in a list and, if there are any, the request diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/PipelineAggregatorBuilders.java b/server/src/main/java/org/elasticsearch/search/aggregations/PipelineAggregatorBuilders.java index f280eb4de61bb..61da00241a4ea 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/PipelineAggregatorBuilders.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/PipelineAggregatorBuilders.java @@ -11,7 +11,6 @@ import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.pipeline.AvgBucketPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.BucketScriptPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.CumulativeSumPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.ExtendedStatsBucketPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.MaxBucketPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.MinBucketPipelineAggregationBuilder; @@ -62,10 +61,6 @@ public static BucketScriptPipelineAggregationBuilder bucketScript(String name, S return new BucketScriptPipelineAggregationBuilder(name, script, bucketsPaths); } - public static CumulativeSumPipelineAggregationBuilder cumulativeSum(String name, String bucketsPath) { - return new CumulativeSumPipelineAggregationBuilder(name, bucketsPath); - } - public static SerialDiffPipelineAggregationBuilder diff(String name, String bucketsPath) { return new SerialDiffPipelineAggregationBuilder(name, bucketsPath); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/IteratorAndCurrent.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/IteratorAndCurrent.java index af8757e10ccf7..37c5f49dc55fe 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/IteratorAndCurrent.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/IteratorAndCurrent.java @@ -21,10 +21,6 @@ public IteratorAndCurrent(Iterator iterator) { this.current = iterator.next(); } - public Iterator getIterator() { - return iterator; - } - public B current() { return current; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/BinaryValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/BinaryValuesSource.java index 58fd7f85f6076..62f587f5249d1 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/BinaryValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/BinaryValuesSource.java @@ -48,7 +48,7 @@ class BinaryValuesSource extends SingleDimensionValuesSource { int size, int reverseMul ) { - super(bigArrays, format, fieldType, missingBucket, missingOrder, size, reverseMul); + super(bigArrays, format, fieldType, missingBucket, missingOrder, reverseMul); this.breakerConsumer = breakerConsumer; this.docValuesFunc = docValuesFunc; this.values = bigArrays.newObjectArray(Math.min(size, 100)); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java index 2cd33e470e3e5..d31d3a18b3567 100644 --- 
a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java @@ -212,13 +212,6 @@ public ZoneId timeZone() { return timeZone; } - /** - * Get the offset to use when rounding, which is a number of milliseconds. - */ - public long offset() { - return offset; - } - /** * Set the offset on this builder, which is a number of milliseconds. * @return this for chaining diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DoubleValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DoubleValuesSource.java index 2dc48b7ce0e2d..752c4ecf97401 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DoubleValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DoubleValuesSource.java @@ -43,7 +43,7 @@ class DoubleValuesSource extends SingleDimensionValuesSource { int size, int reverseMul ) { - super(bigArrays, format, fieldType, missingBucket, missingOrder, size, reverseMul); + super(bigArrays, format, fieldType, missingBucket, missingOrder, reverseMul); this.docValuesFunc = docValuesFunc; this.bits = this.missingBucket ? new BitArray(100, bigArrays) : null; boolean success = false; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/GlobalOrdinalValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/GlobalOrdinalValuesSource.java index 855b456546314..f833bb39b3b56 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/GlobalOrdinalValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/GlobalOrdinalValuesSource.java @@ -76,7 +76,7 @@ class GlobalOrdinalValuesSource extends SingleDimensionValuesSource { int size, int reverseMul ) { - super(bigArrays, format, type, missingBucket, missingOrder, size, reverseMul); + super(bigArrays, format, type, missingBucket, missingOrder, reverseMul); this.uniqueValueCount = uniqueValueCount; this.docValuesFunc = docValuesFunc; this.values = bigArrays.newLongArray(Math.min(size, 100), false); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java index e3ca337ef8a8c..9d17db7a77864 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java @@ -147,13 +147,6 @@ public String type() { return TYPE; } - /** - * Returns the interval that is set on this source - **/ - public double interval() { - return interval; - } - /** * Sets the interval on this source. 
**/ diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java index 7613d926d3c6d..3d79509ad9377 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java @@ -59,7 +59,7 @@ class LongValuesSource extends SingleDimensionValuesSource { int size, int reverseMul ) { - super(bigArrays, format, fieldType, missingBucket, missingOrder, size, reverseMul); + super(bigArrays, format, fieldType, missingBucket, missingOrder, reverseMul); this.bigArrays = bigArrays; this.docValuesFunc = docValuesFunc; this.rounding = rounding; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/SingleDimensionValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/SingleDimensionValuesSource.java index bd3c8b7eb322c..6376c5334d7b5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/SingleDimensionValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/SingleDimensionValuesSource.java @@ -31,8 +31,6 @@ abstract class SingleDimensionValuesSource> implements R protected final MappedFieldType fieldType; protected final boolean missingBucket; protected final MissingOrder missingOrder; - - protected final int size; protected final int reverseMul; protected T afterValue; @@ -45,7 +43,6 @@ abstract class SingleDimensionValuesSource> implements R * @param fieldType The field type or null if the source is a script. * @param missingBucket If true, an explicit `null` bucket represents documents with missing values. * @param missingOrder How to order missing buckets if missingBucket is true. - * @param size The number of values to record. * @param reverseMul -1 if the natural order ({@link SortOrder#ASC}) should be reversed.
*/ SingleDimensionValuesSource( @@ -54,7 +51,6 @@ abstract class SingleDimensionValuesSource> implements R @Nullable MappedFieldType fieldType, boolean missingBucket, MissingOrder missingOrder, - int size, int reverseMul ) { this.bigArrays = bigArrays; @@ -62,7 +58,6 @@ abstract class SingleDimensionValuesSource> implements R this.fieldType = fieldType; this.missingBucket = missingBucket; this.missingOrder = missingOrder; - this.size = size; this.reverseMul = reverseMul; this.afterValue = null; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java index 81678404d1dab..c96fe5ad550f4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java @@ -198,13 +198,6 @@ public FiltersAggregationBuilder keyedBucket(boolean keyedBucket) { return this; } - /** - * Get whether to return keyed bucket in array - */ - public boolean keyedBucket() { - return keyedBucket; - } - @Override public BucketCardinality bucketCardinality() { return BucketCardinality.MANY; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java index 882b4960dd36c..ff9495ca4d825 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java @@ -151,10 +151,6 @@ public GeoGridAggregationBuilder size(int size) { return this; } - public int size() { - return requiredSize; - } - public GeoGridAggregationBuilder shardSize(int shardSize) { if (shardSize <= 0) { throw new IllegalArgumentException("[shardSize] must be greater than 0. Found [" + shardSize + "] in [" + name + "]"); @@ -163,10 +159,6 @@ public GeoGridAggregationBuilder shardSize(int shardSize) { return this; } - public int shardSize() { - return shardSize; - } - public GeoGridAggregationBuilder setGeoBoundingBox(GeoBoundingBox geoBoundingBox) { this.geoBoundingBox = geoBoundingBox; // no validation done here, similar to geo_bounding_box query behavior. 
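The batched circuit-breaker accounting this diff introduces (the REPORT_EMPTY_EVERY constant added above and the consumeBucketsAndMaybeBreak calls in the InternalHistogram reduce below) can be sketched in isolation. The following is a minimal illustration, not the Elasticsearch implementation: BucketBreaker and collect are hypothetical stand-ins for AggregationReduceContext's accounting, and only the method name consumeBucketsAndMaybeBreak is taken from the diff itself.

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

final class BatchedBucketAccounting {
    // Report to the breaker every couple thousand buckets rather than one at a time.
    static final int REPORT_EMPTY_EVERY = 10_000;

    // Hypothetical breaker: implementations would throw once the running count exceeds their budget.
    interface BucketBreaker {
        void consumeBucketsAndMaybeBreak(int count);
    }

    // Collects buckets while accounting for them in batches: fails fast on pathological
    // requests without paying per-bucket breaker overhead.
    static <B> List<B> collect(Iterator<B> buckets, BucketBreaker breaker) {
        List<B> out = new ArrayList<>();
        int pending = 0; // buckets created since the last report to the breaker
        while (buckets.hasNext()) {
            out.add(buckets.next());
            if (++pending >= REPORT_EMPTY_EVERY) {
                breaker.consumeBucketsAndMaybeBreak(pending); // report a whole batch; may break here
                pending = 0;
            }
        }
        breaker.consumeBucketsAndMaybeBreak(pending); // flush the final partial batch
        return out;
    }
}

As in the InternalHistogram#reduceBuckets change further down, the call after the loop is what guarantees the last partial batch is still accounted for.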
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java index e0edebd7e5201..faeb569688994 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java @@ -110,11 +110,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return REGISTRY_KEY; - } - @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.ZERO; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java index b7532bdcb4e5b..72d3ee2267a87 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java @@ -106,11 +106,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return REGISTRY_KEY; - } - @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.V_7_0_0; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java index ebbb290e1db9c..20223f6f92524 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java @@ -290,11 +290,6 @@ public DateHistogramAggregationBuilder extendedBounds(LongBounds extendedBounds) return this; } - /** Return hard bounds for this histogram, or {@code null} if none are set. */ - public LongBounds hardBounds() { - return hardBounds; - } - /** Set hard bounds on this histogram, specifying boundaries outside which buckets cannot be created. 
*/ public DateHistogramAggregationBuilder hardBounds(LongBounds hardBounds) { if (hardBounds == null) { @@ -406,11 +401,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return REGISTRY_KEY; - } - @Override protected ValuesSourceAggregatorFactory innerBuild( AggregationContext context, diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java index b62e3c9e91f6f..1a75766c40a6b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java @@ -117,10 +117,6 @@ public DateHistogramAggregatorFactory( this.rounding = rounding; } - public long minDocCount() { - return minDocCount; - } - @Override protected Aggregator doCreateInternal(Aggregator parent, CardinalityUpperBound cardinality, Map metadata) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java index 7218bef0b9d9b..2371506082f1b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java @@ -26,7 +26,6 @@ import java.io.IOException; import java.time.ZoneId; -import java.util.Locale; import java.util.Objects; import static org.elasticsearch.core.RestApiVersion.equalTo; @@ -59,10 +58,6 @@ public enum IntervalTypeEnum implements Writeable { @Deprecated LEGACY_DATE_HISTO(null); - public static IntervalTypeEnum fromString(String name) { - return valueOf(name.trim().toUpperCase(Locale.ROOT)); - } - public static IntervalTypeEnum fromStream(StreamInput in) throws IOException { return in.readEnum(IntervalTypeEnum.class); } @@ -72,10 +67,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeEnum(this); } - public String value() { - return name().toLowerCase(Locale.ROOT); - } - public boolean isValid() { // I'm being a little cheeky here and just reusing the name for signaling invalid choices too return this.preferredName != null; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java index 0740557a526d3..fa2c5dc219cba 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java @@ -216,10 +216,6 @@ public double maxBound() { return DoubleBounds.getEffectiveMax(extendedBounds); } - protected DoubleBounds extendedBounds() { - return extendedBounds; - } - /** * Set extended bounds on this builder: buckets between {@code minBound} and * {@code maxBound} will be created even if no documents fell into these @@ -364,11 +360,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return REGISTRY_KEY; - } - @Override protected ValuesSourceAggregatorFactory 
innerBuild( AggregationContext context, diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java index d0ec504f0aa2b..ca61a5f4ddcf6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java @@ -84,10 +84,6 @@ public HistogramAggregatorFactory( this.hardBounds = hardBounds; } - public long minDocCount() { - return minDocCount; - } - @Override protected Aggregator doCreateInternal(Aggregator parent, CardinalityUpperBound cardinality, Map metadata) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java index 4eaec7034b7f4..4ffc9abdc2202 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java @@ -269,10 +269,6 @@ public List getBuckets() { return Collections.unmodifiableList(buckets); } - DocValueFormat getFormatter() { - return format; - } - long getMinDocCount() { return minDocCount; } @@ -377,18 +373,6 @@ protected Bucket reduceBucket(List buckets, AggregationReduceContext con return createBucket(buckets.get(0).key, docCount, aggs); } - /** - * When we pre-count the empty buckets we report them periodically - * because you can configure the date_histogram to create an astounding - * number of buckets. It'd take a while to count that high only to abort. - * So we report every couple thousand buckets. It's be simpler to report - * every single bucket we plan to allocate one at a time but that'd cause - * needless overhead on the circuit breakers. Counting a couple thousand - * buckets is plenty fast to fail this quickly in pathological cases and - * plenty large to keep the overhead minimal. 
- */ - private static final int REPORT_EMPTY_EVERY = 10_000; - private void addEmptyBuckets(List list, AggregationReduceContext reduceContext) { /* * Make sure we have space for the empty buckets we're going to add by diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java index caef13221b0f3..6ce723d12db26 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java @@ -291,10 +291,11 @@ protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurrent(histogram.buckets.iterator())); + pq.add(new IteratorAndCurrent<>(histogram.buckets.iterator())); } } + int consumeBucketCount = 0; List reducedBuckets = new ArrayList<>(); if (pq.size() > 0) { // list of buckets coming from different shards that have the same key @@ -310,6 +311,10 @@ protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurrent= minDocCount || reduceContext.isFinalReduce() == false) { reducedBuckets.add(reduced); + if (consumeBucketCount++ >= REPORT_EMPTY_EVERY) { + reduceContext.consumeBucketsAndMaybeBreak(consumeBucketCount); + consumeBucketCount = 0; + } } currentBuckets.clear(); key = top.current().key; @@ -330,10 +335,15 @@ protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurrent= minDocCount || reduceContext.isFinalReduce() == false) { reducedBuckets.add(reduced); + if (consumeBucketCount++ >= REPORT_EMPTY_EVERY) { + reduceContext.consumeBucketsAndMaybeBreak(consumeBucketCount); + consumeBucketCount = 0; + } } } } + reduceContext.consumeBucketsAndMaybeBreak(consumeBucketCount); return reducedBuckets; } @@ -358,18 +368,6 @@ private double round(double key) { return Math.floor((key - emptyBucketInfo.offset) / emptyBucketInfo.interval) * emptyBucketInfo.interval + emptyBucketInfo.offset; } - /** - * When we pre-count the empty buckets we report them periodically - * because you can configure the histogram to create more buckets than - * there are atoms in the universe. It'd take a while to count that high - * only to abort. So we report every couple thousand buckets. It's be - * simpler to report every single bucket we plan to allocate one at a time - * but that'd cause needless overhead on the circuit breakers. Counting a - * couple thousand buckets is plenty fast to fail this quickly in - * pathological cases and plenty large to keep the overhead minimal. - */ - private static final int REPORT_EMPTY_EVERY = 10_000; - private void addEmptyBuckets(List list, AggregationReduceContext reduceContext) { /* * Make sure we have space for the empty buckets we're going to add by @@ -377,7 +375,7 @@ private void addEmptyBuckets(List list, AggregationReduceContext reduceC * consumeBucketsAndMaybeBreak. 
*/ class Counter implements DoubleConsumer { - private int size = list.size(); + private int size = 0; @Override public void accept(double key) { @@ -456,11 +454,9 @@ private void iterateEmptyBuckets(List list, ListIterator iter, D @Override public InternalAggregation reduce(List aggregations, AggregationReduceContext reduceContext) { List reducedBuckets = reduceBuckets(aggregations, reduceContext); - boolean alreadyAccountedForBuckets = false; if (reduceContext.isFinalReduce()) { if (minDocCount == 0) { addEmptyBuckets(reducedBuckets, reduceContext); - alreadyAccountedForBuckets = true; } if (InternalOrder.isKeyDesc(order)) { // we just need to reverse here... @@ -474,9 +470,6 @@ public InternalAggregation reduce(List aggregations, Aggreg CollectionUtil.introSort(reducedBuckets, order.comparator()); } } - if (false == alreadyAccountedForBuckets) { - reduceContext.consumeBucketsAndMaybeBreak(reducedBuckets.size()); - } return new InternalHistogram(getName(), reducedBuckets, order, minDocCount, emptyBucketInfo, format, keyed, getMetadata()); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java index 9f464fa1b23cb..5686c0ea11dfa 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java @@ -187,10 +187,6 @@ public int compareKey(InternalVariableWidthHistogram.Bucket other) { return Double.compare(centroid, other.centroid); // Use centroid for bucket ordering } - public DocValueFormat getFormatter() { - return format; - } - Bucket finalizeSampling(SamplingContext samplingContext) { return new Bucket( centroid, @@ -282,10 +278,6 @@ public List getBuckets() { return Collections.unmodifiableList(buckets); } - DocValueFormat getFormatter() { - return format; - } - public int getTargetBuckets() { return targetNumBuckets; } @@ -525,7 +517,7 @@ private void mergeBucketsWithSameMin(List buckets, AggregationReduceCont * * After this adjustment, A will contain more values than indicated and B will have less. 
*/ - private static void adjustBoundsForOverlappingBuckets(List buckets, AggregationReduceContext reduceContext) { + private static void adjustBoundsForOverlappingBuckets(List buckets) { for (int i = 1; i < buckets.size(); i++) { Bucket curBucket = buckets.get(i); Bucket prevBucket = buckets.get(i - 1); @@ -545,7 +537,7 @@ public InternalAggregation reduce(List aggregations, Aggreg if (reduceContext.isFinalReduce()) { buckets.sort(Comparator.comparing(Bucket::min)); mergeBucketsWithSameMin(reducedBuckets, reduceContext); - adjustBoundsForOverlappingBuckets(reducedBuckets, reduceContext); + adjustBoundsForOverlappingBuckets(reducedBuckets); } return new InternalVariableWidthHistogram(getName(), reducedBuckets, emptyBucketInfo, targetNumBuckets, format, metadata); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedVariableWidthHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedVariableWidthHistogram.java index cc29ce21c2507..ba33373354f3e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedVariableWidthHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedVariableWidthHistogram.java @@ -93,16 +93,6 @@ public double getMin() { return min; } - public String getMinAsString() { - if (minAsString != null) { - return minAsString; - } - if (min != null) { - return Double.toString(min); - } - return null; - } - public void setMax(Double max) { this.max = max; } @@ -115,16 +105,6 @@ public double getMax() { return max; } - public String getMaxAsString() { - if (maxAsString != null) { - return maxAsString; - } - if (max != null) { - return Double.toString(max); - } - return null; - } - static ParsedBucket fromXContent(XContentParser parser, boolean keyed) throws IOException { final ParsedBucket bucket = new ParsedBucket(); bucket.setKeyed(keyed); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregationBuilder.java index f7870df45648e..cc7619070b96a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregationBuilder.java @@ -114,10 +114,6 @@ public VariableWidthHistogramAggregationBuilder setInitialBuffer(int initialBuff return this; } - public int getNumBuckets() { - return numBuckets; - } - public int getShardSize() { if (shardSize == -1) { return numBuckets * 50; @@ -236,11 +232,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return REGISTRY_KEY; - } - @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.V_7_9_0; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java index d89d9b07e57bc..516c9d91a7b65 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java @@ 
-179,7 +179,7 @@ private class MergeBucketsPhase extends CollectionPhase { * Sorts the indices of values by their underlying value * This will produce a merge map whose application will sort values */ - private class ClusterSorter extends InPlaceMergeSorter { + private static class ClusterSorter extends InPlaceMergeSorter { final DoubleArray values; final long[] indexes; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregationBuilder.java index 915d7c32b4c74..f7c190b443a79 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregationBuilder.java @@ -114,11 +114,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return REGISTRY_KEY; - } - @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.ZERO; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java index f4ba3db383586..9df53a0cfe826 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java @@ -69,13 +69,6 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeString(path); } - /** - * Get the path to use for this nested aggregation. - */ - public String path() { - return path; - } - @Override public BucketCardinality bucketCardinality() { return BucketCardinality.ONE; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregationBuilder.java index e6c4e59bf3f93..71e6c6ace203d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregationBuilder.java @@ -72,13 +72,6 @@ public ReverseNestedAggregationBuilder path(String path) { return this; } - /** - * Get the path to use for this nested aggregation. 
- */ - public String path() { - return path; - } - @Override public BucketCardinality bucketCardinality() { return BucketCardinality.ONE; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java index d64e016be7351..789f936359dfa 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java @@ -33,7 +33,6 @@ public class ReverseNestedAggregator extends BucketsAggregator implements Single static final ParseField PATH_FIELD = new ParseField("path"); - private final Query parentFilter; private final BitSetProducer parentBitsetProducer; public ReverseNestedAggregator( @@ -46,6 +45,7 @@ public ReverseNestedAggregator( Map metadata ) throws IOException { super(name, factories, context, parent, cardinality, metadata); + Query parentFilter; if (objectMapper == null) { parentFilter = Queries.newNonNestedFilter(context.getIndexSettings().getIndexVersionCreated()); } else { @@ -102,7 +102,4 @@ public InternalAggregation buildEmptyAggregation() { return new InternalReverseNested(name, 0, buildEmptySubAggregations(), metadata()); } - Query getParentFilter() { - return parentFilter; - } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/IpPrefixAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/IpPrefixAggregationBuilder.java index 733a306a70c75..a64c1221698c2 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/IpPrefixAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/IpPrefixAggregationBuilder.java @@ -188,11 +188,6 @@ protected void innerWriteTo(StreamOutput out) throws IOException { out.writeBoolean(keyed); } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return REGISTRY_KEY; - } - @Override protected ValuesSourceType defaultValueSourceType() { return CoreValuesSourceType.IP; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/IpPrefixAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/IpPrefixAggregator.java index d7e807cc7cc6b..eb8b0f95047b9 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/IpPrefixAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/IpPrefixAggregator.java @@ -53,22 +53,6 @@ public IpPrefix(boolean isIpv6, int prefixLength, boolean appendPrefixLength, By this.netmask = netmask; } - public boolean isIpv6() { - return isIpv6; - } - - public int getPrefixLength() { - return prefixLength; - } - - public boolean appendPrefixLength() { - return appendPrefixLength; - } - - public BytesRef getNetmask() { - return netmask; - } - @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java index a084f251693a4..a8476071ee52d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java @@ -17,7 +17,6 @@ import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; -import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry; import java.io.IOException; import java.util.Map; @@ -27,12 +26,10 @@ public class AbstractRangeAggregatorFactory extends ValuesSourc private final InternalRange.Factory rangeFactory; private final R[] ranges; private final boolean keyed; - private final ValuesSourceRegistry.RegistryKey registryKey; private final RangeAggregatorSupplier aggregatorSupplier; public AbstractRangeAggregatorFactory( String name, - ValuesSourceRegistry.RegistryKey registryKey, ValuesSourceConfig config, R[] ranges, boolean keyed, @@ -47,7 +44,6 @@ public AbstractRangeAggregatorFactory( this.ranges = ranges; this.keyed = keyed; this.rangeFactory = rangeFactory; - this.registryKey = registryKey; this.aggregatorSupplier = aggregatorSupplier; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregationBuilder.java index 1dfb7a8dac2f5..b6462f0f17bad 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregationBuilder.java @@ -124,11 +124,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return REGISTRY_KEY; - } - @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.ZERO; @@ -227,27 +222,6 @@ public DateRangeAggregationBuilder addRange(double from, double to) { return addRange(null, from, to); } - /** - * Add a new range with no lower bound. - * - * @param key - * the key to use for this range in the response - * @param to - * the upper bound on the dates, exclusive - */ - public DateRangeAggregationBuilder addUnboundedTo(String key, double to) { - addRange(new RangeAggregator.Range(key, null, to)); - return this; - } - - /** - * Same as {@link #addUnboundedTo(String, double)} but the key will be - * computed automatically. - */ - public DateRangeAggregationBuilder addUnboundedTo(double to) { - return addUnboundedTo(null, to); - } - /** * Add a new range with no upper bound. 
* diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregatorFactory.java index b0661811c5932..393c732409a91 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregatorFactory.java @@ -30,19 +30,7 @@ public DateRangeAggregatorFactory( RangeAggregatorSupplier aggregatorSupplier ) throws IOException { - super( - name, - DateRangeAggregationBuilder.REGISTRY_KEY, - config, - ranges, - keyed, - rangeFactory, - context, - parent, - subFactoriesBuilder, - metadata, - aggregatorSupplier - ); + super(name, config, ranges, keyed, rangeFactory, context, parent, subFactoriesBuilder, metadata, aggregatorSupplier); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java index a44d92f024e46..dc0b42f507d84 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java @@ -401,11 +401,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return REGISTRY_KEY; - } - public GeoDistanceAggregationBuilder unit(DistanceUnit unit) { if (unit == null) { throw new IllegalArgumentException("[unit] must not be null: [" + name + "]"); @@ -414,10 +409,6 @@ public GeoDistanceAggregationBuilder unit(DistanceUnit unit) { return this; } - public DistanceUnit unit() { - return unit; - } - public GeoDistanceAggregationBuilder distanceType(GeoDistance distanceType) { if (distanceType == null) { throw new IllegalArgumentException("[distanceType] must not be null: [" + name + "]"); @@ -426,19 +417,11 @@ public GeoDistanceAggregationBuilder distanceType(GeoDistance distanceType) { return this; } - public GeoDistance distanceType() { - return distanceType; - } - public GeoDistanceAggregationBuilder keyed(boolean keyed) { this.keyed = keyed; return this; } - public boolean keyed() { - return keyed; - } - @Override public BucketCardinality bucketCardinality() { return BucketCardinality.MANY; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalDateRange.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalDateRange.java index 999d37e1fe65a..c99abc4eb904b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalDateRange.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalDateRange.java @@ -11,7 +11,6 @@ import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.aggregations.support.ValueType; import java.io.IOException; import java.time.Instant; @@ -71,25 +70,9 @@ private Double internalGetTo() { return to; } - @Override - protected InternalRange.Factory getFactory() { - return FACTORY; - } - - boolean keyed() { - return keyed; - } - - DocValueFormat format() { - return format; - } } public static class Factory extends 
InternalRange.Factory { - @Override - public ValueType getValueType() { - return ValueType.DATE; - } @Override public InternalDateRange create( diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalGeoDistance.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalGeoDistance.java index 028fce1b4c567..31306d81220d6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalGeoDistance.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalGeoDistance.java @@ -11,7 +11,6 @@ import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; -import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSourceType; import java.io.IOException; @@ -27,14 +26,6 @@ static class Bucket extends InternalRange.Bucket { super(key, from, to, docCount, aggregations, keyed, DocValueFormat.RAW); } - @Override - protected InternalRange.Factory getFactory() { - return FACTORY; - } - - boolean keyed() { - return keyed; - } } public static class Factory extends InternalRange.Factory { @@ -43,11 +34,6 @@ public ValuesSourceType getValueSourceType() { return CoreValuesSourceType.GEOPOINT; } - @Override - public ValueType getValueType() { - return ValueType.GEOPOINT; - } - @Override public InternalGeoDistance create( String name, diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java index c6f3cbaf740f0..cb970fc87fd33 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java @@ -17,7 +17,6 @@ import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; import org.elasticsearch.search.aggregations.support.SamplingContext; -import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSourceType; import org.elasticsearch.xcontent.XContentBuilder; @@ -117,11 +116,6 @@ public InternalAggregations getAggregations() { return aggregations; } - @SuppressWarnings("unchecked") - protected Factory getFactory() { - return FACTORY; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { final String key = getKeyAsString(); @@ -206,10 +200,6 @@ public ValuesSourceType getValueSourceType() { return CoreValuesSourceType.NUMERIC; } - public ValueType getValueType() { - return ValueType.NUMERIC; - } - @SuppressWarnings("unchecked") public R create(String name, List ranges, DocValueFormat format, boolean keyed, Map metadata) { return (R) new InternalRange(name, ranges, format, keyed, metadata); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/IpRangeAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/IpRangeAggregationBuilder.java index ef580929521d0..9c22917b81c8e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/IpRangeAggregationBuilder.java +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/IpRangeAggregationBuilder.java @@ -39,7 +39,6 @@ import java.net.InetAddress; import java.net.UnknownHostException; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; @@ -164,18 +163,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(to); } - public String getKey() { - return key; - } - - public String getFrom() { - return from; - } - - public String getTo() { - return to; - } - @Override public boolean equals(Object obj) { if (obj == null || getClass() != obj.getClass()) { @@ -239,25 +226,11 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return REGISTRY_KEY; - } - public IpRangeAggregationBuilder keyed(boolean keyed) { this.keyed = keyed; return this; } - public boolean keyed() { - return keyed; - } - - /** Get the current list or ranges that are configured on this aggregation. */ - public List getRanges() { - return Collections.unmodifiableList(ranges); - } - /** Add a new {@link Range} to this aggregation. */ public IpRangeAggregationBuilder addRange(Range range) { ranges.add(range); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java index 9fdbaa10509e6..e94d7c1ebcaed 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java @@ -198,18 +198,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return REGISTRY_KEY; - } - - private static String generateKey(double from, double to, DocValueFormat format) { - StringBuilder builder = new StringBuilder().append(Double.isInfinite(from) ? "*" : format.format(from)) - .append("-") - .append(Double.isInfinite(to) ? 
"*" : format.format(to)); - return builder.toString(); - } - @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.ZERO; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java index 98237d19f0f33..7c89061ea32f2 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java @@ -199,14 +199,6 @@ public double getTo() { return this.originalTo; } - public Double getOriginalFrom() { - return originalFrom; - } - - public Double getOriginalTo() { - return originalTo; - } - public String getFromAsString() { return this.fromAsStr; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregatorFactory.java index 6e9781b0d9531..42d0f55e14a8d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregatorFactory.java @@ -32,18 +32,6 @@ public RangeAggregatorFactory( Map metadata, RangeAggregatorSupplier aggregatorSupplier ) throws IOException { - super( - name, - RangeAggregationBuilder.REGISTRY_KEY, - config, - ranges, - keyed, - rangeFactory, - context, - parent, - subFactoriesBuilder, - metadata, - aggregatorSupplier - ); + super(name, config, ranges, keyed, rangeFactory, context, parent, subFactoriesBuilder, metadata, aggregatorSupplier); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregationBuilder.java index 6dd998c0db043..e77b15e1ed1d4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregationBuilder.java @@ -106,13 +106,6 @@ public DiversifiedAggregationBuilder shardSize(int shardSize) { return this; } - /** - * Get the max num docs to be returned from each shard. - */ - public int shardSize() { - return shardSize; - } - /** * Set the max num docs to be returned per value. */ @@ -126,13 +119,6 @@ public DiversifiedAggregationBuilder maxDocsPerValue(int maxDocsPerValue) { return this; } - /** - * Get the max num docs to be returned per value. - */ - public int maxDocsPerValue() { - return maxDocsPerValue; - } - /** * Set the execution hint. */ @@ -141,13 +127,6 @@ public DiversifiedAggregationBuilder executionHint(String executionHint) { return this; } - /** - * Get the execution hint. 
- */ - public String executionHint() { - return executionHint; - } - @Override public BucketCardinality bucketCardinality() { return BucketCardinality.ONE; @@ -206,11 +185,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return REGISTRY_KEY; - } - @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.ZERO; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregationBuilder.java index 9795097f308da..5c3208418df08 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregationBuilder.java @@ -67,13 +67,6 @@ public SamplerAggregationBuilder shardSize(int shardSize) { return this; } - /** - * Get the max num docs to be returned from each shard. - */ - public int shardSize() { - return shardSize; - } - @Override public BucketCardinality bucketCardinality() { return BucketCardinality.ONE; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregationBuilder.java index 80d396d9aff7d..240f016c66954 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregationBuilder.java @@ -21,7 +21,6 @@ import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Collection; @@ -45,10 +44,6 @@ public class RandomSamplerAggregationBuilder extends AbstractAggregationBuilder< PARSER.declareDouble(RandomSamplerAggregationBuilder::setProbability, PROBABILITY); } - public static RandomSamplerAggregationBuilder parse(String aggregationName, XContentParser parser) throws IOException { - return PARSER.parse(parser, new RandomSamplerAggregationBuilder(aggregationName), null); - } - private int seed = Randomness.get().nextInt(); private double p; @@ -78,10 +73,6 @@ public RandomSamplerAggregationBuilder(StreamInput in) throws IOException { this.seed = in.readInt(); } - public double getProbability() { - return p; - } - protected RandomSamplerAggregationBuilder( RandomSamplerAggregationBuilder clone, AggregatorFactories.Builder factoriesBuilder, @@ -140,10 +131,6 @@ protected AggregatorFactory doBuild( return new RandomSamplerAggregatorFactory(name, seed, p, context, parent, subfactoriesBuilder, metadata); } - public int getSeed() { - return seed; - } - @Override protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedRareTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedRareTerms.java index fd6ecb0b36252..aaa9857fc1562 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedRareTerms.java +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedRareTerms.java @@ -7,8 +7,6 @@ */ package org.elasticsearch.search.aggregations.bucket.terms; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.io.stream.StreamInput; @@ -30,8 +28,6 @@ import java.util.Map; import java.util.Objects; import java.util.Optional; -import java.util.function.Function; -import java.util.stream.Collectors; public abstract class InternalMappedRareTerms, B extends InternalRareTerms.Bucket> extends InternalRareTerms { @@ -42,8 +38,6 @@ public abstract class InternalMappedRareTerms, final SetBackedScalingCuckooFilter filter; - protected final Logger logger = LogManager.getLogger(getClass()); - InternalMappedRareTerms( String name, BucketOrder order, @@ -59,10 +53,6 @@ public abstract class InternalMappedRareTerms, this.filter = filter; } - public long getMaxDocCount() { - return maxDocCount; - } - SetBackedScalingCuckooFilter getFilter() { return filter; } @@ -164,14 +154,6 @@ public List getBuckets() { return buckets; } - @Override - public B getBucketByKey(String term) { - if (bucketMap == null) { - bucketMap = buckets.stream().collect(Collectors.toMap(InternalRareTerms.Bucket::getKeyAsString, Function.identity())); - } - return bucketMap.get(term); - } - @Override public boolean equals(Object obj) { if (this == obj) return true; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalRareTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalRareTerms.java index 2b2f6a19d46a0..6cd4c76317106 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalRareTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalRareTerms.java @@ -141,9 +141,6 @@ protected final void doWriteTo(StreamOutput out) throws IOException { @Override public abstract List getBuckets(); - @Override - public abstract B getBucketByKey(String term); - @Override public InternalAggregation reduce(List aggregations, AggregationReduceContext reduceContext) { throw new UnsupportedOperationException(); @@ -166,11 +163,6 @@ protected B reduceBucket(List buckets, AggregationReduceContext context) { protected abstract A createWithFilter(String name, List buckets, SetBackedScalingCuckooFilter filter); - /** - * Create an array to hold some buckets. Used in collecting the results. 
-     */
-    protected abstract B[] createBucketsArray(int size);
-
     @Override
     public boolean equals(Object obj) {
         if (this == obj) return true;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java
index 85307a903a3eb..1d32251ffc33a 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java
@@ -93,6 +93,18 @@ public long getDocCount() {
             return docCount;
         }
 
+        public void setDocCount(long docCount) {
+            this.docCount = docCount;
+        }
+
+        public long getBucketOrd() {
+            return bucketOrd;
+        }
+
+        public void setBucketOrd(long bucketOrd) {
+            this.bucketOrd = bucketOrd;
+        }
+
         @Override
         public long getDocCountError() {
             if (showDocCountError == false) {
@@ -102,7 +114,7 @@ public long getDocCountError() {
         }
 
         @Override
-        protected void setDocCountError(long docCountError) {
+        public void setDocCountError(long docCountError) {
             this.docCountError = docCountError;
         }
 
@@ -121,6 +133,10 @@ public Aggregations getAggregations() {
             return aggregations;
         }
 
+        public void setAggregations(InternalAggregations aggregations) {
+            this.aggregations = aggregations;
+        }
+
         @Override
         public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
             builder.startObject();
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongRareTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongRareTerms.java
index 8a27a6929f0ba..f8e7ca460ea48 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongRareTerms.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongRareTerms.java
@@ -57,11 +57,6 @@ public Object getKey() {
             return term;
         }
 
-        @Override
-        public Number getKeyAsNumber() {
-            return term;
-        }
-
         @Override
         public int compareKey(Bucket other) {
             return Long.compare(term, other.term);
@@ -126,11 +121,6 @@ protected LongRareTerms createWithFilter(String name, List<LongRareTerms.Bucket>
         return new LongRareTerms(name, order, getMetadata(), format, buckets, maxDocCount, filter);
     }
 
-    @Override
-    protected LongRareTerms.Bucket[] createBucketsArray(int size) {
-        return new LongRareTerms.Bucket[size];
-    }
-
     @Override
     public boolean containsTerm(SetBackedScalingCuckooFilter filter, LongRareTerms.Bucket bucket) {
         return filter.mightContain((long) bucket.getKey());
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedLongRareTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedLongRareTerms.java
index c383d27022648..616bfb3d5a115 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedLongRareTerms.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedLongRareTerms.java
@@ -57,10 +57,6 @@ public String getKeyAsString() {
             return null;
         }
 
-        public Number getKeyAsNumber() {
-            return key;
-        }
-
         @Override
         protected XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {
             builder.field(CommonFields.KEY.getPreferredName(), key);
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedRareTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedRareTerms.java
index 3edf31b9ed69d..e1e865760d5e7 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedRareTerms.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedRareTerms.java
@@ -29,16 +29,6 @@ public List<? extends RareTerms.Bucket> getBuckets() {
         return buckets;
     }
 
-    @Override
-    public RareTerms.Bucket getBucketByKey(String term) {
-        for (RareTerms.Bucket bucket : getBuckets()) {
-            if (bucket.getKeyAsString().equals(term)) {
-                return bucket;
-            }
-        }
-        return null;
-    }
-
     @Override
     protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
         builder.startArray(CommonFields.BUCKETS.getPreferredName());
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedSignificantLongTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedSignificantLongTerms.java
index 9f7bfb564b73f..13cdc88a0082d 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedSignificantLongTerms.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedSignificantLongTerms.java
@@ -52,10 +52,6 @@ public String getKeyAsString() {
             return Long.toString(key);
         }
 
-        public Number getKeyAsNumber() {
-            return key;
-        }
-
         @Override
         protected XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {
             builder.field(CommonFields.KEY.getPreferredName(), key);
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedSignificantStringTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedSignificantStringTerms.java
index f2b1375b7bd94..28cb9a6fb2a44 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedSignificantStringTerms.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedSignificantStringTerms.java
@@ -54,10 +54,6 @@ public String getKeyAsString() {
             return key.utf8ToString();
         }
 
-        public Number getKeyAsNumber() {
-            return Double.parseDouble(key.utf8ToString());
-        }
-
         @Override
         protected XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {
             return builder.field(CommonFields.KEY.getPreferredName(), getKey());
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedStringRareTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedStringRareTerms.java
index 24923a115b27c..e19d07f5ee22d 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedStringRareTerms.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedStringRareTerms.java
@@ -59,13 +59,6 @@ public String getKeyAsString() {
             return null;
         }
 
-        public Number getKeyAsNumber() {
-            if (key != null) {
-                return Double.parseDouble(key.utf8ToString());
-            }
-            return null;
-        }
-
         @Override
         protected XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {
             return builder.field(CommonFields.KEY.getPreferredName(), getKey());
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTerms.java
index 9b544296982e8..33d4443a49148 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTerms.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTerms.java
@@ -18,7 +18,6 @@ public interface RareTerms extends MultiBucketsAggregation {
      */
     interface Bucket extends MultiBucketsAggregation.Bucket {
-        Number getKeyAsNumber();
     }
 
     /**
@@ -27,9 +26,4 @@ interface Bucket extends MultiBucketsAggregation.Bucket {
     @Override
     List<? extends Bucket> getBuckets();
 
-    /**
-     * Get the bucket for the given term, or null if there is no such bucket.
-     */
-    Bucket getBucketByKey(String term);
-
 }
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java
index 768c962d13db9..f2ea616802655 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java
@@ -162,13 +162,6 @@ public IncludeExclude includeExclude() {
         return includeExclude;
     }
 
-    /**
-     * Get the current false positive rate for individual cuckoo filters.
-     */
-    public double getPrecision() {
-        return precision;
-    }
-
     /**
      * Set's the false-positive rate for individual cuckoo filters. Does not dictate the overall fpp rate
      * since we use a "scaling" cuckoo filter which adds more filters as required, and the overall
@@ -244,11 +237,6 @@ public String getType() {
         return NAME;
     }
 
-    @Override
-    protected ValuesSourceRegistry.RegistryKey getRegistryKey() {
-        return REGISTRY_KEY;
-    }
-
     @Override
     public TransportVersion getMinimalSupportedVersion() {
         return TransportVersions.V_7_3_0;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregatorFactory.java
index 088d575a98ea8..2cc49816d3c25 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregatorFactory.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregatorFactory.java
@@ -8,7 +8,6 @@
 
 package org.elasticsearch.search.aggregations.bucket.terms;
 
-import org.elasticsearch.common.logging.DeprecationLogger;
 import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.aggregations.Aggregator;
 import org.elasticsearch.search.aggregations.AggregatorFactories;
@@ -210,20 +209,8 @@ Aggregator create(
             );
         }
 
-        @Override
-        boolean needsGlobalOrdinals() {
-            return false;
-        }
-
     };
 
-    public static ExecutionMode fromString(String value, final DeprecationLogger deprecationLogger) {
-        return switch (value) {
-            case "map" -> MAP;
-            default -> throw new IllegalArgumentException("Unknown `execution_hint`: [" + value + "], expected any of [map]");
-        };
-    }
-
     private final ParseField parseField;
 
     ExecutionMode(ParseField parseField) {
@@ -244,8 +231,6 @@ abstract Aggregator create(
         CardinalityUpperBound cardinality
     ) throws IOException;
 
-    abstract boolean needsGlobalOrdinals();
-
     @Override
     public String toString() {
         return parseField.getPreferredName();
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificanceLookup.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificanceLookup.java
index 9ac9c0e241566..cf8a1df3e0079 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificanceLookup.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificanceLookup.java
@@ -223,7 +223,7 @@ private long getBackgroundFrequency(Query query) throws IOException {
             // for types that use the inverted index, we prefer using a terms
             // enum that will do a better job at reusing index inputs
             Term term = ((TermQuery) query).getTerm();
-            TermsEnum termsEnum = getTermsEnum(term.field());
+            TermsEnum termsEnum = getTermsEnum();
             if (termsEnum.seekExact(term.bytes())) {
                 return termsEnum.docFreq();
             }
@@ -237,7 +237,7 @@ private long getBackgroundFrequency(Query query) throws IOException {
         return new IndexSearcher(context.searcher().getIndexReader()).count(query);
     }
 
-    private TermsEnum getTermsEnum(String field) throws IOException {
+    private TermsEnum getTermsEnum() throws IOException {
         // TODO this method helps because of asMultiBucketAggregator. Once we remove it we can move this logic into the aggregators.
         if (termsEnum != null) {
             return termsEnum;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantLongTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantLongTerms.java
index 6040c5e42e841..3d188e1fd09f7 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantLongTerms.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantLongTerms.java
@@ -72,11 +72,6 @@ public String getKeyAsString() {
             return format.format(term).toString();
         }
 
-        @Override
-        public Number getKeyAsNumber() {
-            return term;
-        }
-
         @Override
         protected XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {
             builder.field(CommonFields.KEY.getPreferredName(), term);
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantStringTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantStringTerms.java
index 9782093401396..b35359c80a75e 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantStringTerms.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantStringTerms.java
@@ -66,12 +66,6 @@ public void writeTo(StreamOutput out) throws IOException {
             aggregations.writeTo(out);
         }
 
-        @Override
-        public Number getKeyAsNumber() {
-            // this method is needed for scripted numeric aggregations
-            return Double.parseDouble(termBytes.utf8ToString());
-        }
-
         @Override
         public String getKeyAsString() {
             return format.format(termBytes).toString();
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTerms.java
index 55be3342a10d7..f28054aaf52eb 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTerms.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTerms.java
@@ -47,10 +47,6 @@ interface Bucket extends MultiBucketsAggregation.Bucket {
          */
         long getSupersetSize();
 
-        /**
-         * @return The key, expressed as a number
-         */
-        Number getKeyAsNumber();
     }
 
     @Override
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregationBuilder.java
index 5311688ceee54..056a8a00dd72f 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregationBuilder.java
@@ -186,14 +186,6 @@ public TermsAggregator.BucketCountThresholds bucketCountThresholds() {
         return bucketCountThresholds;
     }
 
-    public SignificantTermsAggregationBuilder bucketCountThresholds(TermsAggregator.BucketCountThresholds bucketCountThresholds) {
-        if (bucketCountThresholds == null) {
-            throw new IllegalArgumentException("[bucketCountThresholds] must not be null: [" + name + "]");
-        }
-        this.bucketCountThresholds = bucketCountThresholds;
-        return this;
-    }
-
     /**
      * Sets the size - indicating how many term buckets should be returned
     * (defaults to 10)
@@ -256,13 +248,6 @@ public SignificantTermsAggregationBuilder executionHint(String executionHint) {
         return this;
     }
 
-    /**
-     * Expert: gets an execution hint to the aggregation.
-     */
-    public String executionHint() {
-        return executionHint;
-    }
-
     public SignificantTermsAggregationBuilder backgroundFilter(QueryBuilder backgroundFilter) {
         if (backgroundFilter == null) {
             throw new IllegalArgumentException("[backgroundFilter] must not be null: [" + name + "]");
@@ -271,10 +256,6 @@ public SignificantTermsAggregationBuilder backgroundFilter(QueryBuilder backgrou
         return this;
     }
 
-    public QueryBuilder backgroundFilter() {
-        return backgroundFilter;
-    }
-
     /**
      * Set terms to include and exclude from the aggregation results
     */
@@ -379,11 +360,6 @@ public String getType() {
         return NAME;
     }
 
-    @Override
-    protected ValuesSourceRegistry.RegistryKey getRegistryKey() {
-        return REGISTRY_KEY;
-    }
-
     @Override
     public TransportVersion getMinimalSupportedVersion() {
         return TransportVersions.V_7_3_0;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTextAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTextAggregationBuilder.java
index a73d12c23a378..99dc93a175f7b 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTextAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTextAggregationBuilder.java
@@ -123,10 +123,6 @@ protected AggregationBuilder shallowCopy(Builder factoriesBuilder, Map<String, O
 
-    @Override
-    protected ValuesSourceRegistry.RegistryKey getRegistryKey() {
-        return REGISTRY_KEY;
-    }
-
     @Override
     public TransportVersion getMinimalSupportedVersion() {
         return TransportVersions.ZERO;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedRareTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedRareTerms.java
index 87f5476b7c65a..eb504e05292a6 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedRareTerms.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedRareTerms.java
@@ -107,13 +107,4 @@ public List<UnmappedRareTerms.Bucket> getBuckets() {
         return emptyList();
     }
 
-    @Override
-    public UnmappedRareTerms.Bucket getBucketByKey(String term) {
-        return null;
-    }
-
-    @Override
-    protected UnmappedRareTerms.Bucket[] createBucketsArray(int size) {
-        return new UnmappedRareTerms.Bucket[size];
-    }
 }
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/heuristic/PercentageScore.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/heuristic/PercentageScore.java
index 7c3ee3bdbc044..dee8a2b16afb0 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/heuristic/PercentageScore.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/heuristic/PercentageScore.java
@@ -8,13 +8,10 @@
 
 package org.elasticsearch.search.aggregations.bucket.terms.heuristic;
 
-import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.index.query.QueryShardException;
 import org.elasticsearch.xcontent.ObjectParser;
 import org.elasticsearch.xcontent.XContentBuilder;
-import org.elasticsearch.xcontent.XContentParser;
 
 import java.io.IOException;
 
@@ -42,17 +39,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
         return builder;
     }
 
-    public static SignificanceHeuristic parse(XContentParser parser) throws IOException, QueryShardException {
-        // move to the closing bracket
-        if (parser.nextToken().equals(XContentParser.Token.END_OBJECT) == false) {
-            throw new ElasticsearchParseException(
-                "failed to parse [percentage] significance heuristic. expected an empty object, " + "but got [{}] instead",
-                parser.currentToken()
-            );
-        }
-        return new PercentageScore();
-    }
-
     /**
      * Indicates the significance of a term in a sample by determining what percentage
     * of all occurrences of a term are found in the sample.
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractPercentilesAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractPercentilesAggregationBuilder.java
index c66a93b480558..53b8c5ce11e9f 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractPercentilesAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractPercentilesAggregationBuilder.java
@@ -193,13 +193,6 @@ public T keyed(boolean keyed) {
         return (T) this;
     }
 
-    /**
-     * Get whether the XContent response should be keyed
-     */
-    public boolean keyed() {
-        return keyed;
-    }
-
     /**
      * Expert: set the number of significant digits in the values. Only relevant
      * when using {@link PercentilesMethod#HDR}.
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AvgAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AvgAggregationBuilder.java
index e66d3d0a34580..3038e35dc06ac 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AvgAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AvgAggregationBuilder.java
@@ -99,11 +94,6 @@ public String getType() {
         return NAME;
     }
 
-    @Override
-    protected ValuesSourceRegistry.RegistryKey getRegistryKey() {
-        return REGISTRY_KEY;
-    }
-
     @Override
     public TransportVersion getMinimalSupportedVersion() {
         return TransportVersions.ZERO;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java
index 615b46434226d..25d38b11b03ff 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java
@@ -132,25 +132,6 @@ public CardinalityAggregationBuilder precisionThreshold(long precisionThreshold)
         return this;
     }
 
-    /**
-     * Get the precision threshold. Higher values improve accuracy but also
-     * increase memory usage. Will return <code>null</code> if the
-     * precisionThreshold has not been set yet.
-     */
-    public Long precisionThreshold() {
-        return precisionThreshold;
-    }
-
-    /**
-     * Get the execution hint. This is an optional user specified hint that
-     * will be used to decide on the specific collection algorithm. Since this
-     * is a hint, the implementation may choose to ignore it (typically when
-     * the specified method is not applicable to the given field type)
-     */
-    public String ExecutionHint() {
-        return executionHint;
-    }
-
     /**
      * Set the execution hint. This is an optional user specified hint that
      * will be used to decide on the specific collection algorithm. Since this
@@ -213,11 +194,6 @@ public String getType() {
         return NAME;
     }
 
-    @Override
-    protected ValuesSourceRegistry.RegistryKey getRegistryKey() {
-        return REGISTRY_KEY;
-    }
-
     @Override
     public TransportVersion getMinimalSupportedVersion() {
         return TransportVersions.ZERO;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregationBuilder.java
index 1078f48a39317..91257fb2cd88e 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregationBuilder.java
@@ -104,10 +104,6 @@ public ExtendedStatsAggregationBuilder sigma(double sigma) {
         return this;
     }
 
-    public double sigma() {
-        return sigma;
-    }
-
     @Override
     protected ExtendedStatsAggregatorFactory innerBuild(
         AggregationContext context,
@@ -144,11 +140,6 @@ public String getType() {
         return NAME;
     }
 
-    @Override
-    protected ValuesSourceRegistry.RegistryKey getRegistryKey() {
-        return REGISTRY_KEY;
-    }
-
     @Override
     public TransportVersion getMinimalSupportedVersion() {
         return TransportVersions.ZERO;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregationBuilder.java
index db7fdb5995eec..e109cb4066785 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregationBuilder.java
@@ -99,13 +99,6 @@ public GeoBoundsAggregationBuilder wrapLongitude(boolean wrapLongitude) {
         return this;
     }
 
-    /**
-     * Get whether to wrap longitudes.
-     */
-    public boolean wrapLongitude() {
-        return wrapLongitude;
-    }
-
     @Override
     protected GeoBoundsAggregatorFactory innerBuild(
         AggregationContext context,
@@ -151,11 +144,6 @@ public String getType() {
         return NAME;
     }
 
-    @Override
-    protected ValuesSourceRegistry.RegistryKey getRegistryKey() {
-        return REGISTRY_KEY;
-    }
-
     @Override
     public TransportVersion getMinimalSupportedVersion() {
         return TransportVersions.ZERO;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregationBuilder.java
index ac0ffae7ac2e1..d5fbeb0459a3b 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregationBuilder.java
@@ -106,11 +106,6 @@ public String getType() {
         return NAME;
     }
 
-    @Override
-    protected ValuesSourceRegistry.RegistryKey getRegistryKey() {
-        return REGISTRY_KEY;
-    }
-
     @Override
     public TransportVersion getMinimalSupportedVersion() {
         return TransportVersions.ZERO;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlus.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlus.java
index d9e06c95fffc8..08d0907c2a1bd 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlus.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlus.java
@@ -202,7 +202,6 @@ private void merge(long thisBucket, AbstractHyperLogLog.RunLenIterator runLens)
     private static class HyperLogLog extends AbstractHyperLogLog implements Releasable {
 
         private final BigArrays bigArrays;
-        private final int precision;
         // array for holding the runlens.
         private ByteArray runLens;
 
@@ -210,7 +209,6 @@ private static class HyperLogLog extends AbstractHyperLogLog implements Releasab
             super(precision);
             this.runLens = bigArrays.newByteArray(initialBucketCount << precision);
             this.bigArrays = bigArrays;
-            this.precision = precision;
         }
 
         public long maxOrd() {
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalStats.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalStats.java
index 6dccdf0c2fae4..2c6b28362c128 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalStats.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalStats.java
@@ -226,7 +226,7 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th
         return builder;
     }
 
-    protected XContentBuilder otherStatsToXContent(XContentBuilder builder, Params params) throws IOException {
+    protected XContentBuilder otherStatsToXContent(XContentBuilder builder, @SuppressWarnings("unused") Params params) throws IOException {
         return builder;
     }
 
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MaxAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MaxAggregationBuilder.java
index b4bb1f01b0662..e14de225dcd92 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MaxAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MaxAggregationBuilder.java
@@ -103,11 +98,6 @@ public String getType() {
         return NAME;
     }
 
-    @Override
-    protected ValuesSourceRegistry.RegistryKey getRegistryKey() {
-        return REGISTRY_KEY;
-    }
-
     @Override
     public TransportVersion getMinimalSupportedVersion() {
         return TransportVersions.ZERO;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregationBuilder.java
index 315e0bab027c5..8f5d3c1b9f322 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregationBuilder.java
@@ -81,13 +81,6 @@ protected MedianAbsoluteDeviationAggregationBuilder(
         this.executionHint = clone.executionHint;
     }
 
-    /**
-     * Returns the compression factor of the t-digest sketches used
-     */
-    public double compression() {
-        return compression;
-    }
-
     /**
      * Set the compression factor of the t-digest sketches used
     */
@@ -187,11 +180,6 @@ public String getType() {
         return NAME;
     }
 
-    @Override
-    protected ValuesSourceRegistry.RegistryKey getRegistryKey() {
-        return REGISTRY_KEY;
-    }
-
     @Override
     public TransportVersion getMinimalSupportedVersion() {
         return TransportVersions.ZERO;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MinAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MinAggregationBuilder.java
index 3d4957feba7db..d410fb032117e 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MinAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MinAggregationBuilder.java
@@ -105,11 +100,6 @@ public String getType() {
         return NAME;
     }
 
-    @Override
-    protected ValuesSourceRegistry.RegistryKey getRegistryKey() {
-        return REGISTRY_KEY;
-    }
-
     @Override
     public TransportVersion getMinimalSupportedVersion() {
         return TransportVersions.ZERO;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedExtendedStats.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedExtendedStats.java
index 516783d42afd1..4bf32b5ef5cf1 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedExtendedStats.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedExtendedStats.java
@@ -39,9 +39,6 @@ public class ParsedExtendedStats extends ParsedStats implements ExtendedStats {
     protected double stdDeviationBoundUpperSampling;
     protected double stdDeviationBoundLowerSampling;
 
-    protected double sum;
-    protected double avg;
-
     @Override
     public String getType() {
         return ExtendedStatsAggregationBuilder.NAME;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksAggregationBuilder.java
index e61105b5822cf..6854ff8f8b632 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksAggregationBuilder.java
@@ -110,11 +110,6 @@ public String getType() {
         return NAME;
     }
 
-    @Override
-    protected ValuesSourceRegistry.RegistryKey getRegistryKey() {
-        return REGISTRY_KEY;
-    }
-
     @Override
     public TransportVersion getMinimalSupportedVersion() {
         return TransportVersions.ZERO;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesAggregationBuilder.java
index 0347d157cf3c5..934619ae76b7d 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesAggregationBuilder.java
@@ -146,11 +146,6 @@ public String getType() {
         return NAME;
     }
 
-    @Override
-    protected ValuesSourceRegistry.RegistryKey getRegistryKey() {
-        return REGISTRY_KEY;
-    }
-
     @Override
     public TransportVersion getMinimalSupportedVersion() {
         return TransportVersions.ZERO;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregationBuilder.java
index 8386bb8bbdb06..0596af8cbb51d 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregationBuilder.java
@@ -127,13 +127,6 @@ public ScriptedMetricAggregationBuilder initScript(Script initScript) {
         return this;
     }
 
-    /**
-     * Get the {@code init} script.
-     */
-    public Script initScript() {
-        return initScript;
-    }
-
     /**
      * Set the {@code map} script.
     */
@@ -145,13 +138,6 @@ public ScriptedMetricAggregationBuilder mapScript(Script mapScript) {
         return this;
     }
 
-    /**
-     * Get the {@code map} script.
-     */
-    public Script mapScript() {
-        return mapScript;
-    }
-
     /**
      * Set the {@code combine} script.
     */
@@ -163,13 +149,6 @@ public ScriptedMetricAggregationBuilder combineScript(Script combineScript) {
         return this;
     }
 
-    /**
-     * Get the {@code combine} script.
-     */
-    public Script combineScript() {
-        return combineScript;
-    }
-
     /**
      * Set the {@code reduce} script.
     */
@@ -181,13 +160,6 @@ public ScriptedMetricAggregationBuilder reduceScript(Script reduceScript) {
         return this;
     }
 
-    /**
-     * Get the {@code reduce} script.
-     */
-    public Script reduceScript() {
-        return reduceScript;
-    }
-
     /**
      * Set parameters that will be available in the {@code init},
      * {@code map} and {@code combine} phases.
@@ -200,14 +172,6 @@ public ScriptedMetricAggregationBuilder params(Map<String, Object> params) {
         return this;
     }
 
-    /**
-     * Get parameters that will be available in the {@code init},
-     * {@code map} and {@code combine} phases.
-     */
-    public Map<String, Object> params() {
-        return params;
-    }
-
     @Override
     public BucketCardinality bucketCardinality() {
         return BucketCardinality.NONE;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregationBuilder.java
index e978ffec42b4a..f6a04a9d64684 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregationBuilder.java
@@ -110,11 +110,6 @@ public String getType() {
         return NAME;
     }
 
-    @Override
-    protected ValuesSourceRegistry.RegistryKey getRegistryKey() {
-        return REGISTRY_KEY;
-    }
-
     @Override
     public TransportVersion getMinimalSupportedVersion() {
         return TransportVersions.ZERO;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/SumAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/SumAggregationBuilder.java
index da441363020bd..feeecc2d5a06a 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/SumAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/SumAggregationBuilder.java
@@ -103,11 +98,6 @@ public String getType() {
         return NAME;
     }
 
-    @Override
-    protected ValuesSourceRegistry.RegistryKey getRegistryKey() {
-        return REGISTRY_KEY;
-    }
-
     @Override
     public TransportVersion getMinimalSupportedVersion() {
         return TransportVersions.ZERO;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestState.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestState.java
index d80eb8a58040e..23c26794f6bb5 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestState.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestState.java
@@ -77,9 +77,6 @@ public static TDigestState create(double compression, TDigestExecutionHint execu
         return switch (executionHint) {
             case HIGH_ACCURACY -> createOptimizedForAccuracy(compression);
             case DEFAULT -> create(compression);
-            default -> throw new IllegalArgumentException(
-                "Unexpected TDigestExecutionHint in TDigestState initialization: " + executionHint
-            );
         };
     }
 
@@ -99,7 +96,6 @@ protected TDigestState(Type type, double compression) {
             case AVL_TREE -> TDigest.createAvlTreeDigest(compression);
             case SORTING -> TDigest.createSortingDigest();
             case MERGING -> TDigest.createMergingDigest(compression);
-            default -> throw new IllegalArgumentException("Unexpected TDigestState type: " + type);
         };
         this.type = type;
         this.compression = compression;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java
index 4a5df855b5095..d7113fc6ec798 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java
@@ -278,13 +278,6 @@ public TopHitsAggregationBuilder sorts(List<SortBuilder<?>> sorts) {
         return this;
     }
 
-    /**
-     * Gets the bytes representing the sort builders for this request.
-     */
-    public List<SortBuilder<?>> sorts() {
-        return sorts;
-    }
-
     /**
      * Adds highlight to perform as part of the search.
     */
@@ -296,23 +289,6 @@ public TopHitsAggregationBuilder highlighter(HighlightBuilder highlightBuilder)
         return this;
     }
 
-    /**
-     * Gets the highlighter builder for this request.
-     */
-    public HighlightBuilder highlighter() {
-        return highlightBuilder;
-    }
-
-    /**
-     * Indicates whether the response should contain the stored _source for
-     * every hit
-     */
-    public TopHitsAggregationBuilder fetchSource(boolean fetch) {
-        FetchSourceContext fetchSourceContext = this.fetchSourceContext != null ? this.fetchSourceContext : FetchSourceContext.FETCH_SOURCE;
-        this.fetchSourceContext = FetchSourceContext.of(fetch, fetchSourceContext.includes(), fetchSourceContext.excludes());
-        return this;
-    }
-
     /**
      * Indicate that _source should be returned with every hit, with an
      * "include" and/or "exclude" set which can include simple wildcard
@@ -362,14 +338,6 @@ public TopHitsAggregationBuilder fetchSource(@Nullable FetchSourceContext fetchS
         return this;
     }
 
-    /**
-     * Gets the {@link FetchSourceContext} which defines how the _source
-     * should be fetched.
-     */
-    public FetchSourceContext fetchSource() {
-        return fetchSourceContext;
-    }
-
     /**
      * Adds a stored field to load and return (note, it must be stored) as part of the search request.
     * To disable the stored fields entirely (source and metadata fields) use {@code storedField("_none_")}.
@@ -394,13 +362,6 @@ public TopHitsAggregationBuilder storedFields(List<String> fields) {
         return this;
     }
 
-    /**
-     * Gets the stored fields context
-     */
-    public StoredFieldsContext storedFields() {
-        return storedFieldsContext;
-    }
-
     /**
      * Adds a field to load from doc values and return as part of
     * the search request.
@@ -424,13 +385,6 @@ public TopHitsAggregationBuilder docValueField(String docValueField) {
         return docValueField(docValueField, null);
     }
 
-    /**
-     * Gets the field-data fields.
-     */
-    public List<FieldAndFormat> docValueFields() {
-        return docValueFields;
-    }
-
     /**
      * Adds a field to load and return as part of the search request.
    */
@@ -452,13 +406,6 @@ public TopHitsAggregationBuilder fetchField(String field) {
         return fetchField(new FieldAndFormat(field, null, null));
     }
 
-    /**
-     * Gets the fields to load and return as part of the search request.
-     */
-    public List<FieldAndFormat> fetchFields() {
-        return fetchFields;
-    }
-
     /**
     * Adds a script field under the given name with the provided script.
    *
@@ -511,13 +458,6 @@ public TopHitsAggregationBuilder scriptFields(List<ScriptField> scriptFields) {
         return this;
     }
 
-    /**
-     * Gets the script fields.
-     */
-    public Set<ScriptField> scriptFields() {
-        return scriptFields;
-    }
-
     /**
      * Should each {@link org.elasticsearch.search.SearchHit} be returned
      * with an explanation of the hit (ranking).
@@ -527,14 +467,6 @@ public TopHitsAggregationBuilder explain(boolean explain) {
         return this;
     }
 
-    /**
-     * Indicates whether each search hit will be returned with an
-     * explanation of the hit (ranking)
-     */
-    public boolean explain() {
-        return explain;
-    }
-
     /**
      * Should each {@link org.elasticsearch.search.SearchHit} be returned
      * with a version associated with it.
@@ -544,14 +476,6 @@ public TopHitsAggregationBuilder version(boolean version) {
         return this;
     }
 
-    /**
-     * Indicates whether the document's version will be included in the
-     * search hits.
-     */
-    public boolean version() {
-        return version;
-    }
-
     /**
      * Should each {@link org.elasticsearch.search.SearchHit} be returned with the
      * sequence number and primary term of the last modification of the document.
@@ -561,14 +485,6 @@ public TopHitsAggregationBuilder seqNoAndPrimaryTerm(Boolean seqNoAndPrimaryTerm
         return this;
     }
 
-    /**
-     * Indicates whether {@link org.elasticsearch.search.SearchHit}s should be returned with the
-     * sequence number and primary term of the last modification of the document.
-     */
-    public Boolean seqNoAndPrimaryTerm() {
-        return seqNoAndPrimaryTerm;
-    }
-
     /**
      * Applies when sorting, and controls if scores will be tracked as well.
      * Defaults to {@code false}.
@@ -578,13 +494,6 @@ public TopHitsAggregationBuilder trackScores(boolean trackScores) {
         return this;
     }
 
-    /**
-     * Indicates whether scores will be tracked for this request.
-     */
-    public boolean trackScores() {
-        return trackScores;
-    }
-
     @Override
     public TopHitsAggregationBuilder subAggregations(Builder subFactories) {
         throw new AggregationInitializationException(
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregationBuilder.java
index 71f745559fc77..3b815640e1199 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregationBuilder.java
@@ -112,11 +112,6 @@ public String getType() {
         return NAME;
     }
 
-    @Override
-    protected ValuesSourceRegistry.RegistryKey getRegistryKey() {
-        return REGISTRY_KEY;
-    }
-
     @Override
     public TransportVersion getMinimalSupportedVersion() {
         return TransportVersions.ZERO;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/AbstractPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/AbstractPipelineAggregationBuilder.java
index 74bb9a8881d79..b3335dcbd5be5 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/AbstractPipelineAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/AbstractPipelineAggregationBuilder.java
@@ -58,10 +58,6 @@ public void writeTo(StreamOutput out) throws IOException {
 
     protected abstract void doWriteTo(StreamOutput out) throws IOException;
 
-    public String type() {
-        return type;
-    }
-
     protected abstract PipelineAggregator createInternal(Map<String, Object> metadata);
 
     /**
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricsPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricsPipelineAggregationBuilder.java
index b5a9da37c60f6..c93d8f1c41874 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricsPipelineAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricsPipelineAggregationBuilder.java
@@ -65,13 +65,6 @@ public AF format(String format) {
         return (AF) this;
     }
 
-    /**
-     * Gets the format to use on the output of this aggregation.
-     */
-    public String format() {
-        return format;
-    }
-
     protected DocValueFormat formatter() {
         if (format != null) {
             return new DocValueFormat.Decimal(format);
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptPipelineAggregationBuilder.java
index 4993c8ec25d83..1cf49af421466 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptPipelineAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptPipelineAggregationBuilder.java
@@ -142,13 +142,6 @@ public BucketScriptPipelineAggregationBuilder format(String format) {
         return this;
     }
 
-    /**
-     * Gets the format to use on the output of this aggregation.
-     */
-    public String format() {
-        return format;
-    }
-
     protected DocValueFormat formatter() {
         if (format != null) {
             return new DocValueFormat.Decimal(format);
@@ -168,13 +161,6 @@ public BucketScriptPipelineAggregationBuilder gapPolicy(GapPolicy gapPolicy) {
         return this;
     }
 
-    /**
-     * Gets the gap policy to use for this aggregation.
-     */
-    public GapPolicy gapPolicy() {
-        return gapPolicy;
-    }
-
     @Override
     protected PipelineAggregator createInternal(Map<String, Object> metadata) {
         return new BucketScriptPipelineAggregator(name, bucketsPathsMap, script, formatter(), gapPolicy, metadata);
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumPipelineAggregationBuilder.java
index c2816629b653f..944c4e8a88d08 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumPipelineAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumPipelineAggregationBuilder.java
@@ -73,13 +73,6 @@ public CumulativeSumPipelineAggregationBuilder format(String format) {
         return this;
     }
 
-    /**
-     * Gets the format to use on the output of this aggregation.
-     */
-    public String format() {
-        return format;
-    }
-
     protected DocValueFormat formatter() {
         if (format != null) {
             return new DocValueFormat.Decimal(format);
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ParsedDerivative.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ParsedDerivative.java
index 66d2c0621b410..e7751230334d5 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ParsedDerivative.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ParsedDerivative.java
@@ -24,16 +24,6 @@ public class ParsedDerivative extends ParsedSimpleValue {
     private static final ParseField NORMALIZED_AS_STRING = new ParseField("normalized_value_as_string");
     private static final ParseField NORMALIZED = new ParseField("normalized_value");
 
-    /**
-     * Returns the normalized value. If no normalised factor has been specified
-     * this method will return {@link #value()}
-     *
-     * @return the normalized value
-     */
-    public double normalizedValue() {
-        return this.normalizedValue;
-    }
-
     @Override
     public String getType() {
         return "derivative";
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java
index a47f1255e0fe9..4edecb3c8b480 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java
@@ -84,13 +84,6 @@ public PercentilesBucketPipelineAggregationBuilder setKeyed(boolean keyed) {
         return this;
     }
 
-    /**
-     * Get whether the XContent should be keyed
-     */
-    public boolean getKeyed() {
-        return keyed;
-    }
-
     @Override
     protected PipelineAggregator createInternal(Map<String, Object> metadata) {
         return new PercentilesBucketPipelineAggregator(name, percents, keyed, bucketsPaths, gapPolicy(), formatter(), metadata);
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffPipelineAggregationBuilder.java
index 1143594e98d16..935104bcacd51 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffPipelineAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffPipelineAggregationBuilder.java
@@ -70,13 +70,6 @@ public SerialDiffPipelineAggregationBuilder lag(int lag) {
         return this;
     }
 
-    /**
-     * Gets the lag to use when calculating the serial difference.
-     */
-    public int lag() {
-        return lag;
-    }
-
     /**
      * Sets the format to use on the output of this aggregation.
    */
@@ -88,13 +81,6 @@ public SerialDiffPipelineAggregationBuilder format(String format) {
         return this;
     }
 
-    /**
-     * Gets the format to use on the output of this aggregation.
-     */
-    public String format() {
-        return format;
-    }
-
     /**
      * Sets the GapPolicy to use on the output of this aggregation.
    */
@@ -106,13 +92,6 @@ public SerialDiffPipelineAggregationBuilder gapPolicy(GapPolicy gapPolicy) {
         return this;
     }
 
-    /**
-     * Gets the GapPolicy to use on the output of this aggregation.
-     */
-    public GapPolicy gapPolicy() {
-        return gapPolicy;
-    }
-
     protected DocValueFormat formatter() {
         if (format != null) {
             return new DocValueFormat.Decimal(format);
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationContext.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationContext.java
index 822dd6d983e5c..c2aa26409f010 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationContext.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationContext.java
@@ -150,11 +150,6 @@ public final FieldContext buildFieldContext(MappedFieldType ft) {
      */
     public abstract Set<String> getMatchingFieldNames(String pattern);
 
-    /**
-     * Returns true if the field identified by the provided name is mapped, false otherwise
-     */
-    public abstract boolean isFieldMapped(String field);
-
     /**
      * Compile a script.
      */
@@ -474,11 +469,6 @@ public Set<String> getMatchingFieldNames(String pattern) {
             return context.getMatchingFieldNames(pattern);
         }
 
-        @Override
-        public boolean isFieldMapped(String field) {
-            return context.isFieldMapped(field);
-        }
-
         @Override
         public <FactoryType> FactoryType compile(Script script, ScriptContext<FactoryType> scriptContext) {
             return context.compile(script, scriptContext);
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationInfo.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationInfo.java
index e3662b150270c..11b2da7c82e24 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationInfo.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationInfo.java
@@ -55,10 +55,6 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeMap(aggs, StreamOutput::writeStringCollection);
     }
 
-    public Map<String, Set<String>> getAggregations() {
-        return aggs;
-    }
-
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject("aggregations");
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceFieldConfig.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceFieldConfig.java
index 9f698528dcefb..42330b995ae94 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceFieldConfig.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceFieldConfig.java
@@ -310,37 +310,21 @@ public static class Builder {
         private String format = null;
         private IncludeExclude includeExclude = null;
 
-        public String getFieldName() {
-            return fieldName;
-        }
-
         public Builder setFieldName(String fieldName) {
             this.fieldName = fieldName;
             return this;
         }
 
-        public Object getMissing() {
-            return missing;
-        }
-
         public Builder setMissing(Object missing) {
             this.missing = missing;
             return this;
         }
 
-        public Script getScript() {
-            return script;
-        }
-
         public Builder setScript(Script script) {
             this.script = script;
             return this;
        }
 
-        public ZoneId getTimeZone() {
-            return timeZone;
-        }
-
         public Builder setTimeZone(ZoneId timeZone) {
             this.timeZone = timeZone;
             return this;
@@ -356,19 +340,11 @@ public Builder setUserValueTypeHint(ValueType userValueTypeHint) {
             return this;
         }
 
-        public ValueType getUserValueTypeHint() {
-            return userValueTypeHint;
-        }
-
         public Builder setFormat(String format) {
             this.format = format;
             return this;
         }
 
-        public String getFormat() {
-            return format;
-        }
-
         public Builder setIncludeExclude(IncludeExclude includeExclude) {
             this.includeExclude = includeExclude;
             return this;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/TimeSeriesIndexSearcher.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/TimeSeriesIndexSearcher.java
index 375ccd127dc9e..21138f46e974e 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/support/TimeSeriesIndexSearcher.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/TimeSeriesIndexSearcher.java
@@ -240,8 +240,6 @@ private static class LeafWalker {
         private final SortedNumericDocValues timestamps;    // TODO can we have this just a NumericDocValues?
         private final BytesRefBuilder scratch = new BytesRefBuilder();
 
-        private final Scorer scorer;
-
         int docId = -1;
         int tsidOrd;
         long timestamp;
@@ -252,7 +250,6 @@ private static class LeafWalker {
             this.collector = bucketCollector.getLeafCollector(aggCtx);
             liveDocs = context.reader().getLiveDocs();
             this.collector.setScorer(scorer);
-            this.scorer = scorer;
             iterator = scorer.iterator();
             tsids = DocValues.getSorted(context.reader(), TimeSeriesIdFieldMapper.NAME);
             timestamps = DocValues.getSortedNumeric(context.reader(), DataStream.TIMESTAMP_FIELD_NAME);
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/TimeSeriesValuesSourceType.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/TimeSeriesValuesSourceType.java
index e1e249466aea6..6529c3f565a33 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/support/TimeSeriesValuesSourceType.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/TimeSeriesValuesSourceType.java
@@ -84,10 +84,6 @@ public ValuesSource replaceMissing(
         }
     };
 
-    public static ValuesSourceType fromString(String name) {
-        return valueOf(name.trim().toUpperCase(Locale.ROOT));
-    }
-
     public String value() {
         return name().toLowerCase(Locale.ROOT);
     }
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java
index 2106976252581..bc83a5b5cd3b1 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java
@@ -49,7 +49,7 @@ public enum ValueType implements Writeable {
     final ValuesSourceType valuesSourceType;
     final DocValueFormat defaultFormat;
     private final byte id;
-    private String preferredName;
+    private final String preferredName;
 
     public static final ParseField VALUE_TYPE = new ParseField("value_type", "valueType");
 
@@ -101,10 +101,6 @@ public boolean isNotA(ValueType valueType) {
         return isA(valueType) == false;
     }
 
-    public DocValueFormat defaultFormat() {
-        return defaultFormat;
-    }
-
     public static ValueType lenientParse(String type) {
         return switch (type) {
             case "string" -> STRING;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java
index c1b9b8c376a59..2b7e27eb97c7d 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java
@@ -374,14 +374,6 @@ public AB missing(Object missing) {
         return (AB) this;
     }
 
-    /**
-     * Gets the value to use when the aggregation finds a missing value in a
-     * document
-     */
-    public Object missing() {
-        return missing;
-    }
-
     /**
      * Sets the time zone to use for this aggregation
    */
@@ -422,8 +414,6 @@ protected final ValuesSourceAggregatorFactory doBuild(AggregationContext context
         return factory;
     }
 
-    protected abstract ValuesSourceRegistry.RegistryKey getRegistryKey();
-
     /**
     * Aggregations should use this method to define a {@link ValuesSourceType} of last resort. This will only be used when the resolver
    * can't find a field and the user hasn't provided a value type hint.
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceRegistry.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceRegistry.java
index 6249612184157..c33ad5266d4e2 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceRegistry.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceRegistry.java
@@ -150,7 +150,7 @@ public ValuesSourceRegistry build() {
 
     /** Maps Aggregation names to (ValuesSourceType, Supplier) pairs, keyed by ValuesSourceType */
     private final AggregationUsageService usageService;
-    private Map<RegistryKey<?>, Map<ValuesSourceType, ?>> aggregatorRegistry;
+    private final Map<RegistryKey<?>, Map<ValuesSourceType, ?>> aggregatorRegistry;
 
     public ValuesSourceRegistry(
         Map<RegistryKey<?>, List<Map.Entry<ValuesSourceType, ?>>> aggregatorRegistry,
@@ -160,10 +160,6 @@ public ValuesSourceRegistry(
         this.usageService = usageService;
     }
 
-    public boolean isRegistered(RegistryKey<?> registryKey) {
-        return aggregatorRegistry.containsKey(registryKey);
-    }
-
     public <T> T getAggregator(RegistryKey<T> registryKey, ValuesSourceConfig valuesSourceConfig) {
         if (registryKey != null && aggregatorRegistry.containsKey(registryKey)) {
             @SuppressWarnings("unchecked")
diff --git a/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java b/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java
index 049e06b0d98c7..f787e30644658 100644
--- a/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java
@@ -204,6 +204,6 @@ public CollapseContext build(SearchExecutionContext searchExecutionContext) {
             );
         }
 
-        return new CollapseContext(field, fieldType, innerHits);
+        return new CollapseContext(field, fieldType);
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/search/collapse/CollapseContext.java b/server/src/main/java/org/elasticsearch/search/collapse/CollapseContext.java
index 62d7f7cc74cd4..080caaeed0fde 100644
--- a/server/src/main/java/org/elasticsearch/search/collapse/CollapseContext.java
+++ b/server/src/main/java/org/elasticsearch/search/collapse/CollapseContext.java
@@ -11,23 +11,18 @@
 import org.apache.lucene.search.Sort;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.MappedFieldType.CollapseType;
-import org.elasticsearch.index.query.InnerHitBuilder;
 import org.elasticsearch.lucene.grouping.SinglePassGroupingCollector;
 
-import java.util.List;
-
 /**
  * Context used for field collapsing
 */
 public class CollapseContext {
     private final String fieldName;
     private final MappedFieldType fieldType;
-    private final List<InnerHitBuilder> innerHits;
 
-    public CollapseContext(String fieldName, MappedFieldType fieldType, List<InnerHitBuilder> innerHits) {
+    public CollapseContext(String fieldName, MappedFieldType fieldType) {
         this.fieldName = fieldName;
         this.fieldType = fieldType;
-        this.innerHits = innerHits;
     }
 
     /**
@@ -42,11 +37,6 @@ public MappedFieldType getFieldType() {
         return fieldType;
     }
 
-    /** The inner hit options to expand the collapsed results **/
-    public List<InnerHitBuilder> getInnerHit() {
-        return innerHits;
-    }
-
     public SinglePassGroupingCollector<?> createTopDocs(Sort sort, int topN, FieldDoc after) {
         if (fieldType.collapseType() == CollapseType.KEYWORD) {
             return SinglePassGroupingCollector.createKeyword(fieldName, fieldType, sort, topN, after);
diff --git a/server/src/main/java/org/elasticsearch/search/dfs/AggregatedDfs.java b/server/src/main/java/org/elasticsearch/search/dfs/AggregatedDfs.java
index 1bd70b5c14817..0ce6824ec432b 100644
--- a/server/src/main/java/org/elasticsearch/search/dfs/AggregatedDfs.java
+++ b/server/src/main/java/org/elasticsearch/search/dfs/AggregatedDfs.java
@@ -21,9 +21,9 @@
 
 public class AggregatedDfs implements Writeable {
 
-    private Map<Term, TermStatistics> termStatistics;
-    private Map<String, CollectionStatistics> fieldStatistics;
-    private long maxDoc;
+    private final Map<Term, TermStatistics> termStatistics;
+    private final Map<String, CollectionStatistics> fieldStatistics;
+    private final long maxDoc;
 
     public AggregatedDfs(StreamInput in) throws IOException {
         int size = in.readVInt();
@@ -51,10 +51,6 @@ public Map<String, CollectionStatistics> fieldStatistics() {
         return fieldStatistics;
     }
 
-    public long maxDoc() {
-        return maxDoc;
-    }
-
     @Override
     public void writeTo(final StreamOutput out) throws IOException {
         out.writeMap(termStatistics, (o, k) -> {
diff --git a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java
index 5a04404c2e38a..91ac7356a9670 100644
--- a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java
+++ b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java
@@ -91,7 +91,7 @@ private static class PreloadedSourceProvider implements SourceProvider {
         Source source;
 
         @Override
-        public Source getSource(LeafReaderContext ctx, int doc) throws IOException {
+        public Source getSource(LeafReaderContext ctx, int doc) {
             return source;
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java b/server/src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java
index 78d6882472ebd..193f8c04664bf 100644
--- a/server/src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java
+++ b/server/src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java
@@ -16,6 +16,7 @@
 import org.elasticsearch.search.SearchShardTarget;
 import org.elasticsearch.search.internal.ShardSearchContextId;
 import org.elasticsearch.search.query.QuerySearchResult;
+import org.elasticsearch.transport.LeakTracker;
 
 import java.io.IOException;
 
@@ -26,14 +27,8 @@ public final class QueryFetchSearchResult extends SearchPhaseResult {
     private final RefCounted refCounted;
 
     public QueryFetchSearchResult(StreamInput in) throws IOException {
-        super(in);
         // These get a ref count of 1 when we create them, so we don't need to incRef here
-        queryResult = new QuerySearchResult(in);
-        fetchResult = new FetchSearchResult(in);
-        refCounted = AbstractRefCounted.of(() -> {
-            queryResult.decRef();
-            fetchResult.decRef();
-        });
+        this(new QuerySearchResult(in), new FetchSearchResult(in));
     }
 
     public QueryFetchSearchResult(QuerySearchResult queryResult, FetchSearchResult fetchResult) {
@@ -42,10 +37,10 @@ public QueryFetchSearchResult(QuerySearchResult queryResult, FetchSearchResult f
         // We're acquiring a copy, we should incRef it
         this.queryResult.incRef();
         this.fetchResult.incRef();
-        refCounted = AbstractRefCounted.of(() -> {
+        refCounted = LeakTracker.wrap(AbstractRefCounted.of(() -> {
             queryResult.decRef();
             fetchResult.decRef();
-        });
+        }));
     }
 
     @Override
diff --git a/server/src/main/java/org/elasticsearch/search/fetch/ShardFetchRequest.java b/server/src/main/java/org/elasticsearch/search/fetch/ShardFetchRequest.java
index 9ce93a825f849..86f6db0b681d7 100644
--- a/server/src/main/java/org/elasticsearch/search/fetch/ShardFetchRequest.java
+++ b/server/src/main/java/org/elasticsearch/search/fetch/ShardFetchRequest.java
@@ -37,7 +37,8 @@ public class ShardFetchRequest extends TransportRequest {
 
     private final int[] docIds;
 
-    private ScoreDoc lastEmittedDoc;
+    @Nullable
+    private final ScoreDoc lastEmittedDoc;
 
     public ShardFetchRequest(ShardSearchContextId contextId, List<Integer> docIds, ScoreDoc lastEmittedDoc) {
         this.contextId = contextId;
@@ -60,6 +61,8 @@ public ShardFetchRequest(StreamInput in) throws IOException {
             lastEmittedDoc = Lucene.readScoreDoc(in);
         } else if (flag != 0) {
             throw new IOException("Unknown flag: " + flag);
+        } else {
+            lastEmittedDoc = null;
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/search/fetch/StoredFieldsContext.java b/server/src/main/java/org/elasticsearch/search/fetch/StoredFieldsContext.java
index ae0e52ab69091..c3a91fde896bd 100644
--- a/server/src/main/java/org/elasticsearch/search/fetch/StoredFieldsContext.java
+++ b/server/src/main/java/org/elasticsearch/search/fetch/StoredFieldsContext.java
@@ -115,7 +115,7 @@ public boolean equals(Object o) {
         StoredFieldsContext that = (StoredFieldsContext) o;
 
         if (fetchFields != that.fetchFields) return false;
-        return fieldNames != null ? fieldNames.equals(that.fieldNames) : that.fieldNames == null;
+        return Objects.equals(fieldNames, that.fieldNames);
     }
 
@@ -164,7 +164,7 @@ public static StoredFieldsContext fromXContent(String fieldName, XContentParser
             return fromList(Collections.singletonList(parser.text()));
         } else if (token == XContentParser.Token.START_ARRAY) {
             ArrayList<String> list = new ArrayList<>();
-            while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+            while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
                 list.add(parser.text());
             }
             return fromList(list);
diff --git a/server/src/main/java/org/elasticsearch/search/fetch/StoredFieldsSpec.java b/server/src/main/java/org/elasticsearch/search/fetch/StoredFieldsSpec.java
index 45054a90c749f..48aea98887ff0 100644
--- a/server/src/main/java/org/elasticsearch/search/fetch/StoredFieldsSpec.java
+++ b/server/src/main/java/org/elasticsearch/search/fetch/StoredFieldsSpec.java
@@ -27,12 +27,12 @@ public boolean noRequirements() {
     /**
      * Use when no stored fields are required
     */
-    public static StoredFieldsSpec NO_REQUIREMENTS = new StoredFieldsSpec(false, false, Set.of());
+    public static final StoredFieldsSpec NO_REQUIREMENTS = new StoredFieldsSpec(false, false, Set.of());
 
     /**
      * Use when the source should be loaded but no other stored fields are required
     */
-    public static StoredFieldsSpec NEEDS_SOURCE = new StoredFieldsSpec(true, false, Set.of());
+    public static final StoredFieldsSpec NEEDS_SOURCE = new StoredFieldsSpec(true, false, Set.of());
 
     /**
      * Combine these stored field requirements with those from another StoredFieldsSpec
diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceContext.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceContext.java
index bba614dce78a5..4587d7560b2d9 100644
--- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceContext.java
+++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceContext.java
@@ -26,8 +26,6 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
-import java.util.Map;
-import java.util.function.Function;
 
 /**
  * Context used to fetch the {@code _source}.
@@ -42,7 +40,6 @@ public class FetchSourceContext implements Writeable, ToXContentObject {
     private final boolean fetchSource;
     private final String[] includes;
     private final String[] excludes;
-    private Function<Map<String, Object>, Map<String, Object>> filter;
 
     public static FetchSourceContext of(boolean fetchSource) {
         return fetchSource ?
FETCH_SOURCE : DO_NOT_FETCH_SOURCE; @@ -153,33 +150,9 @@ public static FetchSourceContext fromXContent(XContentParser parser) throws IOEx currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.START_ARRAY) { if (INCLUDES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - List includesList = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - if (token == XContentParser.Token.VALUE_STRING) { - includesList.add(parser.text()); - } else { - throw new ParsingException( - parser.getTokenLocation(), - "Unknown key for a " + token + " in [" + currentFieldName + "].", - parser.getTokenLocation() - ); - } - } - includes = includesList.toArray(Strings.EMPTY_ARRAY); + includes = parseStringArray(parser, currentFieldName); } else if (EXCLUDES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - List excludesList = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - if (token == XContentParser.Token.VALUE_STRING) { - excludesList.add(parser.text()); - } else { - throw new ParsingException( - parser.getTokenLocation(), - "Unknown key for a " + token + " in [" + currentFieldName + "].", - parser.getTokenLocation() - ); - } - } - excludes = excludesList.toArray(Strings.EMPTY_ARRAY); + excludes = parseStringArray(parser, currentFieldName); } else { throw new ParsingException( parser.getTokenLocation(), @@ -227,6 +200,25 @@ public static FetchSourceContext fromXContent(XContentParser parser) throws IOEx return FetchSourceContext.of(fetchSource, includes, excludes); } + private static String[] parseStringArray(XContentParser parser, String currentFieldName) throws IOException { + XContentParser.Token token; + String[] excludes; + List excludesList = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + if (token == XContentParser.Token.VALUE_STRING) { + excludesList.add(parser.text()); + } else { + throw new ParsingException( + parser.getTokenLocation(), + "Unknown key for a " + token + " in [" + currentFieldName + "].", + parser.getTokenLocation() + ); + } + } + excludes = excludesList.toArray(Strings.EMPTY_ARRAY); + return excludes; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { if (fetchSource) { diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java index 3207f1ffa99f0..36cda88a063ec 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java @@ -451,13 +451,6 @@ public HB boundaryScannerLocale(String boundaryScannerLocale) { return (HB) this; } - /** - * @return the value set by {@link #boundaryScannerLocale(String)} - */ - public Locale boundaryScannerLocale() { - return this.boundaryScannerLocale; - } - /** * Allows to set custom options for custom highlighters. 
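The FetchSourceContext hunk above folds two identical array-parsing loops into a single parseStringArray helper. The sketch below shows the same move in isolation; the iterator and exception types are simplified stand-ins for XContentParser and ParsingException:

    import java.util.ArrayList;
    import java.util.Iterator;
    import java.util.List;

    final class IncludeExcludeParsing {
        // One loop shared by both callers; fails fast on a non-string element,
        // mirroring the error handling of the real helper.
        static String[] parseStringArray(Iterator<?> tokens, String fieldName) {
            List<String> values = new ArrayList<>();
            while (tokens.hasNext()) {
                Object token = tokens.next();
                if (token instanceof String s) {
                    values.add(s);
                } else {
                    throw new IllegalArgumentException("Unknown element " + token + " in [" + fieldName + "]");
                }
            }
            return values.toArray(new String[0]);
        }
    }

With the helper in place, the includes and excludes branches each reduce to one call, so the two copies of the error handling can no longer drift apart.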
*/ diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/CustomQueryScorer.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/CustomQueryScorer.java index 31e704fe30ff9..cae353bb91014 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/CustomQueryScorer.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/CustomQueryScorer.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.fetch.subphase.highlight; -import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.Query; import org.apache.lucene.search.highlight.QueryScorer; import org.apache.lucene.search.highlight.WeightedSpanTerm; @@ -22,18 +21,6 @@ public final class CustomQueryScorer extends QueryScorer { - public CustomQueryScorer(Query query, IndexReader reader, String field, String defaultField) { - super(query, reader, field, defaultField); - } - - public CustomQueryScorer(Query query, IndexReader reader, String field) { - super(query, reader, field); - } - - public CustomQueryScorer(Query query, String field, String defaultField) { - super(query, field, defaultField); - } - public CustomQueryScorer(Query query, String field) { super(query, field); } @@ -42,10 +29,6 @@ public CustomQueryScorer(Query query) { super(query); } - public CustomQueryScorer(WeightedSpanTerm[] weightedTerms) { - super(weightedTerms); - } - @Override protected WeightedSpanTermExtractor newTermExtractor(String defaultField) { return defaultField == null ? new CustomWeightedSpanTermExtractor() : new CustomWeightedSpanTermExtractor(defaultField); @@ -69,7 +52,6 @@ protected void extractUnknownQuery(Query query, Map te protected void extract(Query query, float boost, Map terms) throws IOException { if (isChildOrParentQuery(query.getClass())) { // skip has_child or has_parent queries, see: https://github.com/elastic/elasticsearch/issues/14999 - return; } else if (query instanceof FunctionScoreQuery) { super.extract(((FunctionScoreQuery) query).getSubQuery(), boost, terms); } else if (query instanceof ESToParentBlockJoinQuery) { diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/DefaultHighlighter.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/DefaultHighlighter.java index d90aba24a94df..e77436ba61423 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/DefaultHighlighter.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/DefaultHighlighter.java @@ -32,7 +32,6 @@ import org.elasticsearch.lucene.search.uhighlight.Snippet; import org.elasticsearch.search.fetch.FetchContext; import org.elasticsearch.search.fetch.FetchSubPhase; -import org.elasticsearch.search.fetch.FetchSubPhase.HitContext; import java.io.IOException; import java.text.BreakIterator; @@ -120,7 +119,7 @@ CustomUnifiedHighlighter buildHighlighter(FieldHighlightContext fieldContext) { fieldContext.context.getSearchExecutionContext().getIndexAnalyzer(f -> Lucene.KEYWORD_ANALYZER), queryMaxAnalyzedOffset ); - PassageFormatter passageFormatter = getPassageFormatter(fieldContext.hitContext, fieldContext.field, encoder); + PassageFormatter passageFormatter = getPassageFormatter(fieldContext.field, encoder); IndexSearcher searcher = fieldContext.context.searcher(); OffsetSource offsetSource = getOffsetSource(fieldContext.context, fieldContext.fieldType); BreakIterator breakIterator; @@ -161,7 +160,7 @@ CustomUnifiedHighlighter 
buildHighlighter(FieldHighlightContext fieldContext) { ); } - protected PassageFormatter getPassageFormatter(HitContext hitContext, SearchHighlightContext.Field field, Encoder encoder) { + protected PassageFormatter getPassageFormatter(SearchHighlightContext.Field field, Encoder encoder) { return new CustomPassageFormatter(field.fieldOptions().preTags()[0], field.fieldOptions().postTags()[0], encoder); } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FastVectorHighlighter.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FastVectorHighlighter.java index 75a1777ae7d8f..8417c9d747981 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FastVectorHighlighter.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FastVectorHighlighter.java @@ -312,6 +312,6 @@ private static class FieldHighlightEntry { private static class HighlighterEntry { public org.apache.lucene.search.vectorhighlight.FastVectorHighlighter fvh; - public Map fields = new HashMap<>(); + public final Map fields = new HashMap<>(); } } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FragmentBuilderHelper.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FragmentBuilderHelper.java index 9c761936863d6..5421cd59a23e4 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FragmentBuilderHelper.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FragmentBuilderHelper.java @@ -8,15 +8,11 @@ package org.elasticsearch.search.fetch.subphase.highlight; -import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.search.vectorhighlight.FastVectorHighlighter; import org.apache.lucene.search.vectorhighlight.FieldFragList.WeightedFragInfo; import org.apache.lucene.search.vectorhighlight.FieldFragList.WeightedFragInfo.SubInfo; import org.apache.lucene.search.vectorhighlight.FragmentsBuilder; import org.apache.lucene.util.CollectionUtil; -import org.elasticsearch.index.analysis.AnalyzerComponentsProvider; -import org.elasticsearch.index.analysis.NamedAnalyzer; -import org.elasticsearch.index.analysis.TokenFilterFactory; import java.util.List; @@ -45,7 +41,7 @@ public static WeightedFragInfo fixWeightedFragInfo(WeightedFragInfo fragInfo) { CollectionUtil.introSort(subInfos, (o1, o2) -> { int startOffset = o1.getTermsOffsets().get(0).getStartOffset(); int startOffset2 = o2.getTermsOffsets().get(0).getStartOffset(); - return compare(startOffset, startOffset2); + return Integer.compare(startOffset, startOffset2); }); return new WeightedFragInfo( Math.min(fragInfo.getSubInfos().get(0).getTermsOffsets().get(0).getStartOffset(), fragInfo.getStartOffset()), @@ -58,23 +54,4 @@ public static WeightedFragInfo fixWeightedFragInfo(WeightedFragInfo fragInfo) { } } - private static int compare(int x, int y) { - return (x < y) ? -1 : ((x == y) ? 
0 : 1); - } - - private static boolean containsBrokenAnalysis(Analyzer analyzer) { - // TODO maybe we need a getter on Namedanalyzer that tells if this uses broken Analysis - if (analyzer instanceof NamedAnalyzer) { - analyzer = ((NamedAnalyzer) analyzer).analyzer(); - } - if (analyzer instanceof AnalyzerComponentsProvider) { - final TokenFilterFactory[] tokenFilters = ((AnalyzerComponentsProvider) analyzer).getComponents().getTokenFilters(); - for (TokenFilterFactory tokenFilterFactory : tokenFilters) { - if (tokenFilterFactory.breaksFastVectorHighlighter()) { - return true; - } - } - } - return false; - } } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java index 7d371ac372774..0042b1eafba71 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java @@ -45,8 +45,6 @@ * @see org.elasticsearch.search.builder.SearchSourceBuilder#highlight() */ public final class HighlightBuilder extends AbstractHighlighterBuilder { - /** default for whether to highlight fields based on the source even if stored separately */ - public static final boolean DEFAULT_FORCE_SOURCE = false; /** default for whether a field should be highlighted only if a query matches that field */ public static final boolean DEFAULT_REQUIRE_FIELD_MATCH = true; /** default for whether to stop highlighting at the defined max_analyzed_offset to avoid exceptions for longer texts */ @@ -149,17 +147,6 @@ public HighlightBuilder field(String name) { return field(new Field(name)); } - /** - * Adds a field to be highlighted with a provided fragment size (in characters), and - * default number of fragments of 5. - * - * @param name The field to highlight - * @param fragmentSize The size of a fragment in characters - */ - public HighlightBuilder field(String name, int fragmentSize) { - return field(new Field(name).fragmentSize(fragmentSize)); - } - /** * Adds a field to be highlighted with a provided fragment size (in characters), and * a provided (maximum) number of fragments. diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightField.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightField.java index d4b5234f4e0b2..6bc9f65ac655f 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightField.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightField.java @@ -30,23 +30,12 @@ */ public class HighlightField implements ToXContentFragment, Writeable { - private String name; + private final String name; - private Text[] fragments; + private final Text[] fragments; public HighlightField(StreamInput in) throws IOException { - name = in.readString(); - if (in.readBoolean()) { - int size = in.readVInt(); - if (size == 0) { - fragments = Text.EMPTY_ARRAY; - } else { - fragments = new Text[size]; - for (int i = 0; i < size; i++) { - fragments[i] = in.readText(); - } - } - } + this(in.readString(), in.readOptionalArray(StreamInput::readText, Text[]::new)); } public HighlightField(String name, Text[] fragments) { @@ -61,13 +50,6 @@ public String name() { return name; } - /** - * The name of the field highlighted. - */ - public String getName() { - return name(); - } - /** * The highlighted fragments. 
{@code null} if failed to highlight (for example, the field is not stored). */ @@ -75,13 +57,6 @@ public Text[] fragments() { return fragments; } - /** - * The highlighted fragments. {@code null} if failed to highlight (for example, the field is not stored). - */ - public Text[] getFragments() { - return fragments(); - } - @Override public String toString() { return "[" + name + "], fragments[" + Arrays.toString(fragments) + "]"; @@ -101,14 +76,14 @@ public void writeTo(StreamOutput out) throws IOException { public static HighlightField fromXContent(XContentParser parser) throws IOException { ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser); String fieldName = parser.currentName(); - Text[] fragments = null; + Text[] fragments; XContentParser.Token token = parser.nextToken(); if (token == XContentParser.Token.START_ARRAY) { List values = new ArrayList<>(); while (parser.nextToken() != XContentParser.Token.END_ARRAY) { values.add(new Text(parser.text())); } - fragments = values.toArray(new Text[values.size()]); + fragments = values.toArray(Text.EMPTY_ARRAY); } else if (token == XContentParser.Token.VALUE_NULL) { fragments = null; } else { diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/SourceScoreOrderFragmentsBuilder.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/SourceScoreOrderFragmentsBuilder.java index f1bb3f2c773ac..79c7198564be5 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/SourceScoreOrderFragmentsBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/SourceScoreOrderFragmentsBuilder.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.fetch.subphase.highlight; import org.apache.lucene.document.Field; -import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.highlight.Encoder; import org.apache.lucene.search.vectorhighlight.BoundaryScanner; @@ -20,8 +19,6 @@ import org.elasticsearch.search.lookup.Source; import java.io.IOException; -import java.util.ArrayList; -import java.util.List; public class SourceScoreOrderFragmentsBuilder extends ScoreOrderFragmentsBuilder { @@ -51,19 +48,7 @@ public SourceScoreOrderFragmentsBuilder( @Override protected Field[] getFields(IndexReader reader, int docId, String fieldName) throws IOException { // we know its low level reader, and matching docId, since that's how we call the highlighter with - List values = valueFetcher.fetchValues(source, docId, new ArrayList<>()); - if (values.size() > 1 && fetchContext.sourceLoader().reordersFieldValues()) { - throw new IllegalArgumentException( - "The fast vector highlighter doesn't support loading multi-valued fields from _source in index [" - + fetchContext.getIndexName() - + "] because _source can reorder field values" - ); - } - Field[] fields = new Field[values.size()]; - for (int i = 0; i < values.size(); i++) { - fields[i] = new Field(fieldType.name(), values.get(i).toString(), TextField.TYPE_NOT_STORED); - } - return fields; + return SourceSimpleFragmentsBuilder.doGetFields(docId, valueFetcher, source, fetchContext, fieldType); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/SourceSimpleFragmentsBuilder.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/SourceSimpleFragmentsBuilder.java index 0a7a5d300339b..c6b69717b8f75 100644 --- 
a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/SourceSimpleFragmentsBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/SourceSimpleFragmentsBuilder.java @@ -46,6 +46,11 @@ public SourceSimpleFragmentsBuilder( @Override protected Field[] getFields(IndexReader reader, int docId, String fieldName) throws IOException { // we know its low level reader, and matching docId, since that's how we call the highlighter with + return doGetFields(docId, valueFetcher, source, fetchContext, fieldType); + } + + static Field[] doGetFields(int docId, ValueFetcher valueFetcher, Source source, FetchContext fetchContext, MappedFieldType fieldType) + throws IOException { List values = valueFetcher.fetchValues(source, docId, new ArrayList<>()); if (values.isEmpty()) { return EMPTY_FIELDS; @@ -63,5 +68,4 @@ protected Field[] getFields(IndexReader reader, int docId, String fieldName) thr } return fields; } - } diff --git a/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java b/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java index 3c69db98c7588..b7c77e4968854 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java @@ -265,7 +265,7 @@ private static LeafSlice[] computeSlices(List leaves, int min List sortedLeaves = new ArrayList<>(leaves); // Sort by maxDoc, descending: final Comparator leafComparator = Comparator.comparingInt(l -> l.reader().maxDoc()); - Collections.sort(sortedLeaves, leafComparator.reversed()); + sortedLeaves.sort(leafComparator.reversed()); // we add the groups on a priority queue, so we can add orphan leafs to the smallest group final Comparator> groupComparator = Comparator.comparingInt( l -> l.stream().mapToInt(lr -> lr.reader().maxDoc()).sum() diff --git a/server/src/main/java/org/elasticsearch/search/internal/ExitableDirectoryReader.java b/server/src/main/java/org/elasticsearch/search/internal/ExitableDirectoryReader.java index 794e429bbc473..ecb7833558a6b 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ExitableDirectoryReader.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ExitableDirectoryReader.java @@ -142,28 +142,7 @@ public void searchNearestVectors(String field, byte[] target, KnnCollector colle in.searchNearestVectors(field, target, collector, acceptDocs); return; } - // when acceptDocs is null due to no doc deleted, we will instantiate a new one that would - // match all docs to allow timeout checking. - final Bits updatedAcceptDocs = acceptDocs == null ? 
new Bits.MatchAllBits(maxDoc()) : acceptDocs; - Bits timeoutCheckingAcceptDocs = new Bits() { - private static final int MAX_CALLS_BEFORE_QUERY_TIMEOUT_CHECK = 10; - private int calls; - - @Override - public boolean get(int index) { - if (calls++ % MAX_CALLS_BEFORE_QUERY_TIMEOUT_CHECK == 0) { - queryCancellation.checkCancelled(); - } - - return updatedAcceptDocs.get(index); - } - - @Override - public int length() { - return updatedAcceptDocs.length(); - } - }; - in.searchNearestVectors(field, target, collector, timeoutCheckingAcceptDocs); + in.searchNearestVectors(field, target, collector, new TimeOutCheckingBits(acceptDocs)); } @Override @@ -181,29 +160,32 @@ public void searchNearestVectors(String field, float[] target, KnnCollector coll in.searchNearestVectors(field, target, collector, acceptDocs); return; } - // when acceptDocs is null due to no doc deleted, we will instantiate a new one that would - // match all docs to allow timeout checking. - final Bits updatedAcceptDocs = acceptDocs == null ? new Bits.MatchAllBits(maxDoc()) : acceptDocs; - Bits timeoutCheckingAcceptDocs = new Bits() { - private static final int MAX_CALLS_BEFORE_QUERY_TIMEOUT_CHECK = 10; - private int calls; - - @Override - public boolean get(int index) { - if (calls++ % MAX_CALLS_BEFORE_QUERY_TIMEOUT_CHECK == 0) { - queryCancellation.checkCancelled(); - } - - return updatedAcceptDocs.get(index); - } + in.searchNearestVectors(field, target, collector, new TimeOutCheckingBits(acceptDocs)); + } + + private class TimeOutCheckingBits implements Bits { + private static final int MAX_CALLS_BEFORE_QUERY_TIMEOUT_CHECK = 10; + private final Bits updatedAcceptDocs; + private int calls; - @Override - public int length() { - return updatedAcceptDocs.length(); + TimeOutCheckingBits(Bits acceptDocs) { + // when acceptDocs is null due to no doc deleted, we will instantiate a new one that would + // match all docs to allow timeout checking. + this.updatedAcceptDocs = acceptDocs == null ? 
new Bits.MatchAllBits(maxDoc()) : acceptDocs; + } + + @Override + public boolean get(int index) { + if (calls++ % MAX_CALLS_BEFORE_QUERY_TIMEOUT_CHECK == 0) { + queryCancellation.checkCancelled(); } - }; + return updatedAcceptDocs.get(index); + } - in.searchNearestVectors(field, target, collector, acceptDocs); + @Override + public int length() { + return updatedAcceptDocs.length(); + } } } diff --git a/server/src/main/java/org/elasticsearch/search/internal/FieldUsageTrackingDirectoryReader.java b/server/src/main/java/org/elasticsearch/search/internal/FieldUsageTrackingDirectoryReader.java index 5dc0374b73fc6..07fa169642dbf 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/FieldUsageTrackingDirectoryReader.java +++ b/server/src/main/java/org/elasticsearch/search/internal/FieldUsageTrackingDirectoryReader.java @@ -251,8 +251,7 @@ public void searchNearestVectors(String field, float[] target, KnnCollector coll @Override public String toString() { - final StringBuilder sb = new StringBuilder("FieldUsageTrackingLeafReader(reader="); - return sb.append(in).append(')').toString(); + return "FieldUsageTrackingLeafReader(reader=" + in + ')'; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java b/server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java index 67a265127026d..8bd91c9b9cfe7 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java @@ -174,11 +174,6 @@ public boolean sourceRequested() { return in.sourceRequested(); } - @Override - public boolean hasFetchSourceContext() { - return in.hasFetchSourceContext(); - } - @Override public FetchSourceContext fetchSourceContext() { return in.fetchSourceContext(); diff --git a/server/src/main/java/org/elasticsearch/search/internal/InternalScrollSearchRequest.java b/server/src/main/java/org/elasticsearch/search/internal/InternalScrollSearchRequest.java index b8886c3e79a8c..911b647067e63 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/InternalScrollSearchRequest.java +++ b/server/src/main/java/org/elasticsearch/search/internal/InternalScrollSearchRequest.java @@ -54,11 +54,6 @@ public Scroll scroll() { return scroll; } - public InternalScrollSearchRequest scroll(Scroll scroll) { - this.scroll = scroll; - return this; - } - @Override public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { return new SearchShardTask(id, type, action, getDescription(), parentTaskId, headers); diff --git a/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java b/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java index 274dc233ff5c7..ef67d3d19e42f 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java @@ -182,8 +182,6 @@ public final void assignRescoreDocIds(RescoreDocIds rescoreDocIds) { */ public abstract boolean sourceRequested(); - public abstract boolean hasFetchSourceContext(); - public abstract FetchSourceContext fetchSourceContext(); public abstract SearchContext fetchSourceContext(FetchSourceContext fetchSourceContext); diff --git a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java index fbfcfdf9500ed..fe9cfdc87695e 100644 --- 
a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java @@ -240,7 +240,7 @@ public ShardSearchRequest( this.originalIndices = originalIndices; this.readerId = readerId; this.keepAlive = keepAlive; - assert keepAlive == null || readerId != null : "readerId: " + readerId + " keepAlive: " + keepAlive; + assert keepAlive == null || readerId != null : "readerId: null keepAlive: " + keepAlive; this.channelVersion = TransportVersion.current(); this.waitForCheckpoint = waitForCheckpoint; this.waitForCheckpointsTimeout = waitForCheckpointsTimeout; @@ -334,7 +334,7 @@ public ShardSearchRequest(StreamInput in) throws IOException { readerId = null; keepAlive = null; } - assert keepAlive == null || readerId != null : "readerId: " + readerId + " keepAlive: " + keepAlive; + assert keepAlive == null || readerId != null : "readerId: null keepAlive: " + keepAlive; channelVersion = TransportVersion.min(TransportVersion.readVersion(in), in.getTransportVersion()); if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0)) { waitForCheckpoint = in.readLong(); diff --git a/server/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java b/server/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java index f1fd984aec5ba..8b4824e42cbf4 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java @@ -107,11 +107,6 @@ public boolean sourceRequested() { return fetchSourceContext != null && fetchSourceContext.fetchSource(); } - @Override - public boolean hasFetchSourceContext() { - return fetchSourceContext != null; - } - @Override public FetchSourceContext fetchSourceContext() { return fetchSourceContext; diff --git a/server/src/main/java/org/elasticsearch/search/lookup/LeafDocLookup.java b/server/src/main/java/org/elasticsearch/search/lookup/LeafDocLookup.java index bd6971dceb7be..988ea24d0fcc2 100644 --- a/server/src/main/java/org/elasticsearch/search/lookup/LeafDocLookup.java +++ b/server/src/main/java/org/elasticsearch/search/lookup/LeafDocLookup.java @@ -102,30 +102,27 @@ private FieldFactoryWrapper getFactoryForField(String fieldName) { // Load the field data on behalf of the script. Otherwise, it would require // additional permissions to deal with pagedbytes/ramusagestimator/etc. 
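The comment above is the key to this block: with a security manager installed, a permission check walks every frame on the call stack, so the script's restricted protection domain would otherwise veto the field-data load. Wrapping the load in doPrivileged stops the walk at the library's own frame. A minimal sketch of the pattern; loadFieldData is a hypothetical stand-in for the real work:

    import java.security.AccessController;
    import java.security.PrivilegedAction;

    final class FieldDataLoader {
        // Hypothetical stand-in for the privileged work done on the script's behalf.
        private static String loadFieldData(String field) {
            return "factory for " + field;
        }

        static String loadForScript(String field) {
            // doPrivileged truncates the permission check at this frame, so only
            // this library's permissions apply, not the script domain's.
            return AccessController.doPrivileged((PrivilegedAction<String>) () -> loadFieldData(field));
        }
    }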
- return AccessController.doPrivileged(new PrivilegedAction() { - @Override - public FieldFactoryWrapper run() { - FieldFactoryWrapper fieldFactory = null; - IndexFieldData indexFieldData = fieldDataLookup.apply(fieldType, SCRIPT); + return AccessController.doPrivileged((PrivilegedAction) () -> { + IndexFieldData indexFieldData = fieldDataLookup.apply(fieldType, SCRIPT); - FieldFactoryWrapper docFactory = null; + FieldFactoryWrapper docFactory = null; - if (docFactoryCache.isEmpty() == false) { - docFactory = docFactoryCache.get(fieldName); - } + if (docFactoryCache.isEmpty() == false) { + docFactory = docFactoryCache.get(fieldName); + } - // if this field has already been accessed via the doc-access API and the field-access API - // uses doc values then we share to avoid double-loading - if (docFactory != null && indexFieldData instanceof SourceValueFetcherIndexFieldData == false) { - fieldFactory = docFactory; - } else { - fieldFactory = new FieldFactoryWrapper(indexFieldData.load(reader).getScriptFieldFactory(fieldName)); - } + // if this field has already been accessed via the doc-access API and the field-access API + // uses doc values then we share to avoid double-loading + FieldFactoryWrapper fieldFactory; + if (docFactory != null && indexFieldData instanceof SourceValueFetcherIndexFieldData == false) { + fieldFactory = docFactory; + } else { + fieldFactory = new FieldFactoryWrapper(indexFieldData.load(reader).getScriptFieldFactory(fieldName)); + } - fieldFactoryCache.put(fieldName, fieldFactory); + fieldFactoryCache.put(fieldName, fieldFactory); - return fieldFactory; - } + return fieldFactory; }); } @@ -150,35 +147,32 @@ private FieldFactoryWrapper getFactoryForDoc(String fieldName) { // Load the field data on behalf of the script. Otherwise, it would require // additional permissions to deal with pagedbytes/ramusagestimator/etc. 
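Both LeafDocLookup hunks replace an anonymous PrivilegedAction with a lambda. The cast in the rewritten code is load-bearing: a bare no-argument lambda matches both the PrivilegedAction and PrivilegedExceptionAction overloads of doPrivileged, so the call would be ambiguous without it. A standalone before-and-after sketch, with a hypothetical work() in place of the field-data logic:

    import java.security.AccessController;
    import java.security.PrivilegedAction;

    final class LambdaConversion {
        static String work() {
            return "wrapper";
        }

        // Before: an anonymous class spelled out the functional interface.
        static String anonymous() {
            return AccessController.doPrivileged(new PrivilegedAction<String>() {
                @Override
                public String run() {
                    return work();
                }
            });
        }

        // After: the cast keeps overload resolution unambiguous, because a
        // no-arg lambda also matches the PrivilegedExceptionAction overload.
        static String lambda() {
            return AccessController.doPrivileged((PrivilegedAction<String>) LambdaConversion::work);
        }
    }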
- return AccessController.doPrivileged(new PrivilegedAction() { - @Override - public FieldFactoryWrapper run() { - FieldFactoryWrapper docFactory = null; - FieldFactoryWrapper fieldFactory = null; - - if (fieldFactoryCache.isEmpty() == false) { - fieldFactory = fieldFactoryCache.get(fieldName); - } + return AccessController.doPrivileged((PrivilegedAction) () -> { + FieldFactoryWrapper docFactory = null; + FieldFactoryWrapper fieldFactory = null; - if (fieldFactory != null) { - IndexFieldData fieldIndexFieldData = fieldDataLookup.apply(fieldType, SCRIPT); + if (fieldFactoryCache.isEmpty() == false) { + fieldFactory = fieldFactoryCache.get(fieldName); + } - // if this field has already been accessed via the field-access API and the field-access API - // uses doc values then we share to avoid double-loading - if (fieldIndexFieldData instanceof SourceValueFetcherIndexFieldData == false) { - docFactory = fieldFactory; - } - } + if (fieldFactory != null) { + IndexFieldData fieldIndexFieldData = fieldDataLookup.apply(fieldType, SCRIPT); - if (docFactory == null) { - IndexFieldData indexFieldData = fieldDataLookup.apply(fieldType, SEARCH); - docFactory = new FieldFactoryWrapper(indexFieldData.load(reader).getScriptFieldFactory(fieldName)); + // if this field has already been accessed via the field-access API and the field-access API + // uses doc values then we share to avoid double-loading + if (fieldIndexFieldData instanceof SourceValueFetcherIndexFieldData == false) { + docFactory = fieldFactory; } + } - docFactoryCache.put(fieldName, docFactory); - - return docFactory; + if (docFactory == null) { + IndexFieldData indexFieldData = fieldDataLookup.apply(fieldType, SEARCH); + docFactory = new FieldFactoryWrapper(indexFieldData.load(reader).getScriptFieldFactory(fieldName)); } + + docFactoryCache.put(fieldName, docFactory); + + return docFactory; }); } diff --git a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java index 3044d15ab8552..01015ec8cc78e 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java +++ b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java @@ -94,13 +94,14 @@ static void executeRank(SearchContext searchContext) throws QueryPhaseExecutionE if (searchTimedOut) { break; } - RankSearchContext rankSearchContext = new RankSearchContext(searchContext, rankQuery, rankShardContext.windowSize()); - QueryPhase.addCollectorsAndSearch(rankSearchContext); - QuerySearchResult rrfQuerySearchResult = rankSearchContext.queryResult(); - rrfRankResults.add(rrfQuerySearchResult.topDocs().topDocs); - serviceTimeEWMA += rrfQuerySearchResult.serviceTimeEWMA(); - nodeQueueSize = Math.max(nodeQueueSize, rrfQuerySearchResult.nodeQueueSize()); - searchTimedOut = rrfQuerySearchResult.searchTimedOut(); + try (RankSearchContext rankSearchContext = new RankSearchContext(searchContext, rankQuery, rankShardContext.windowSize())) { + QueryPhase.addCollectorsAndSearch(rankSearchContext); + QuerySearchResult rrfQuerySearchResult = rankSearchContext.queryResult(); + rrfRankResults.add(rrfQuerySearchResult.topDocs().topDocs); + serviceTimeEWMA += rrfQuerySearchResult.serviceTimeEWMA(); + nodeQueueSize = Math.max(nodeQueueSize, rrfQuerySearchResult.nodeQueueSize()); + searchTimedOut = rrfQuerySearchResult.searchTimedOut(); + } } querySearchResult.setRankShardResult(rankShardContext.combine(rrfRankResults)); diff --git 
a/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java b/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java index edebf602af188..301d7fb219ca7 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java +++ b/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java @@ -31,6 +31,7 @@ import org.elasticsearch.search.profile.SearchProfileQueryPhaseResult; import org.elasticsearch.search.rank.RankShardResult; import org.elasticsearch.search.suggest.Suggest; +import org.elasticsearch.transport.LeakTracker; import java.io.IOException; import java.util.ArrayList; @@ -104,8 +105,8 @@ public QuerySearchResult(ShardSearchContextId contextId, SearchShardTarget shard setSearchShardTarget(shardTarget); isNull = false; setShardSearchRequest(shardSearchRequest); - this.refCounted = AbstractRefCounted.of(this::close); this.toRelease = new ArrayList<>(); + this.refCounted = LeakTracker.wrap(AbstractRefCounted.of(() -> Releasables.close(toRelease))); } private QuerySearchResult(boolean isNull) { @@ -245,10 +246,6 @@ public void releaseAggs() { } } - private void close() { - Releasables.close(toRelease); - } - public void addReleasable(Releasable releasable) { toRelease.add(releasable); } diff --git a/server/src/main/java/org/elasticsearch/search/rank/RankSearchContext.java b/server/src/main/java/org/elasticsearch/search/rank/RankSearchContext.java index 4b075523c5286..ed6fcd16fb5e2 100644 --- a/server/src/main/java/org/elasticsearch/search/rank/RankSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/rank/RankSearchContext.java @@ -64,6 +64,7 @@ public RankSearchContext(SearchContext parent, Query rankQuery, int windowSize) this.rankQuery = parent.buildFilteredQuery(rankQuery); this.windowSize = windowSize; this.querySearchResult = new QuerySearchResult(parent.readerContext().id(), parent.shardTarget(), parent.request()); + this.addReleasable(querySearchResult::decRef); } @Override @@ -320,11 +321,6 @@ public boolean sourceRequested() { throw new UnsupportedOperationException(); } - @Override - public boolean hasFetchSourceContext() { - throw new UnsupportedOperationException(); - } - @Override public FetchSourceContext fetchSourceContext() { throw new UnsupportedOperationException(); diff --git a/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java b/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java index 389e3a56cf152..c873717fe55e7 100644 --- a/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java +++ b/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java @@ -197,9 +197,6 @@ public void setScoreMode(QueryRescoreMode scoreMode) { this.scoreMode = scoreMode; } - public void setScoreMode(String scoreMode) { - setScoreMode(QueryRescoreMode.fromString(scoreMode)); - } } } diff --git a/server/src/main/java/org/elasticsearch/search/runtime/GeoPointScriptFieldDistanceFeatureQuery.java b/server/src/main/java/org/elasticsearch/search/runtime/GeoPointScriptFieldDistanceFeatureQuery.java index a7977c18d338c..de081fd386d54 100644 --- a/server/src/main/java/org/elasticsearch/search/runtime/GeoPointScriptFieldDistanceFeatureQuery.java +++ b/server/src/main/java/org/elasticsearch/search/runtime/GeoPointScriptFieldDistanceFeatureQuery.java @@ -114,7 +114,6 @@ private class DistanceScorer extends Scorer { private final TwoPhaseIterator twoPhase; private final DocIdSetIterator disi; private final float weight; - private double 
maxDistance = GeoUtils.EARTH_MEAN_RADIUS_METERS * Math.PI; protected DistanceScorer(Weight weight, AbstractLongFieldScript script, int maxDoc, float boost) { super(weight); diff --git a/server/src/main/java/org/elasticsearch/search/sort/BucketedSort.java b/server/src/main/java/org/elasticsearch/search/sort/BucketedSort.java index ae2f7fc4ecbbb..b1b30856324b4 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/BucketedSort.java +++ b/server/src/main/java/org/elasticsearch/search/sort/BucketedSort.java @@ -25,7 +25,6 @@ import java.util.ArrayList; import java.util.Comparator; import java.util.List; -import java.util.Locale; import static java.util.Collections.emptyList; @@ -110,7 +109,7 @@ interface Loader { public void swap(long lhs, long rhs) {} @Override - public Loader loader(LeafReaderContext ctx) throws IOException { + public Loader loader(LeafReaderContext ctx) { return (index, doc) -> {}; } }; @@ -254,24 +253,6 @@ public boolean inHeapMode(long bucket) { */ protected abstract void swap(long lhs, long rhs); - /** - * Return a fairly human readable representation of the array backing the sort. - * <p>
- * This is intentionally not a {@link #toString()} implementation because it'll - * be quite slow. - * </p>
    - */ - protected final String debugFormat() { - StringBuilder b = new StringBuilder(); - for (long index = 0; index < values().size(); index++) { - if (index % bucketSize == 0) { - b.append('\n').append(String.format(Locale.ROOT, "%20d", index / bucketSize)).append(": "); - } - b.append(String.format(Locale.ROOT, "%20s", getValue(index))).append(' '); - } - return b.toString(); - } - /** * Initialize the gather offsets after setting up values. Subclasses * should call this once, after setting up their {@link #values()}. @@ -415,7 +396,6 @@ public final void collect(int doc, long bucket) throws IOException { } else { setNextGatherOffset(rootIndex, next - 1); } - return; } /** diff --git a/server/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java index 5d11563b5d8ed..0c9b56b1855d7 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java @@ -100,8 +100,12 @@ public SortFieldAndFormat build(SearchExecutionContext context) { } @Override - public BucketedSort buildBucketedSort(SearchExecutionContext context, BigArrays bigArrays, int bucketSize, BucketedSort.ExtraData extra) - throws IOException { + public BucketedSort buildBucketedSort( + SearchExecutionContext context, + BigArrays bigArrays, + int bucketSize, + BucketedSort.ExtraData extra + ) { return new BucketedSort.ForFloats(bigArrays, order, DocValueFormat.RAW, bucketSize, extra) { @Override public boolean needsScores() { @@ -109,7 +113,7 @@ public boolean needsScores() { } @Override - public Leaf forLeaf(LeafReaderContext ctx) throws IOException { + public Leaf forLeaf(LeafReaderContext ctx) { return new BucketedSort.ForFloats.Leaf(ctx) { private Scorable scorer; private float score; @@ -165,7 +169,7 @@ public TransportVersion getMinimalSupportedVersion() { } @Override - public ScoreSortBuilder rewrite(QueryRewriteContext ctx) throws IOException { + public ScoreSortBuilder rewrite(QueryRewriteContext ctx) { return this; } diff --git a/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java index 4ac7348a6c4a4..a0745d0f9c64a 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java @@ -295,7 +295,7 @@ protected SortedBinaryDocValues getValues(LeafReaderContext context) throws IOEx final BytesRefBuilder spare = new BytesRefBuilder(); @Override - public boolean advanceExact(int doc) throws IOException { + public boolean advanceExact(int doc) { leafScript.setDocument(doc); return true; } @@ -343,7 +343,7 @@ protected SortedNumericDoubleValues getValues(LeafReaderContext context) throws leafScript = numberSortScript.newInstance(new DocValuesDocReader(searchLookup, context)); final NumericDoubleValues values = new NumericDoubleValues() { @Override - public boolean advanceExact(int doc) throws IOException { + public boolean advanceExact(int doc) { leafScript.setDocument(doc); return true; } @@ -374,7 +374,7 @@ protected SortedBinaryDocValues getValues(LeafReaderContext context) throws IOEx final BinaryDocValues values = new AbstractBinaryDocValues() { @Override - public boolean advanceExact(int doc) throws IOException { + public boolean advanceExact(int doc) { leafScript.setDocument(doc); return true; } diff --git 
a/server/src/main/java/org/elasticsearch/search/sort/SortBuilders.java b/server/src/main/java/org/elasticsearch/search/sort/SortBuilders.java index 34363a614a7e4..c0bcbdc98e35f 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/SortBuilders.java +++ b/server/src/main/java/org/elasticsearch/search/sort/SortBuilders.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.sort; -import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.script.Script; import org.elasticsearch.search.builder.PointInTimeBuilder; import org.elasticsearch.search.sort.ScriptSortBuilder.ScriptSortType; @@ -65,16 +64,6 @@ public static GeoDistanceSortBuilder geoDistanceSort(String fieldName, double la return new GeoDistanceSortBuilder(fieldName, lat, lon); } - /** - * Constructs a new distance based sort on a geo point like field. - * - * @param fieldName The geo point like field name. - * @param points The points to create the range distance facets from. - */ - public static GeoDistanceSortBuilder geoDistanceSort(String fieldName, GeoPoint... points) { - return new GeoDistanceSortBuilder(fieldName, points); - } - /** * Constructs a new distance based sort on a geo point like field. * diff --git a/server/src/main/java/org/elasticsearch/search/sort/SortValue.java b/server/src/main/java/org/elasticsearch/search/sort/SortValue.java index 067439931a85b..ab7dcd6615f79 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/SortValue.java +++ b/server/src/main/java/org/elasticsearch/search/sort/SortValue.java @@ -369,7 +369,6 @@ private static class EmptySortValue extends SortValue { public static final String NAME = "empty"; private static final String EMPTY_STRING = ""; - private int sortValue = 0; private EmptySortValue() {} @@ -381,7 +380,7 @@ public String getWriteableName() { } @Override - public void writeTo(StreamOutput out) throws IOException {} + public void writeTo(StreamOutput out) {} @Override public Object getKey() { @@ -394,7 +393,7 @@ public String format(DocValueFormat format) { } @Override - protected XContentBuilder rawToXContent(XContentBuilder builder) throws IOException { + protected XContentBuilder rawToXContent(XContentBuilder builder) { return builder; } @@ -420,7 +419,7 @@ public String toString() { @Override public int typeComparisonKey() { - return sortValue; + return 0; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/suggest/DirectSpellcheckerSettings.java b/server/src/main/java/org/elasticsearch/search/suggest/DirectSpellcheckerSettings.java index 80beb5d2ec7ca..0956a9f94677c 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/DirectSpellcheckerSettings.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/DirectSpellcheckerSettings.java @@ -20,17 +20,17 @@ public class DirectSpellcheckerSettings { // NB: If this changes, make sure to change the default in TermBuilderSuggester - public static SuggestMode DEFAULT_SUGGEST_MODE = SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX; - public static float DEFAULT_ACCURACY = 0.5f; - public static SortBy DEFAULT_SORT = SortBy.SCORE; + public static final SuggestMode DEFAULT_SUGGEST_MODE = SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX; + public static final float DEFAULT_ACCURACY = 0.5f; + public static final SortBy DEFAULT_SORT = SortBy.SCORE; // NB: If this changes, make sure to change the default in TermBuilderSuggester - public static StringDistance DEFAULT_STRING_DISTANCE = DirectSpellChecker.INTERNAL_LEVENSHTEIN; - public static int DEFAULT_MAX_EDITS = 
LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE; - public static int DEFAULT_MAX_INSPECTIONS = 5; - public static float DEFAULT_MAX_TERM_FREQ = 0.01f; - public static int DEFAULT_PREFIX_LENGTH = 1; - public static int DEFAULT_MIN_WORD_LENGTH = 4; - public static float DEFAULT_MIN_DOC_FREQ = 0f; + public static final StringDistance DEFAULT_STRING_DISTANCE = DirectSpellChecker.INTERNAL_LEVENSHTEIN; + public static final int DEFAULT_MAX_EDITS = LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE; + public static final int DEFAULT_MAX_INSPECTIONS = 5; + public static final float DEFAULT_MAX_TERM_FREQ = 0.01f; + public static final int DEFAULT_PREFIX_LENGTH = 1; + public static final int DEFAULT_MIN_WORD_LENGTH = 4; + public static final float DEFAULT_MIN_DOC_FREQ = 0f; private SuggestMode suggestMode = DEFAULT_SUGGEST_MODE; private float accuracy = DEFAULT_ACCURACY; diff --git a/server/src/main/java/org/elasticsearch/search/suggest/Suggest.java b/server/src/main/java/org/elasticsearch/search/suggest/Suggest.java index f126091c785d8..f3371caf4c1a7 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/Suggest.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/Suggest.java @@ -206,7 +206,6 @@ public int hashCode() { @SuppressWarnings("rawtypes") public abstract static class Suggestion implements Iterable, NamedWriteable, ToXContentFragment { - public static final int TYPE = 0; protected final String name; protected final int size; protected final List entries = new ArrayList<>(5); @@ -635,10 +634,6 @@ public boolean collateMatch() { return (collateMatch != null) ? collateMatch : true; } - protected void setScore(float score) { - this.score = score; - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeText(text); diff --git a/server/src/main/java/org/elasticsearch/search/suggest/SuggestionSearchContext.java b/server/src/main/java/org/elasticsearch/search/suggest/SuggestionSearchContext.java index 674f936890283..37cc7bb59c253 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/SuggestionSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/SuggestionSearchContext.java @@ -35,8 +35,8 @@ public abstract static class SuggestionContext { private Analyzer analyzer; private int size = 5; private int shardSize = -1; - private SearchExecutionContext searchExecutionContext; - private Suggester suggester; + private final SearchExecutionContext searchExecutionContext; + private final Suggester suggester; protected SuggestionContext(Suggester suggester, SearchExecutionContext searchExecutionContext) { this.suggester = suggester; diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java index c71673962ca2d..e088948b18e03 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java @@ -56,7 +56,7 @@ */ public final class CompletionSuggestion extends Suggest.Suggestion { - private boolean skipDuplicates; + private final boolean skipDuplicates; /** * Creates a completion suggestion given its name, size and whether it should skip duplicates diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionBuilder.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionBuilder.java 
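The DirectSpellcheckerSettings hunk above promotes a block of public static defaults to public static final. The distinction matters: a non-final public static can be reassigned from anywhere in the JVM, silently changing spellchecker defaults for every subsequent request, while final makes such a write a compile error. A small illustration with a hypothetical settings class:

    final class SpellcheckDefaults {
        // Before: public static float DEFAULT_ACCURACY = 0.5f;
        // Any code could execute SpellcheckDefaults.DEFAULT_ACCURACY = 0.9f and
        // change behavior process-wide. With final, that write no longer
        // compiles, and the primitive constant can be inlined by javac.
        public static final float DEFAULT_ACCURACY = 0.5f;

        private SpellcheckDefaults() {}
    }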
index 86e18b3e5a406..7a3bc3c67ba6d 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionBuilder.java @@ -211,13 +211,6 @@ private CompletionSuggestionBuilder contexts(XContentBuilder contextBuilder) { return this; } - /** - * Returns whether duplicate suggestions should be filtered out. - */ - public boolean skipDuplicates() { - return skipDuplicates; - } - /** * Should duplicates be filtered or not. Defaults to {@code false}. */ diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/FuzzyOptions.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/FuzzyOptions.java index f241b6f89633e..7d7d5516c50ae 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/FuzzyOptions.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/FuzzyOptions.java @@ -62,12 +62,12 @@ public static Builder builder() { return new Builder(); } - private int editDistance; - private boolean transpositions; - private int fuzzyMinLength; - private int fuzzyPrefixLength; - private boolean unicodeAware; - private int maxDeterminizedStates; + private final int editDistance; + private final boolean transpositions; + private final int fuzzyMinLength; + private final int fuzzyPrefixLength; + private final boolean unicodeAware; + private final int maxDeterminizedStates; private FuzzyOptions( int editDistance, diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/RegexOptions.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/RegexOptions.java index 0759b413dd664..fdfa1303b2d77 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/RegexOptions.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/RegexOptions.java @@ -61,8 +61,8 @@ static RegexOptions parse(XContentParser parser) throws IOException { return PARSER.parse(parser, null).build(); } - private int flagsValue; - private int maxDeterminizedStates; + private final int flagsValue; + private final int maxDeterminizedStates; private RegexOptions(int flagsValue, int maxDeterminizedStates) { this.flagsValue = flagsValue; diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/TopSuggestGroupDocsCollector.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/TopSuggestGroupDocsCollector.java index bac3b7491a661..31959df6b023e 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/TopSuggestGroupDocsCollector.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/TopSuggestGroupDocsCollector.java @@ -25,7 +25,7 @@ * the best one per document (sorted by weight) is kept. 
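Several hunks in this stretch (FuzzyOptions, RegexOptions, and the docContexts map just below) mark fields final. Note that final only pins the reference; a final collection is still mutable, which is exactly what the collector relies on. A short illustration with hypothetical names:

    import java.util.HashMap;
    import java.util.Map;

    final class FinalFieldDemo {
        // final prevents rebinding the reference, not mutating the contents.
        private final Map<Integer, String> docContexts = new HashMap<>();

        void add(int docId, String context) {
            docContexts.put(docId, context);  // allowed: mutation, not reassignment
            // docContexts = new HashMap<>(); // would no longer compile
        }
    }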
diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/TopSuggestGroupDocsCollector.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/TopSuggestGroupDocsCollector.java
index bac3b7491a661..31959df6b023e 100644
--- a/server/src/main/java/org/elasticsearch/search/suggest/completion/TopSuggestGroupDocsCollector.java
+++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/TopSuggestGroupDocsCollector.java
@@ -25,7 +25,7 @@
  * the best one per document (sorted by weight) is kept.
  **/
 class TopSuggestGroupDocsCollector extends TopSuggestDocsCollector {
-    private Map<Integer, List<CharSequence>> docContexts = new HashMap<>();
+    private final Map<Integer, List<CharSequence>> docContexts = new HashMap<>();

     /**
      * Sole constructor
diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/CategoryQueryContext.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/CategoryQueryContext.java
index ce0c58463bad2..65c464cac256d 100644
--- a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/CategoryQueryContext.java
+++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/CategoryQueryContext.java
@@ -72,7 +72,7 @@ public boolean equals(Object o) {
         if (isPrefix != that.isPrefix) return false;
         if (boost != that.boost) return false;

-        return category != null ? category.equals(that.category) : that.category == null;
+        return Objects.equals(category, that.category);
     }

diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextBuilder.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextBuilder.java
index b180e6fd13335..2a83bf289bdef 100644
--- a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextBuilder.java
@@ -13,7 +13,7 @@
  */
 public abstract class ContextBuilder<E extends ContextMapping<?>> {

-    protected String name;
+    protected final String name;

     /**
      * @param name of the context mapper to build
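The Objects.equals swap in CategoryQueryContext is behavior-preserving: it returns true when both arguments are null, false when exactly one is, and otherwise delegates to equals on the first argument. A quick self-contained illustration:

import java.util.Objects;

class NullSafeEquality {
    public static void main(String[] args) {
        String a = null, b = null;
        // Hand-rolled form replaced in the hunk above:
        boolean verbose = a != null ? a.equals(b) : b == null;
        // Library form the patch prefers:
        boolean concise = Objects.equals(a, b);
        System.out.println(verbose + " " + concise);   // true true
    }
}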
diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMapping.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMapping.java
index c48a1ccb12e6f..d2edd460b926d 100644
--- a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMapping.java
+++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMapping.java
@@ -165,41 +165,5 @@ public String toString() {
         }
     }

-    public static class InternalQueryContext {
-        public final String context;
-        public final int boost;
-        public final boolean isPrefix;
-
-        public InternalQueryContext(String context, int boost, boolean isPrefix) {
-            this.context = context;
-            this.boost = boost;
-            this.isPrefix = isPrefix;
-        }
-
-        @Override
-        public boolean equals(Object o) {
-            if (this == o) return true;
-            if (o == null || getClass() != o.getClass()) return false;
-
-            InternalQueryContext that = (InternalQueryContext) o;
-
-            if (boost != that.boost) return false;
-            if (isPrefix != that.isPrefix) return false;
-            return context != null ? context.equals(that.context) : that.context == null;
-
-        }
-
-        @Override
-        public int hashCode() {
-            int result = context != null ? context.hashCode() : 0;
-            result = 31 * result + boost;
-            result = 31 * result + (isPrefix ? 1 : 0);
-            return result;
-        }
-
-        @Override
-        public String toString() {
-            return "QueryContext{" + "context='" + context + '\'' + ", boost=" + boost + ", isPrefix=" + isPrefix + '}';
-        }
-    }
+    public record InternalQueryContext(String context, int boost, boolean isPrefix) {}
 }
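Converting InternalQueryContext to a record deletes roughly thirty lines of hand-written boilerplate because the compiler derives the members from the declared components. A sketch of what a record provides for free; the one visible knock-on effect is that call sites switch from field access to generated accessor methods, which is exactly what the ContextMappings hunk below updates:

// A record declares its state once; accessors, equals, hashCode, and toString are generated.
record InternalQueryContext(String context, int boost, boolean isPrefix) {}

class RecordDemo {
    public static void main(String[] args) {
        var a = new InternalQueryContext("ctx", 2, false);
        var b = new InternalQueryContext("ctx", 2, false);
        System.out.println(a.context());   // generated accessor: ctx
        System.out.println(a.equals(b));   // generated value-based equals: true
        System.out.println(a);             // InternalQueryContext[context=ctx, boost=2, isPrefix=false]
    }
}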
diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMappings.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMappings.java
index 9a975fe930979..f7709d7aac911 100644
--- a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMappings.java
+++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMappings.java
@@ -142,7 +142,7 @@ protected Iterable<CharSequence> contexts() {
             if (typedContexts.isEmpty()) {
                 throw new IllegalArgumentException("Contexts are mandatory in context enabled completion field [" + name + "]");
             }
-            return new ArrayList(typedContexts);
+            return new ArrayList<>(typedContexts);
         }
     }

@@ -166,8 +166,8 @@ public ContextQuery toContextQuery(CompletionQuery query, Map<String, List<ContextMapping.InternalQueryContext>> queryContexts) {
                 List<ContextMapping.InternalQueryContext> internalQueryContext = queryContexts.get(mapping.name());
                 if (internalQueryContext != null) {
                     for (ContextMapping.InternalQueryContext context : internalQueryContext) {
-                        scratch.append(context.context);
-                        typedContextQuery.addContext(scratch.toCharsRef(), context.boost, context.isPrefix == false);
+                        scratch.append(context.context());
+                        typedContextQuery.addContext(scratch.toCharsRef(), context.boost(), context.isPrefix() == false);
                         scratch.setLength(1);
                         hasContext = true;
                     }
@@ -193,12 +193,8 @@ public Map<String, Set<String>> getNamedContexts(List<CharSequence> contexts) {
             int typeId = typedContext.charAt(0);
             assert typeId < contextMappings.size() : "Returned context has invalid type";
             ContextMapping mapping = contextMappings.get(typeId);
-            Set<String> contextEntries = contextMap.get(mapping.name());
-            if (contextEntries == null) {
-                contextEntries = new HashSet<>();
-                contextMap.put(mapping.name(), contextEntries);
-            }
-            contextEntries.add(typedContext.subSequence(1, typedContext.length()).toString());
+            contextMap.computeIfAbsent(mapping.name(), k -> new HashSet<>())
+                .add(typedContext.subSequence(1, typedContext.length()).toString());
         }
         return contextMap;
     }
@@ -273,7 +269,7 @@ public int hashCode() {

     @Override
     public boolean equals(Object obj) {
-        if (obj == null || (obj instanceof ContextMappings) == false) {
+        if ((obj instanceof ContextMappings) == false) {
             return false;
         }
         ContextMappings other = ((ContextMappings) obj);
diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java
index 933d2198a2dae..2cd7a751264bd 100644
--- a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java
+++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java
@@ -345,19 +345,6 @@ public Builder precision(String precision) {
             return precision(DistanceUnit.parse(precision, DistanceUnit.METERS, DistanceUnit.METERS));
         }

-        /**
-         * Set the precision use o make suggestions
-         *
-         * @param precision
-         *            precision value
-         * @param unit
-         *            {@link DistanceUnit} to use
-         * @return this
-         */
-        public Builder precision(double precision, DistanceUnit unit) {
-            return precision(unit.toMeters(precision));
-        }
-
         /**
          * Set the precision use o make suggestions
         *
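The get/null-check/put dance in getNamedContexts collapses into a single computeIfAbsent call: the mapping function runs only when the key is absent, and the existing or newly created value is returned so the add can chain. A minimal demonstration (map contents invented for illustration):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

class GroupByDemo {
    public static void main(String[] args) {
        Map<String, Set<String>> contextMap = new HashMap<>();
        // Before: look up, create-and-put when missing, then mutate.
        Set<String> entries = contextMap.get("geo");
        if (entries == null) {
            entries = new HashSet<>();
            contextMap.put("geo", entries);
        }
        entries.add("u09t");
        // After: one call expresses the same insert-if-absent-then-add.
        contextMap.computeIfAbsent("category", k -> new HashSet<>()).add("shoes");
        System.out.println(contextMap);
    }
}

The equals cleanup in the same file relies on the fact that `null instanceof ContextMappings` is already false, so the explicit null check was redundant.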
diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/CandidateGenerator.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/CandidateGenerator.java
index 61dfb0f075d34..fc29d1ed7a567 100644
--- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/CandidateGenerator.java
+++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/CandidateGenerator.java
@@ -17,15 +17,8 @@
 //TODO public for tests
 public abstract class CandidateGenerator {

-    public abstract boolean isKnownWord(BytesRef term) throws IOException;
-
     public abstract TermStats termStats(BytesRef term) throws IOException;

-    public CandidateSet drawCandidates(BytesRef term) throws IOException {
-        CandidateSet set = new CandidateSet(Candidate.EMPTY, createCandidate(term, true));
-        return drawCandidates(set);
-    }
-
     public Candidate createCandidate(BytesRef term, boolean userInput) throws IOException {
         return createCandidate(term, termStats(term), 1.0, userInput);
     }
diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/CandidateScorer.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/CandidateScorer.java
index e379674d02eab..fdc05d12a2389 100644
--- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/CandidateScorer.java
+++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/CandidateScorer.java
@@ -28,13 +28,13 @@ public Correction[] findBestCandiates(CandidateSet[] sets, float errorFraction,
         if (sets.length == 0) {
             return Correction.EMPTY;
         }
-        PriorityQueue<Correction> corrections = new PriorityQueue<Correction>(maxNumCorrections) {
+        PriorityQueue<Correction> corrections = new PriorityQueue<>(maxNumCorrections) {
             @Override
             protected boolean lessThan(Correction a, Correction b) {
                 return a.compareTo(b) < 0;
             }
         };
-        int numMissspellings = 1;
+        final int numMissspellings;
         if (errorFraction >= 1.0) {
             numMissspellings = (int) errorFraction;
         } else {
@@ -62,11 +62,11 @@ public void findCandidates(
             CandidateSet current = candidates[ord];
             if (ord == candidates.length - 1) {
                 path[ord] = current.originalTerm;
-                updateTop(candidates, path, corrections, cutoffScore, pathScore + scorer.score(path, candidates, ord, gramSize));
+                updateTop(candidates, path, corrections, cutoffScore, pathScore + scorer.score(path, ord, gramSize));
                 if (numMissspellingsLeft > 0) {
                     for (int i = 0; i < current.candidates.length; i++) {
                         path[ord] = current.candidates[i];
-                        updateTop(candidates, path, corrections, cutoffScore, pathScore + scorer.score(path, candidates, ord, gramSize));
+                        updateTop(candidates, path, corrections, cutoffScore, pathScore + scorer.score(path, ord, gramSize));
                     }
                 }
             } else {
@@ -79,7 +79,7 @@ public void findCandidates(
                         numMissspellingsLeft,
                         corrections,
                         cutoffScore,
-                        pathScore + scorer.score(path, candidates, ord, gramSize)
+                        pathScore + scorer.score(path, ord, gramSize)
                     );
                     for (int i = 0; i < current.candidates.length; i++) {
                         path[ord] = current.candidates[i];
@@ -90,20 +90,12 @@ public void findCandidates(
                             numMissspellingsLeft - 1,
                             corrections,
                             cutoffScore,
-                            pathScore + scorer.score(path, candidates, ord, gramSize)
+                            pathScore + scorer.score(path, ord, gramSize)
                         );
                     }
                 } else {
                     path[ord] = current.originalTerm;
-                    findCandidates(
-                        candidates,
-                        path,
-                        ord + 1,
-                        0,
-                        corrections,
-                        cutoffScore,
-                        pathScore + scorer.score(path, candidates, ord, gramSize)
-                    );
+                    findCandidates(candidates, path, ord + 1, 0, corrections, cutoffScore, pathScore + scorer.score(path, ord, gramSize));
                 }
             }

@@ -135,7 +127,7 @@ private void updateTop(
     public double score(Candidate[] path, CandidateSet[] candidates) throws IOException {
         double score = 0.0d;
         for (int i = 0; i < candidates.length; i++) {
-            score += scorer.score(path, candidates, i, gramSize);
+            score += scorer.score(path, i, gramSize);
         }
         return Math.exp(score);
     }
diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java
index 98143e0acf413..b95971d13c11d 100644
--- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java
+++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java
@@ -97,14 +97,6 @@ public DirectCandidateGenerator(
         termsEnum = terms.iterator();
     }

-    /* (non-Javadoc)
-     * @see org.elasticsearch.search.suggest.phrase.CandidateGenerator#isKnownWord(org.apache.lucene.util.BytesRef)
-     */
-    @Override
-    public boolean isKnownWord(BytesRef term) throws IOException {
-        return termStats(term).docFreq > 0;
-    }
-
     /* (non-Javadoc)
      * @see org.elasticsearch.search.suggest.phrase.CandidateGenerator#frequency(org.apache.lucene.util.BytesRef)
      */
@@ -128,10 +120,6 @@ public TermStats internalTermStats(BytesRef term) throws IOException {
         return new TermStats(0, 0);
     }

-    public String getField() {
-        return field;
-    }
-
     @Override
     public CandidateSet drawCandidates(CandidateSet set) throws IOException {
         Candidate original = set.originalTerm;
@@ -181,15 +169,14 @@ protected BytesRef preFilter(final BytesRef term, final CharsRefBuilder spare, f
         if (preFilter == null) {
             return term;
         }
-        final BytesRefBuilder result = byteSpare;
         analyze(preFilter, term, field, new TokenConsumer() {
             @Override
-            public void nextToken() throws IOException {
-                this.fillBytesRef(result);
+            public void nextToken() {
+                this.fillBytesRef(byteSpare);
             }
         }, spare);
-        return result.get();
+        return byteSpare.get();
     }

     protected void postFilter(
@@ -344,11 +331,10 @@ public boolean equals(Object obj) {
             if (getClass() != obj.getClass()) return false;
             Candidate other = (Candidate) obj;
             if (term == null) {
-                if (other.term != null) return false;
+                return other.term == null;
             } else {
-                if (term.equals(other.term) == false) return false;
+                return term.equals(other.term) != false;
             }
-            return true;
         }

         /** Lower scores sort first; if scores are equal, then later (zzz) terms sort first */
@@ -364,7 +350,7 @@ public int compareTo(Candidate other) {
         }

         @Override
-        public Candidate createCandidate(BytesRef term, TermStats termStats, double channelScore, boolean userInput) throws IOException {
+        public Candidate createCandidate(BytesRef term, TermStats termStats, double channelScore, boolean userInput) {
             return new Candidate(term, termStats, channelScore, score(termStats, channelScore, sumTotalTermFreq), userInput);
         }
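In CandidateScorer, the mutable local with a default (`int numMissspellings = 1`) becomes a blank final that both branches must assign, so the compiler proves every path sets it exactly once and the unused default disappears. A standalone sketch of the same shape (method name and the else-branch expression are illustrative, not copied from the patch):

class DefiniteAssignment {
    static int missspellings(float errorFraction, int wordCount) {
        final int numMissspellings;   // blank final: must be assigned exactly once on every path
        if (errorFraction >= 1.0f) {
            numMissspellings = (int) errorFraction;
        } else {
            numMissspellings = Math.round(errorFraction * wordCount);
        }
        return numMissspellings;
    }
}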
diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java
index b3cb3444d2206..a153d4de54dcb 100644
--- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java
@@ -437,32 +437,24 @@ public PhraseSuggestionContext.DirectCandidateGenerator build(IndexAnalyzers ind

     private static SuggestMode resolveSuggestMode(String suggestMode) {
         suggestMode = suggestMode.toLowerCase(Locale.US);
-        if ("missing".equals(suggestMode)) {
-            return SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX;
-        } else if ("popular".equals(suggestMode)) {
-            return SuggestMode.SUGGEST_MORE_POPULAR;
-        } else if ("always".equals(suggestMode)) {
-            return SuggestMode.SUGGEST_ALWAYS;
-        } else {
-            throw new IllegalArgumentException("Illegal suggest mode " + suggestMode);
-        }
+        return switch (suggestMode) {
+            case "missing" -> SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX;
+            case "popular" -> SuggestMode.SUGGEST_MORE_POPULAR;
+            case "always" -> SuggestMode.SUGGEST_ALWAYS;
+            default -> throw new IllegalArgumentException("Illegal suggest mode " + suggestMode);
+        };
     }

     static StringDistance resolveDistance(String distanceVal) {
         distanceVal = distanceVal.toLowerCase(Locale.ROOT);
-        if ("internal".equals(distanceVal)) {
-            return DirectSpellChecker.INTERNAL_LEVENSHTEIN;
-        } else if ("damerau_levenshtein".equals(distanceVal)) {
-            return new LuceneLevenshteinDistance();
-        } else if ("levenshtein".equals(distanceVal)) {
-            return new LevenshteinDistance();
-        } else if ("jaro_winkler".equals(distanceVal)) {
-            return new JaroWinklerDistance();
-        } else if ("ngram".equals(distanceVal)) {
-            return new NGramDistance();
-        } else {
-            throw new IllegalArgumentException("Illegal distance option " + distanceVal);
-        }
+        return switch (distanceVal) {
+            case "internal" -> DirectSpellChecker.INTERNAL_LEVENSHTEIN;
+            case "damerau_levenshtein" -> new LuceneLevenshteinDistance();
+            case "levenshtein" -> new LevenshteinDistance();
+            case "jaro_winkler" -> new JaroWinklerDistance();
+            case "ngram" -> new NGramDistance();
+            default -> throw new IllegalArgumentException("Illegal distance option " + distanceVal);
+        };
     }

     private static <T> void transferIfNotNull(T value, Consumer<T> consumer) {
diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/Laplace.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/Laplace.java
index fe85dd70b7337..a14bddd03cdec 100644
--- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/Laplace.java
+++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/Laplace.java
@@ -42,7 +42,7 @@ public final class Laplace extends SmoothingModel {
      */
     public static final double DEFAULT_LAPLACE_ALPHA = 0.5;

-    private double alpha = DEFAULT_LAPLACE_ALPHA;
+    private final double alpha;

     /**
      * Creates a Laplace smoothing model.
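The arrow-form switch used above is an expression: each case yields a value, there is no fall-through, and a throwing default keeps the method total for unexpected input, exactly like the if/else chains it replaces. A standalone sketch of the same shape (the enum and strings here are illustrative):

class SwitchExpressionDemo {
    enum Mode { MISSING, POPULAR, ALWAYS }

    static Mode resolve(String value) {
        // Arrow cases return a value directly; no break statements, no fall-through.
        return switch (value) {
            case "missing" -> Mode.MISSING;
            case "popular" -> Mode.POPULAR;
            case "always" -> Mode.ALWAYS;
            default -> throw new IllegalArgumentException("Illegal mode " + value);
        };
    }

    public static void main(String[] args) {
        System.out.println(resolve("popular"));   // POPULAR
    }
}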
diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/LaplaceScorer.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/LaplaceScorer.java
index ff752a8e62985..dce063d6e655b 100644
--- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/LaplaceScorer.java
+++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/LaplaceScorer.java
@@ -15,7 +15,7 @@ import java.io.IOException;

 final class LaplaceScorer extends WordScorer {
-    private double alpha;
+    private final double alpha;

     LaplaceScorer(IndexReader reader, Terms terms, String field, double realWordLikelihood, BytesRef separator, double alpha)
         throws IOException {
diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/MultiCandidateGeneratorWrapper.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/MultiCandidateGeneratorWrapper.java
index 80ebd9e45acf8..7e804c173da9c 100644
--- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/MultiCandidateGeneratorWrapper.java
+++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/MultiCandidateGeneratorWrapper.java
@@ -19,18 +19,13 @@ public final class MultiCandidateGeneratorWrapper extends CandidateGenerator {

     private final CandidateGenerator[] candidateGenerator;
-    private int numCandidates;
+    private final int numCandidates;

     public MultiCandidateGeneratorWrapper(int numCandidates, CandidateGenerator... candidateGenerators) {
         this.candidateGenerator = candidateGenerators;
         this.numCandidates = numCandidates;
     }

-    @Override
-    public boolean isKnownWord(BytesRef term) throws IOException {
-        return candidateGenerator[0].isKnownWord(term);
-    }
-
     @Override
     public TermStats termStats(BytesRef term) throws IOException {
         return candidateGenerator[0].termStats(term);
diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java
index 52c15eb214da9..4400852ebbd5a 100644
--- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java
+++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java
@@ -159,7 +159,7 @@ public Suggestion<? extends Entry<? extends Option>> innerExecute(
         return response;
     }

-    private static TokenStream tokenStream(Analyzer analyzer, BytesRef query, CharsRefBuilder spare, String field) throws IOException {
+    private static TokenStream tokenStream(Analyzer analyzer, BytesRef query, CharsRefBuilder spare, String field) {
         spare.copyUTF8Bytes(query);
         return analyzer.tokenStream(field, new CharArrayReader(spare.chars(), 0, spare.length()));
     }
@@ -174,7 +174,7 @@ protected Suggestion<? extends Entry<? extends Option>> emptySuggestion(
         String name,
         PhraseSuggestionContext suggestion,
         CharsRefBuilder spare
-    ) throws IOException {
+    ) {
         PhraseSuggestion phraseSuggestion = new PhraseSuggestion(name, suggestion.getSize());
         spare.copyUTF8Bytes(suggestion.getText());
         phraseSuggestion.addTerm(new PhraseSuggestion.Entry(new Text(spare.toString()), 0, spare.length()));
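Several signatures above drop a `throws IOException` that nothing in the method body can actually raise. A declared checked exception forces every caller to handle it, so narrowing the signature removes dead error-handling ceremony at each call site. A hedged illustration (method names invented, not from the patch):

import java.io.IOException;

class ThrowsCleanupDemo {
    // Before: callers had to handle an IOException that was never thrown.
    static String renderOld(String name) throws IOException {
        return "suggestion:" + name;
    }

    // After: the impossible checked exception is gone from the contract.
    static String renderNew(String name) {
        return "suggestion:" + name;
    }

    public static void main(String[] args) {
        try {
            System.out.println(renderOld("foo"));   // ceremony forced by the old signature
        } catch (IOException impossible) {
            throw new AssertionError(impossible);
        }
        System.out.println(renderNew("foo"));       // no handler needed
    }
}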
diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestion.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestion.java
index 2cb04b73b7f5f..1c881a9887583 100644
--- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestion.java
+++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestion.java
@@ -73,13 +73,6 @@ public Entry(StreamInput in) throws IOException {
             cutoffScore = in.readDouble();
         }

-        /**
-         * @return cutoff score for suggestions. input term score * confidence for phrase suggest, 0 otherwise
-         */
-        public double getCutoffScore() {
-            return cutoffScore;
-        }
-
         @Override
         protected void merge(Suggestion.Entry