diff --git a/.buildkite/pipelines/ecs-dynamic-template-tests.yml b/.buildkite/pipelines/ecs-dynamic-template-tests.yml
index a8145c61a2d40..1c6c18983b082 100644
--- a/.buildkite/pipelines/ecs-dynamic-template-tests.yml
+++ b/.buildkite/pipelines/ecs-dynamic-template-tests.yml
@@ -10,5 +10,7 @@ steps:
 notify:
   - slack: "#es-delivery"
     if: build.state == "failed"
+  - slack: "#es-data-management"
+    if: build.state == "failed"
   - email: "logs-plus@elastic.co"
     if: build.state == "failed"
diff --git a/.buildkite/pipelines/periodic.bwc.template.yml b/.buildkite/pipelines/periodic.bwc.template.yml
index 8a8c43d75e3ef..34e9aa656e340 100644
--- a/.buildkite/pipelines/periodic.bwc.template.yml
+++ b/.buildkite/pipelines/periodic.bwc.template.yml
@@ -4,7 +4,7 @@
     agents:
       provider: gcp
       image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
       buildDirectory: /dev/shm/bk
     env:
       BWC_VERSION: $BWC_VERSION
\ No newline at end of file
diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml
index 213bbff8e029c..88738c88ef5a0 100644
--- a/.buildkite/pipelines/periodic.yml
+++ b/.buildkite/pipelines/periodic.yml
@@ -8,7 +8,7 @@ steps:
     agents:
       provider: gcp
       image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
       buildDirectory: /dev/shm/bk
     env:
       BWC_VERSION: 7.0.0
@@ -18,7 +18,7 @@ steps:
     agents:
       provider: gcp
       image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
       buildDirectory: /dev/shm/bk
     env:
       BWC_VERSION: 7.0.1
@@ -28,7 +28,7 @@ steps:
     agents:
      provider: gcp
       image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
       buildDirectory: /dev/shm/bk
     env:
       BWC_VERSION: 7.1.0
@@ -38,7 +38,7 @@ steps:
     agents:
       provider: gcp
       image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
       buildDirectory: /dev/shm/bk
     env:
       BWC_VERSION: 7.1.1
@@ -48,7 +48,7 @@ steps:
     agents:
       provider: gcp
       image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
       buildDirectory: /dev/shm/bk
     env:
       BWC_VERSION: 7.2.0
@@ -58,7 +58,7 @@ steps:
     agents:
       provider: gcp
       image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
       buildDirectory: /dev/shm/bk
     env:
       BWC_VERSION: 7.2.1
@@ -68,7 +68,7 @@ steps:
     agents:
       provider: gcp
       image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
       buildDirectory: /dev/shm/bk
     env:
       BWC_VERSION: 7.3.0
@@ -78,7 +78,7 @@ steps:
     agents:
       provider: gcp
       image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
       buildDirectory: /dev/shm/bk
     env:
       BWC_VERSION: 7.3.1
@@ -88,7 +88,7 @@ steps:
     agents:
       provider: gcp
       image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
       buildDirectory: /dev/shm/bk
     env:
       BWC_VERSION: 7.3.2
@@ -98,7 +98,7 @@ steps:
     agents:
       provider: gcp
       image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
       buildDirectory: /dev/shm/bk
     env:
       BWC_VERSION: 7.4.0
@@ -108,7 +108,7 @@ steps:
     agents:
       provider: gcp
       image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
       buildDirectory: /dev/shm/bk
     env:
       BWC_VERSION: 7.4.1
@@ -118,7 +118,7 @@ steps:
     agents:
       provider: gcp
       image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
       buildDirectory: /dev/shm/bk
env: BWC_VERSION: 7.4.2 @@ -128,7 +128,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.5.0 @@ -138,7 +138,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.5.1 @@ -148,7 +148,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.5.2 @@ -158,7 +158,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.6.0 @@ -168,7 +168,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.6.1 @@ -178,7 +178,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.6.2 @@ -188,7 +188,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.7.0 @@ -198,7 +198,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.7.1 @@ -208,7 +208,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.8.0 @@ -218,7 +218,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.8.1 @@ -228,7 +228,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.9.0 @@ -238,7 +238,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.9.1 @@ -248,7 +248,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.9.2 @@ -258,7 +258,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.9.3 @@ -268,7 +268,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.10.0 @@ -278,7 +278,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.10.1 @@ -288,7 +288,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.10.2 @@ -298,7 +298,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - 
machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.11.0 @@ -308,7 +308,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.11.1 @@ -318,7 +318,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.11.2 @@ -328,7 +328,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.12.0 @@ -338,7 +338,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.12.1 @@ -348,7 +348,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.13.0 @@ -358,7 +358,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.13.1 @@ -368,7 +368,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.13.2 @@ -378,7 +378,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.13.3 @@ -388,7 +388,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.13.4 @@ -398,7 +398,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.14.0 @@ -408,7 +408,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.14.1 @@ -418,7 +418,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.14.2 @@ -428,7 +428,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.15.0 @@ -438,7 +438,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.15.1 @@ -448,7 +448,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.15.2 @@ -458,7 +458,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.16.0 @@ -468,7 +468,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: 
BWC_VERSION: 7.16.1 @@ -478,7 +478,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.16.2 @@ -488,7 +488,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.16.3 @@ -498,7 +498,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.0 @@ -508,7 +508,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.1 @@ -518,7 +518,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.2 @@ -528,7 +528,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.3 @@ -538,7 +538,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.4 @@ -548,7 +548,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.5 @@ -558,7 +558,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.6 @@ -568,7 +568,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.7 @@ -578,7 +578,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.8 @@ -588,7 +588,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.9 @@ -598,7 +598,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.10 @@ -608,7 +608,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.11 @@ -618,7 +618,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.12 @@ -628,7 +628,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.13 @@ -638,7 +638,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.14 @@ -648,7 +648,7 @@ steps: agents: provider: gcp image: 
family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.15 @@ -668,7 +668,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.0.0 @@ -678,7 +678,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.0.1 @@ -688,7 +688,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.1.0 @@ -698,7 +698,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.1.1 @@ -708,7 +708,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.1.2 @@ -718,7 +718,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.1.3 @@ -728,7 +728,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.2.0 @@ -738,7 +738,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.2.1 @@ -748,7 +748,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.2.2 @@ -758,7 +758,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.2.3 @@ -768,7 +768,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.3.0 @@ -778,7 +778,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.3.1 @@ -788,7 +788,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.3.2 @@ -798,7 +798,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.3.3 @@ -808,7 +808,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.4.0 @@ -818,7 +818,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.4.1 @@ -828,7 +828,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: 
/dev/shm/bk env: BWC_VERSION: 8.4.2 @@ -838,7 +838,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.4.3 @@ -848,7 +848,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.5.0 @@ -858,7 +858,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.5.1 @@ -868,7 +868,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.5.2 @@ -878,7 +878,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.5.3 @@ -888,7 +888,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.6.0 @@ -898,7 +898,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.6.1 @@ -908,7 +908,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.6.2 @@ -918,7 +918,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.7.0 @@ -928,7 +928,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.7.1 @@ -938,7 +938,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.8.0 @@ -948,7 +948,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.8.1 @@ -958,7 +958,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.8.2 @@ -968,7 +968,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.9.0 @@ -978,7 +978,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.9.1 @@ -988,7 +988,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.9.2 @@ -998,7 +998,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.10.0 @@ -1008,7 +1008,7 @@ steps: agents: provider: gcp image: 
family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
       buildDirectory: /dev/shm/bk
     env:
       BWC_VERSION: 8.10.1
@@ -1018,7 +1018,7 @@ steps:
     agents:
       provider: gcp
       image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
       buildDirectory: /dev/shm/bk
     env:
       BWC_VERSION: 8.10.2
@@ -1028,7 +1028,7 @@ steps:
     agents:
       provider: gcp
       image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
       buildDirectory: /dev/shm/bk
     env:
       BWC_VERSION: 8.10.3
@@ -1038,7 +1038,7 @@ steps:
     agents:
       provider: gcp
       image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
       buildDirectory: /dev/shm/bk
     env:
       BWC_VERSION: 8.10.4
@@ -1048,7 +1048,7 @@ steps:
     agents:
       provider: gcp
       image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
       buildDirectory: /dev/shm/bk
     env:
       BWC_VERSION: 8.11.0
@@ -1058,7 +1058,7 @@ steps:
     agents:
       provider: gcp
       image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
       buildDirectory: /dev/shm/bk
     env:
       BWC_VERSION: 8.11.1
@@ -1068,7 +1068,7 @@ steps:
     agents:
       provider: gcp
       image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
       buildDirectory: /dev/shm/bk
     env:
       BWC_VERSION: 8.11.2
@@ -1078,7 +1078,7 @@ steps:
     agents:
       provider: gcp
       image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
       buildDirectory: /dev/shm/bk
     env:
       BWC_VERSION: 8.12.0
diff --git a/.buildkite/pipelines/pull-request/bwc-snapshots.yml b/.buildkite/pipelines/pull-request/bwc-snapshots.yml
index 21873475056ea..5a9fc2d938ac0 100644
--- a/.buildkite/pipelines/pull-request/bwc-snapshots.yml
+++ b/.buildkite/pipelines/pull-request/bwc-snapshots.yml
@@ -16,5 +16,5 @@ steps:
         agents:
           provider: gcp
           image: family/elasticsearch-ubuntu-2004
-          machineType: custom-32-98304
+          machineType: n1-standard-32
           buildDirectory: /dev/shm/bk
diff --git a/.buildkite/pull-requests.json b/.buildkite/pull-requests.json
index b59bdc79ad293..c4aa43c775b1e 100644
--- a/.buildkite/pull-requests.json
+++ b/.buildkite/pull-requests.json
@@ -11,7 +11,7 @@
       "set_commit_status": false,
       "build_on_commit": true,
       "build_on_comment": true,
-      "trigger_comment_regex": "run\\W+elasticsearch-ci.+",
+      "trigger_comment_regex": "(run\\W+elasticsearch-ci.+)|(^\\s*(buildkite\\s*)?test\\s+this(\\s+please)?)",
       "cancel_intermediate_builds": true,
       "cancel_intermediate_builds_on_comment": false
     },
diff --git a/.buildkite/scripts/pull-request/pipeline.test.ts b/.buildkite/scripts/pull-request/pipeline.test.ts
index e13b1e1f73278..d0634752260e4 100644
--- a/.buildkite/scripts/pull-request/pipeline.test.ts
+++ b/.buildkite/scripts/pull-request/pipeline.test.ts
@@ -12,21 +12,28 @@ describe("generatePipelines", () => {
     process.env["GITHUB_PR_TRIGGER_COMMENT"] = "";
   });

-  test("should generate correct pipelines with a non-docs change", () => {
-    const pipelines = generatePipelines(`${import.meta.dir}/mocks/pipelines`, ["build.gradle", "docs/README.asciidoc"]);
+  // Helper for testing pipeline generations that should be the same when using the overall ci trigger comment "buildkite test this"
+  const testWithTriggerCheck = (directory: string, changedFiles?: string[]) => {
+    const pipelines = generatePipelines(directory, changedFiles);
     expect(pipelines).toMatchSnapshot();
+
+    process.env["GITHUB_PR_TRIGGER_COMMENT"] = "buildkite test this";
+    const pipelinesWithTriggerComment = generatePipelines(directory, changedFiles);
+    expect(pipelinesWithTriggerComment).toEqual(pipelines);
+  };
+
+  test("should generate correct pipelines with a non-docs change", () => {
+    testWithTriggerCheck(`${import.meta.dir}/mocks/pipelines`, ["build.gradle", "docs/README.asciidoc"]);
   });

   test("should generate correct pipelines with only docs changes", () => {
-    const pipelines = generatePipelines(`${import.meta.dir}/mocks/pipelines`, ["docs/README.asciidoc"]);
-    expect(pipelines).toMatchSnapshot();
+    testWithTriggerCheck(`${import.meta.dir}/mocks/pipelines`, ["docs/README.asciidoc"]);
   });

   test("should generate correct pipelines with full BWC expansion", () => {
     process.env["GITHUB_PR_LABELS"] = "test-full-bwc";

-    const pipelines = generatePipelines(`${import.meta.dir}/mocks/pipelines`, ["build.gradle"]);
-    expect(pipelines).toMatchSnapshot();
+    testWithTriggerCheck(`${import.meta.dir}/mocks/pipelines`, ["build.gradle"]);
   });

   test("should generate correct pipeline when using a trigger comment for it", () => {
diff --git a/.buildkite/scripts/pull-request/pipeline.ts b/.buildkite/scripts/pull-request/pipeline.ts
index 600e0373d9cfc..65aec47fe3cc8 100644
--- a/.buildkite/scripts/pull-request/pipeline.ts
+++ b/.buildkite/scripts/pull-request/pipeline.ts
@@ -144,8 +144,12 @@ export const generatePipelines = (
     (pipeline) => changedFilesIncludedCheck(pipeline, changedFiles),
   ];

-  // When triggering via comment, we ONLY want to run pipelines that match the trigger phrase, regardless of labels, etc
-  if (process.env["GITHUB_PR_TRIGGER_COMMENT"]) {
+  // When triggering via the "run elasticsearch-ci/step-name" comment, we ONLY want to run pipelines that match the trigger phrase, regardless of labels, etc
+  // However, if we're using the overall CI trigger "[buildkite] test this [please]", we should use the regular filters above
+  if (
+    process.env["GITHUB_PR_TRIGGER_COMMENT"] &&
+    !process.env["GITHUB_PR_TRIGGER_COMMENT"].match(/^\s*(buildkite\s*)?test\s+this(\s+please)?/i)
+  ) {
     filters = [triggerCommentCheck];
   }
diff --git a/BUILDING.md b/BUILDING.md
index 814a9fb60ded8..127d422fad089 100644
--- a/BUILDING.md
+++ b/BUILDING.md
@@ -3,7 +3,7 @@ Building Elasticsearch with Gradle

 Elasticsearch is built using the [Gradle](https://gradle.org/) open source build tools.

-This document provides a general guidelines for using and working on the elasticsearch build logic.
+This document provides a general guidelines for using and working on the Elasticsearch build logic.

 ## Build logic organisation

@@ -11,56 +11,56 @@ The Elasticsearch project contains 3 build-related projects that are included in

 ### `build-conventions`

-This project contains build conventions that are applied to all elasticsearch projects.
+This project contains build conventions that are applied to all Elasticsearch projects.

 ### `build-tools`

-This project contains all build logic that we publish for third party elasticsearch plugin authors.
+This project contains all build logic that we publish for third party Elasticsearch plugin authors.
 We provide the following plugins:
-- `elasticsearch.esplugin` - A gradle plugin for building an elasticsearch plugin.
-- `elasticsearch.testclusters` - A gradle plugin for setting up es clusters for testing within a build.
+- `elasticsearch.esplugin` - A Gradle plugin for building an elasticsearch plugin.
+- `elasticsearch.testclusters` - A Gradle plugin for setting up es clusters for testing within a build.

-This project is published as part of the elasticsearch release and accessible by
+This project is published as part of the Elasticsearch release and accessible by
 `org.elasticsearch.gradle:build-tools:`. These build tools are also used by the
 `elasticsearch-hadoop` project maintained by elastic.

 ### `build-tools-internal`

-This project contains all elasticsearch project specific build logic that is not meant to be shared
+This project contains all Elasticsearch project specific build logic that is not meant to be shared
 with other internal or external projects.

 ## Build guidelines

 This is an intentionally small set of guidelines to build users and authors
-to ensure we keep the build consistent. We also publish elasticsearch build logic
-as `build-tools` to be usuable by thirdparty elasticsearch plugin authors. This is
+to ensure we keep the build consistent. We also publish Elasticsearch build logic
+as `build-tools` to be usable by thirdparty Elasticsearch plugin authors. This is
 also used by other elastic teams like `elasticsearch-hadoop`.
 Breaking changes should therefore be avoided and an appropriate deprecation cycle should be followed.

 ### Stay up to date

-The elasticsearch build usually uses the latest Gradle GA release. We stay as close to the
+The Elasticsearch build usually uses the latest Gradle GA release. We stay as close to the
 latest Gradle releases as possible. In certain cases an update is blocked by a breaking behaviour
-in Gradle. We're usually in contact with the gradle team here or working on a fix
+in Gradle. We're usually in contact with the Gradle team here or working on a fix
 in our build logic to resolve this.

 **The Elasticsearch build will fail if any deprecated Gradle API is used.**

 ### Follow Gradle best practices

-Tony Robalik has compiled a good list of rules that aligns with ours when it comes to writing and maintaining elasticsearch
-gradle build logic at http://autonomousapps.com/blog/rules-for-gradle-plugin-authors.html.
+Tony Robalik has compiled a good list of rules that aligns with ours when it comes to writing and maintaining Elasticsearch
+Gradle build logic at http://autonomousapps.com/blog/rules-for-gradle-plugin-authors.html.
 Our current build does not yet tick off all those rules everywhere but the ultimate goal is to follow these principles.
-The reasons for following those rules besides better readability or maintenance are also the goal to support newer gradle
+The reasons for following those rules besides better readability or maintenance are also the goal to support newer Gradle
 features that we will benefit from in terms of performance and reliability. E.g. [configuration-cache support](https://github.com/elastic/elasticsearch/issues/57918),
 [Project Isolation]([https://gradle.github.io/configuration-cache/#project_isolation) or [predictive test selection](https://gradle.com/gradle-enterprise-solutions/predictive-test-selection/)

 ### Make a change in the build

-There are a few guidelines to follow that should make your life easier to make changes to the elasticsearch build.
+There are a few guidelines to follow that should make your life easier to make changes to the Elasticsearch build.
 Please add a member of the `es-delivery` team as a reviewer if you're making non-trivial changes to the build.

 #### Adding or updating a dependency
@@ -93,13 +93,13 @@ We prefer sha256 checksums as md5 and sha1 are not considered safe anymore these
 will have the `origin` attribute been set to `Generated by Gradle`.
>A manual confirmation of the Gradle generated checksums is currently not mandatory. ->If you want to add a level of verification you can manually confirm the checksum (e.g by looking it up on the website of the library) +>If you want to add a level of verification you can manually confirm the checksum (e.g. by looking it up on the website of the library) >Please replace the content of the `origin` attribute by `official site` in that case. > -#### Custom Plugin and Task implementations +#### Custom plugin and task implementations -Build logic that is used across multiple subprojects should considered to be moved into a Gradle plugin with according Gradle task implmentation. +Build logic that is used across multiple subprojects should be considered to be moved into a Gradle plugin with according Gradle task implementation. Elasticsearch specific build logic is located in the `build-tools-internal` subproject including integration tests. - Gradle plugins and Tasks should be written in Java @@ -108,7 +108,7 @@ Elasticsearch specific build logic is located in the `build-tools-internal` subp #### Declaring tasks -The elasticsearch build makes use of the [task avoidance API](https://docs.gradle.org/current/userguide/task_configuration_avoidance.html) to keep the configuration time of the build low. +The Elasticsearch build makes use of the [task avoidance API](https://docs.gradle.org/current/userguide/task_configuration_avoidance.html) to keep the configuration time of the build low. When declaring tasks (in build scripts or custom plugins) this means that we want to _register_ a task like: @@ -118,18 +118,18 @@ instead of eagerly _creating_ the task: task someTask { ... } -The major difference between these two syntaxes is, that the configuration block of an registered task will only be executed when the task is actually created due to the build requires that task to run. The configuration block of an eagerly created tasks will be executed immediately. +The major difference between these two syntaxes is, that the configuration block of a registered task will only be executed when the task is actually created due to the build requires that task to run. The configuration block of an eagerly created tasks will be executed immediately. -By actually doing less in the gradle configuration time as only creating tasks that are requested as part of the build and by only running the configurations for those requested tasks, using the task avoidance api contributes a major part in keeping our build fast. +By actually doing less in the Gradle configuration time as only creating tasks that are requested as part of the build and by only running the configurations for those requested tasks, using the task avoidance api contributes a major part in keeping our build fast. #### Registering test clusters -When using the elasticsearch test cluster plugin we want to use (similar to the task avoidance API) a Gradle API to create domain objects lazy or only if required by the build. +When using the Elasticsearch test cluster plugin we want to use (similar to the task avoidance API) a Gradle API to create domain objects lazy or only if required by the build. Therefore we register test cluster by using the following syntax: def someClusterProvider = testClusters.register('someCluster') { ... } -This registers a potential testCluster named `somecluster` and provides a provider instance, but doesn't create it yet nor configures it. 
This makes the gradle configuration phase more efficient by +This registers a potential testCluster named `somecluster` and provides a provider instance, but doesn't create it yet nor configures it. This makes the Gradle configuration phase more efficient by doing less. To wire this registered cluster into a `TestClusterAware` task (e.g. `RestIntegTest`) you can resolve the actual cluster from the provider instance: @@ -139,23 +139,23 @@ To wire this registered cluster into a `TestClusterAware` task (e.g. `RestIntegT nonInputProperties.systemProperty 'tests.leader_host', "${-> someClusterProvider.get().getAllHttpSocketURI().get(0)}" } -#### Adding additional integration tests +#### Adding integration tests -Additional integration tests for a certain elasticsearch modules that are specific to certain cluster configuration can be declared in a separate so called `qa` subproject of your module. +Additional integration tests for a certain Elasticsearch modules that are specific to certain cluster configuration can be declared in a separate so called `qa` subproject of your module. The benefit of a dedicated project for these tests are: -- `qa` projects are dedicated two specific usecases and easier to maintain +- `qa` projects are dedicated two specific use-cases and easier to maintain - It keeps the specific test logic separated from the common test logic. - You can run those tests in parallel to other projects of the build. #### Using test fixtures -Sometimes we want to share test fixtures to setup the code under test across multiple projects. There are basically two ways doing so. +Sometimes we want to share test fixtures to set up the code under test across multiple projects. There are basically two ways doing so. -Ideally we would use the build-in [java-test-fixtures](https://docs.gradle.org/current/userguide/java_testing.html#sec:java_test_fixtures) gradle plugin. +Ideally we would use the build-in [java-test-fixtures](https://docs.gradle.org/current/userguide/java_testing.html#sec:java_test_fixtures) Gradle plugin. This plugin relies on having a separate sourceSet for the test fixtures code. -In the elasticsearch codebase we have test fixtures and actual tests within the same sourceSet. Therefore we introduced the `elasticsearch.internal-test-artifact` plugin to provides another build artifact of your project based on the `test` sourceSet. +In the Elasticsearch codebase we have test fixtures and actual tests within the same sourceSet. Therefore we introduced the `elasticsearch.internal-test-artifact` plugin to provides another build artifact of your project based on the `test` sourceSet. This artifact can be resolved by the consumer project as shown in the example below: @@ -168,9 +168,9 @@ dependencies { ``` This test artifact mechanism makes use of the concept of [component capabilities](https://docs.gradle.org/current/userguide/component_capabilities.html) -similar to how the gradle build-in `java-test-fixtures` plugin works. +similar to how the Gradle build-in `java-test-fixtures` plugin works. -`testArtifact` is a shortcut declared in the elasticsearch build. Alternatively you can declare the dependency via +`testArtifact` is a shortcut declared in the Elasticsearch build. Alternatively you can declare the dependency via ``` dependencies { @@ -186,7 +186,7 @@ dependencies { To test an unreleased development version of a third party dependency you have several options. -#### How to use a maven based third party dependency via mavenlocal? 
+#### How to use a Maven based third party dependency via `mavenlocal`? 1. Clone the third party repository locally 2. Run `mvn install` to install copy into your `~/.m2/repository` folder. @@ -200,16 +200,15 @@ To test an unreleased development version of a third party dependency you have s } ``` 4. Update the version in your dependency declaration accordingly (likely a snapshot version) -5. Run the gradle build as needed +5. Run the Gradle build as needed -#### How to use a maven built based third party dependency with jitpack repository? +#### How to use a Maven built based third party dependency with JitPack repository? -https://jitpack.io is an adhoc repository that supports building maven projects transparently in the background when -resolving unreleased snapshots from a github repository. This approach also works as temporally solution +https://jitpack.io is an adhoc repository that supports building Maven projects transparently in the background when +resolving unreleased snapshots from a GitHub repository. This approach also works as temporally solution and is compliant with our CI builds. 1. Add the JitPack repository to the root build file: - ``` allprojects { repositories { @@ -227,7 +226,7 @@ dependencies { As version you could also use a certain short commit hash or `main-SNAPSHOT`. In addition to snapshot builds JitPack supports building Pull Requests. Simply use PR-SNAPSHOT as the version. -3. Run the gradle build as needed. Keep in mind the initial resolution might take a bit longer as this needs to be built +3. Run the Gradle build as needed. Keep in mind the initial resolution might take a bit longer as this needs to be built by JitPack in the background before we can resolve the adhoc built dependency. --- @@ -240,7 +239,7 @@ not want to ship unreleased libraries into our releases. #### How to use a custom third party artifact? -For third party libraries that are not built with maven (e.g. ant) or provided as a plain jar artifact we can leverage +For third party libraries that are not built with Maven (e.g. Ant) or provided as a plain jar artifact we can leverage a flat directory repository that resolves artifacts from a flat directory on your filesystem. 1. Put the jar artifact with the format `artifactName-version.jar` into a directory named `localRepo` (you have to create this manually) @@ -264,7 +263,7 @@ allprojects { implementation 'x:jmxri:1.2.1' } ``` -4. Run the gradle build as needed with `--write-verification-metadata` to ensure the gradle dependency verification does not fail on your custom dependency. +4. Run the Gradle build as needed with `--write-verification-metadata` to ensure the Gradle dependency verification does not fail on your custom dependency. --- **NOTE** @@ -273,5 +272,5 @@ As Gradle prefers to use modules whose descriptor has been created from real met flat directory repositories cannot be used to override artifacts with real meta-data from other repositories declared in the build. For example, if Gradle finds only `jmxri-1.2.1.jar` in a flat directory repository, but `jmxri-1.2.1.pom` in another repository that supports meta-data, it will use the second repository to provide the module. -Therefore, it is recommended to declare a version that is not resolvable from public repositories we use (e.g. maven central) +Therefore, it is recommended to declare a version that is not resolvable from public repositories we use (e.g. 
Maven Central) --- diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index cb674221913de..db8cca17a5606 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,4 +1,4 @@ -Contributing to elasticsearch +Contributing to Elasticsearch ============================= Elasticsearch is a free and open project and we love to receive contributions from our community — you! There are many ways to contribute, from writing tutorials or blog posts, improving the documentation, submitting bug reports and feature requests or writing code which can be incorporated into Elasticsearch itself. @@ -54,7 +54,7 @@ The process for contributing to any of the [Elastic repositories](https://github ### Fork and clone the repository You will need to fork the main Elasticsearch code or documentation repository and clone it to your local machine. See -[github help page](https://help.github.com/articles/fork-a-repo) for help. +[GitHub help page](https://help.github.com/articles/fork-a-repo) for help. Further instructions for specific projects are given below. @@ -69,7 +69,7 @@ cycle. * Lines that are not part of your change should not be edited (e.g. don't format unchanged lines, don't reorder existing imports) * Add the appropriate [license headers](#license-headers) to any new files -* For contributions involving the elasticsearch build you can find details about the build setup in the +* For contributions involving the Elasticsearch build you can find details about the build setup in the [BUILDING](BUILDING.md) file ### Submitting your changes @@ -89,7 +89,6 @@ Once your changes and tests are ready to submit for review: Update your local repository with the most recent code from the main Elasticsearch repository, and rebase your branch on top of the latest main branch. We prefer your initial changes to be squashed into a single commit. Later, if we ask you to make changes, add them as separate commits. This makes them easier to review. As a final step before merging we will either ask you to squash all commits yourself or we'll do it for you. - 4. Submit a pull request Push your local changes to your forked copy of the repository and [submit a pull request](https://help.github.com/articles/using-pull-requests). In the pull request, choose a title which sums up the changes that you have made, and in the body provide more details about what your changes do. Also mention the number of the issue where discussion has taken place, eg "Closes #123". @@ -121,8 +120,7 @@ using the wrapper via the `gradlew` script on Unix systems or `gradlew.bat` script on Windows in the root of the repository. The examples below show the usage on Unix. -We support development in IntelliJ versions IntelliJ 2020.1 and -onwards. +We support development in [IntelliJ IDEA] versions 2020.1 and onwards. [Docker](https://docs.docker.com/install/) is required for building some Elasticsearch artifacts and executing certain test suites. 
You can run Elasticsearch without building all the artifacts with: @@ -135,7 +133,7 @@ specifically these lines tell you that Elasticsearch is ready: [2020-05-29T14:50:35,167][INFO ][o.e.h.AbstractHttpServerTransport] [runTask-0] publish_address {127.0.0.1:9200}, bound_addresses {[::1]:9200}, {127.0.0.1:9200} [2020-05-29T14:50:35,169][INFO ][o.e.n.Node ] [runTask-0] started -But to be honest its typically easier to wait until the console stops scrolling +But to be honest it's typically easier to wait until the console stops scrolling and then run `curl` in another window like this: curl -u elastic:password localhost:9200 @@ -143,7 +141,7 @@ and then run `curl` in another window like this: ### Importing the project into IntelliJ IDEA -The minimum IntelliJ IDEA version required to import the Elasticsearch project is 2020.1 +The minimum IntelliJ IDEA version required to import the Elasticsearch project is 2020.1. Elasticsearch builds using Java 17. When importing into IntelliJ you will need to define an appropriate SDK. The convention is that **this SDK should be named "17"** so that the project import will detect it automatically. For more details @@ -173,7 +171,7 @@ action is required. #### Formatting -Elasticsearch code is automatically formatted with [spotless], backed by the +Elasticsearch code is automatically formatted with [Spotless], backed by the Eclipse formatter. You can do the same in IntelliJ with the [Eclipse Code Formatter] so that you can apply the correct formatting directly in your IDE. The configuration for the plugin is held in @@ -198,7 +196,7 @@ Alternative manual steps for IntelliJ. 3. Navigate to the file `build-conventions/formatterConfig.xml` 4. Click "OK" -### REST Endpoint Conventions +### REST endpoint conventions Elasticsearch typically uses singular nouns rather than plurals in URLs. For example: @@ -214,7 +212,7 @@ but not: You may find counterexamples, but new endpoints should use the singular form. -### Java Language Formatting Guidelines +### Java language formatting guidelines Java files in the Elasticsearch codebase are automatically formatted using the [Spotless Gradle] plugin. All new projects are automatically formatted, @@ -249,13 +247,13 @@ Please follow these formatting guidelines: only do this where the benefit clearly outweighs the decrease in formatting consistency. * Note that Javadoc and block comments i.e. `/* ... */` are not formatted, - but line comments i.e `// ...` are. + but line comments i.e. `// ...` are. * Negative boolean expressions must use the form `foo == false` instead of `!foo` for better readability of the code. This is enforced via Checkstyle. Conversely, you should not write e.g. `if (foo == true)`, but just `if (foo)`. -#### Editor / IDE Support +#### Editor / IDE support IntelliJ IDEs can [import](https://blog.jetbrains.com/idea/2014/01/intellij-idea-13-importing-code-formatter-settings-from-eclipse/) @@ -316,7 +314,7 @@ is to be helpful, not to turn writing code into a chore. this is critical to understanding the code e.g. documenting the subtleties of the implementation of a private method. The point here is that implementations will change over time, and the Javadoc is - less likely to become out-of-date if it only talks about the what is + less likely to become out-of-date if it only talks about the purpose of the code, not what it does. 8. Examples in Javadoc can be very useful, so feel free to add some if you can reasonably do so i.e. 
if it takes a whole page of code to set @@ -362,7 +360,7 @@ Finally, use your judgement! Base your decisions on what will help other developers - including yourself, when you come back to some code 3 months in the future, having forgotten how it works. -### License Headers +### License headers We require license headers on all Java files. With the exception of the top-level `x-pack` directory, all contributed code should have the following @@ -433,7 +431,7 @@ In rare situations you may want to configure your `Logger` slightly differently, perhaps specifying a different class or maybe using one of the methods on `org.elasticsearch.common.logging.Loggers` instead. -If the log message includes values from your code then you must use use +If the log message includes values from your code then you must use placeholders rather than constructing the string yourself using simple concatenation. Consider wrapping the values in `[...]` to help distinguish them from the static part of the message: @@ -461,18 +459,18 @@ unit tests, especially if there is complex logic for computing what is logged and when to log it. You can use a `org.elasticsearch.test.MockLogAppender` to make assertions about the logs that are being emitted. -Logging is a powerful diagnostic technique but it is not the only possibility. +Logging is a powerful diagnostic technique, but it is not the only possibility. You should also consider exposing some information about your component via an -API instead of in logs. For instance you can implement APIs to report its +API instead of in logs. For instance, you can implement APIs to report its current status, various statistics, and maybe even details of recent failures. #### Log levels -Each log message is written at a particular _level_. By default Elasticsearch +Each log message is written at a particular _level_. By default, Elasticsearch will suppress messages at the two most verbose levels, `TRACE` and `DEBUG`, and will output messages at all other levels. Users can configure which levels of message are written by each logger at runtime, but you should expect everyone -to run with the default configuration almost all of the time and choose your +to run with the default configuration almost all the time and choose your levels accordingly. The guidance in this section is subjective in some areas. When in doubt, @@ -570,7 +568,7 @@ an index template is created or updated: `INFO`-level logging is enabled by default so its target audience is the general population of users and administrators. You should use user-facing terminology and ensure that messages at this level are self-contained. In -general you shouldn't log unusual events, particularly exceptions with stack +general, you shouldn't log unusual events, particularly exceptions with stack traces, at `INFO` level. If the event is relatively benign then use `DEBUG`, whereas if the user should be notified then use `WARN`. @@ -629,7 +627,7 @@ the logs. ##### `ERROR` -This is the next least verbose level after `WARN`. In theory it is possible for +This is the next least verbose level after `WARN`. In theory, it is possible for users to suppress messages at `WARN` and below, believing this to help them focus on the most important `ERROR` messages, but in practice in Elasticsearch this will hide so much useful information that the resulting logs will be @@ -660,7 +658,7 @@ numbering scheme separate to release version. The main ones are inter-node binary protocol and index data + metadata respectively. 
Separated version numbers are comprised of an integer number. The semantic -meaing of a version number are defined within each `*Version` class. There +meaning of a version number are defined within each `*Version` class. There is no direct mapping between separated version numbers and the release version. The versions used by any particular instance of Elasticsearch can be obtained by querying `/_nodes/info` on the node. @@ -692,12 +690,12 @@ feature in a cluster: in a class related to the change you're doing. 2. Return that constant from an instance of `FeatureSpecification.getFeatures`, either an existing implementation or a new implementation. Make sure - the implementation is added as a SPI implementation in `module-info.java` + the implementation is added as an SPI implementation in `module-info.java` and `META-INF/services`. 3. To check if all nodes in the cluster support the new feature, call `FeatureService.clusterHasFeature(ClusterState, NodeFeature)` -### Creating A Distribution +### Creating a distribution Run all build commands from within the root directory: @@ -727,7 +725,7 @@ The archive distributions (tar and zip) can be found under: ./distribution/archives/(darwin-tar|linux-tar|windows-zip|oss-darwin-tar|oss-linux-tar|oss-windows-zip)/build/distributions/ -### Running The Full Test Suite +### Running the full test suite Before submitting your changes, run the test suite to make sure that nothing is broken, with: @@ -752,14 +750,14 @@ a test that passes locally, may actually fail later due to random settings or data input. To make tests repeatable, a `REPRODUCE` line in CI will also include the `-Dtests.seed` parameter. -When running locally, gradle does its best to take advantage of cached results. +When running locally, Gradle does its best to take advantage of cached results. So, if the code is unchanged, running the same test with the same `-Dtests.seed` repeatedly may not actually run the test if it has passed with that seed in the previous execution. A way around this is to pass a separate parameter -to adjust the command options seen by gradle. +to adjust the command options seen by Gradle. A simple option may be to add the parameter `-Dtests.timestamp=$(date +%s)` which will give the current time stamp as a parameter, thus making the parameters -sent to gradle unique and bypassing the cache. +sent to Gradle unique and bypassing the cache. ### Project layout @@ -776,9 +774,9 @@ Builds our tar and zip archives and our rpm and deb packages. Libraries used to build other parts of the project. These are meant to be internal rather than general purpose. We have no plans to [semver](https://semver.org/) their APIs or accept feature requests for them. -We publish them to maven central because they are dependencies of our plugin -test framework, high level rest client, and jdbc driver but they really aren't -general purpose enough to *belong* in maven central. We're still working out +We publish them to Maven Central because they are dependencies of our plugin +test framework, high level rest client, and jdbc driver, but they really aren't +general purpose enough to *belong* in Maven Central. We're still working out what to do here. #### `modules` @@ -789,7 +787,7 @@ they depend on libraries that we don't believe *all* of Elasticsearch should depend on. 
For example, reindex requires the `connect` permission so it can perform -reindex-from-remote but we don't believe that the *all* of Elasticsearch should +reindex-from-remote, but we don't believe that the *all* of Elasticsearch should have the "connect". For another example, Painless is implemented using antlr4 and asm and we don't believe that *all* of Elasticsearch should have access to them. @@ -828,7 +826,7 @@ qa project, open a PR and be ready to discuss options. #### `server` The server component of Elasticsearch that contains all of the modules and -plugins. Right now things like the high level rest client depend on the server +plugins. Right now things like the high level rest client depend on the server, but we'd like to fix that in the future. #### `test` @@ -848,7 +846,7 @@ the `qa` subdirectory functions just like the top level `qa` subdirectory. The `plugin` subdirectory contains the x-pack module which runs inside the Elasticsearch process. -### Gradle Build +### Gradle build We use Gradle to build Elasticsearch because it is flexible enough to not only build and package Elasticsearch, but also orchestrate all of the ways that we @@ -865,16 +863,20 @@ common configurations in our build and how we use them: at compile and runtime but are not exposed as a compile dependency to other dependent projects. Dependencies added to the `implementation` configuration are considered an implementation detail that can be changed at a later date without affecting any dependent projects. +
`api`
Dependencies that are used as compile and runtime dependencies of a project - and are considered part of the external api of the project. + and are considered part of the external api of the project.
+
`runtimeOnly`
Dependencies that are not on the classpath at compile time but are on the classpath at runtime. We mostly use this configuration to make sure that we do not accidentally compile against dependencies of our dependencies, also known as "transitive" dependencies.
+
`compileOnly`
Code that is on the classpath at compile time but that should not be shipped with the project because it is "provided" by the runtime somehow. Elasticsearch plugins use this configuration to include dependencies that are bundled with Elasticsearch's server.
+
`testImplementation`
Code that is on the classpath for compiling tests that are part of this project but not production code. The canonical example of this is `junit`.
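To make the distinctions above concrete, here is a minimal, hypothetical `dependencies` block showing how each of these configurations is typically declared in the Gradle Groovy DSL. The coordinates are invented for illustration only and are not taken from the Elasticsearch build, which wires most of this up through its own build plugins.

```gradle
dependencies {
    // implementation: needed to compile and run this project, but hidden from
    // consumers, so it can change without affecting dependent projects
    implementation 'org.example:internal-helper:1.0.0'

    // api: part of this project's externally visible signatures, so it is
    // exposed to dependent projects at compile time as well
    api 'org.example:public-model:1.0.0'

    // runtimeOnly: not needed for compilation, only present at runtime
    runtimeOnly 'org.example:driver:1.0.0'

    // compileOnly: compiled against but "provided" by the runtime, e.g. the
    // way plugins rely on dependencies bundled with the server
    compileOnly 'org.example:server-api:1.0.0'

    // testImplementation: only on the classpath for compiling and running tests
    testImplementation 'junit:junit:4.13.2'
}
```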
@@ -897,7 +899,7 @@ time is very limited. In some cases the time we would need to spend on reviews would outweigh the benefits of a change by preventing us from working on other more beneficial changes instead. -Please discuss your change in a Github issue before spending much time on its +Please discuss your change in a GitHub issue before spending much time on its implementation. We sometimes have to reject contributions that duplicate other efforts, take the wrong approach to solving a problem, or solve a problem which does not need solving. An up-front discussion often saves a good deal of wasted @@ -980,8 +982,8 @@ Finally, we require that you run `./gradlew check` before submitting a non-documentation contribution. This is mentioned above, but it is worth repeating in this section because it has come up in this context. -[intellij]: https://blog.jetbrains.com/idea/2017/07/intellij-idea-2017-2-is-here-smart-sleek-and-snappy/ +[IntelliJ IDEA]: https://www.jetbrains.com/idea/ [Checkstyle]: https://plugins.jetbrains.com/plugin/1065-checkstyle-idea -[spotless]: https://github.com/diffplug/spotless +[Spotless]: https://github.com/diffplug/spotless [Eclipse Code Formatter]: https://plugins.jetbrains.com/plugin/6546-eclipse-code-formatter [Spotless Gradle]: https://github.com/diffplug/spotless/tree/main/plugin-gradle diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java index 93753f7c7ac56..f9786c4a0c484 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java @@ -28,14 +28,8 @@ private static ListMultimap, String> createLegacyRestTestBasePluginUsag ListMultimap, String> map = ArrayListMultimap.create(1, 200); map.put(LegacyRestTestBasePlugin.class, ":docs"); map.put(LegacyRestTestBasePlugin.class, ":distribution:docker"); - map.put(LegacyRestTestBasePlugin.class, ":modules:analysis-common"); - map.put(LegacyRestTestBasePlugin.class, ":modules:ingest-attachment"); - map.put(LegacyRestTestBasePlugin.class, ":modules:ingest-common"); - map.put(LegacyRestTestBasePlugin.class, ":modules:ingest-user-agent"); - map.put(LegacyRestTestBasePlugin.class, ":modules:kibana"); map.put(LegacyRestTestBasePlugin.class, ":modules:lang-expression"); map.put(LegacyRestTestBasePlugin.class, ":modules:lang-mustache"); - map.put(LegacyRestTestBasePlugin.class, ":modules:lang-painless"); map.put(LegacyRestTestBasePlugin.class, ":modules:mapper-extras"); map.put(LegacyRestTestBasePlugin.class, ":modules:parent-join"); map.put(LegacyRestTestBasePlugin.class, ":modules:percolator"); @@ -43,7 +37,6 @@ private static ListMultimap, String> createLegacyRestTestBasePluginUsag map.put(LegacyRestTestBasePlugin.class, ":modules:reindex"); map.put(LegacyRestTestBasePlugin.class, ":modules:repository-s3"); map.put(LegacyRestTestBasePlugin.class, ":modules:repository-url"); - map.put(LegacyRestTestBasePlugin.class, ":modules:runtime-fields-common"); map.put(LegacyRestTestBasePlugin.class, ":modules:transport-netty4"); map.put(LegacyRestTestBasePlugin.class, ":plugins:analysis-icu"); map.put(LegacyRestTestBasePlugin.class, ":plugins:analysis-kuromoji"); @@ -74,14 +67,13 @@ private static ListMultimap, String> createLegacyRestTestBasePluginUsag map.put(LegacyRestTestBasePlugin.class, ":qa:system-indices"); 
map.put(LegacyRestTestBasePlugin.class, ":qa:unconfigured-node-name"); map.put(LegacyRestTestBasePlugin.class, ":qa:verify-version-constants"); + map.put(LegacyRestTestBasePlugin.class, ":test:external-modules:test-apm-integration"); map.put(LegacyRestTestBasePlugin.class, ":test:external-modules:test-delayed-aggs"); map.put(LegacyRestTestBasePlugin.class, ":test:external-modules:test-die-with-dignity"); - map.put(LegacyRestTestBasePlugin.class, ":test:external-modules:test-apm-integration"); map.put(LegacyRestTestBasePlugin.class, ":test:external-modules:test-error-query"); map.put(LegacyRestTestBasePlugin.class, ":test:external-modules:test-latency-simulating-directory"); map.put(LegacyRestTestBasePlugin.class, ":test:external-modules:test-seek-tracking-directory"); map.put(LegacyRestTestBasePlugin.class, ":test:yaml-rest-runner"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin"); map.put(LegacyRestTestBasePlugin.class, ":distribution:archives:integ-test-zip"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:core"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ent-search"); @@ -92,17 +84,13 @@ private static ListMultimap, String> createLegacyRestTestBasePluginUsag map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:mapper-version"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:vector-tile"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:wildcard"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:kerberos-tests"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:mixed-tier-cluster"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:password-protected-keystore"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:reindex-tests-with-security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:repository-old-versions"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:rolling-upgrade"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:rolling-upgrade-basic"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:rolling-upgrade-multi-cluster"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:runtime-fields:core-with-mapped"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:runtime-fields:core-with-search"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:saml-idp-tests"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:security-example-spi-extension"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:security-setup-password-tests"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:smoke-test-plugins"); @@ -115,12 +103,10 @@ private static ListMultimap, String> createLegacyRestTestBasePluginUsag map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:multi-cluster-search-security:legacy-with-basic-license"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:multi-cluster-search-security:legacy-with-full-license"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:multi-cluster-search-security:legacy-with-restricted-trust"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:runtime-fields:with-security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:third-party:jira"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:third-party:pagerduty"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:third-party:slack"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:async-search:qa:rest"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:async-search:qa:security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:autoscaling:qa:rest"); 
map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ccr:qa:downgrade-to-basic-license"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ccr:qa:multi-cluster"); @@ -130,7 +116,6 @@ private static ListMultimap, String> createLegacyRestTestBasePluginUsag map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ccr:qa:security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:deprecation:qa:early-deprecation-rest"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:deprecation:qa:rest"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:downsample:qa:rest"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:downsample:qa:with-security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:enrich:qa:rest"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:enrich:qa:rest-with-advanced-security"); @@ -139,18 +124,14 @@ private static ListMultimap, String> createLegacyRestTestBasePluginUsag map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:eql:qa:ccs-rolling-upgrade"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:eql:qa:correctness"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:eql:qa:mixed-node"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:eql:qa:multi-cluster-with-security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:server:multi-node"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:server:single-node"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:server:mixed-cluster"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:fleet:qa:rest"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:graph:qa:with-security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:identity-provider:qa:idp-rest-tests"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ilm:qa:multi-cluster"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ilm:qa:multi-node"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ilm:qa:rest"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ilm:qa:with-security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ml:qa:basic-multi-node"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ml:qa:disabled"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ml:qa:ml-with-security"); @@ -163,16 +144,11 @@ private static ListMultimap, String> createLegacyRestTestBasePluginUsag map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:searchable-snapshots:qa:rest"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:searchable-snapshots:qa:s3"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:searchable-snapshots:qa:url"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:security:qa:operator-privileges-tests"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:security:qa:profile"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:security:qa:security-disabled"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:security:qa:smoke-test-all-realms"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:security:qa:tls-basic"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:shutdown:qa:multi-node"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:shutdown:qa:rolling-upgrade"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:slm:qa:multi-node"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:slm:qa:rest"); - 
map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:slm:qa:with-security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:snapshot-based-recoveries:qa:fs"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:snapshot-based-recoveries:qa:license-enforcing"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:snapshot-based-recoveries:qa:s3"); @@ -186,9 +162,6 @@ private static ListMultimap, String> createLegacyRestTestBasePluginUsag map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:sql:qa:jdbc:security:with-ssl"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:sql:qa:jdbc:security:without-ssl"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:sql:qa:mixed-node"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:sql:qa:server:multi-cluster-with-security"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:sql:qa:server:multi-node"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:sql:qa:server:single-node"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:sql:qa:server:security:with-ssl"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:sql:qa:server:security:without-ssl"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:stack:qa:rest"); @@ -198,8 +171,8 @@ private static ListMultimap, String> createLegacyRestTestBasePluginUsag map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:transform:qa:single-node-tests"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:vector-tile:qa:multi-cluster"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:watcher:qa:rest"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:watcher:qa:with-monitoring"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:watcher:qa:with-security"); + map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:server:mixed-cluster"); return map; } diff --git a/client/rest-high-level/build.gradle b/client/rest-high-level/build.gradle index 743f64b3b28d3..bcbc73f643298 100644 --- a/client/rest-high-level/build.gradle +++ b/client/rest-high-level/build.gradle @@ -61,11 +61,6 @@ tasks.named('forbiddenApisMain').configure { signaturesFiles += files('src/main/resources/forbidden/rest-high-level-signatures.txt') } -tasks.named('splitPackagesAudit').configure { - // the client package should be owned by the client, but server has some classes there too - ignoreClasses 'org.elasticsearch.client.*' -} - // we don't have tests now, as HLRC is in the process of being removed tasks.named("test").configure {enabled = false } diff --git a/client/rest-high-level/roles.yml b/client/rest-high-level/roles.yml deleted file mode 100644 index d3d0630f43058..0000000000000 --- a/client/rest-high-level/roles.yml +++ /dev/null @@ -1,12 +0,0 @@ -admin: - cluster: - - all - indices: - - names: '*' - privileges: - - all - run_as: [ '*' ] - applications: - - application: '*' - privileges: [ '*' ] - resources: [ '*' ] diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java deleted file mode 100644 index fdbb5d0c86d6f..0000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ /dev/null @@ -1,528 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.client; - -import org.apache.http.HttpEntity; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.entity.ContentType; -import org.apache.http.nio.entity.NByteArrayEntity; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.action.DocWriteRequest; -import org.elasticsearch.action.bulk.BulkRequest; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.support.ActiveShardCount; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; -import org.elasticsearch.action.update.UpdateRequest; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.lucene.uid.Versions; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.core.Nullable; -import org.elasticsearch.core.SuppressForbidden; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.index.VersionType; -import org.elasticsearch.index.seqno.SequenceNumbers; -import org.elasticsearch.rest.action.search.RestSearchAction; -import org.elasticsearch.xcontent.DeprecationHandler; -import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentType; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; -import java.nio.charset.Charset; -import java.util.HashMap; -import java.util.Locale; -import java.util.Map; -import java.util.StringJoiner; - -final class RequestConverters { - static final XContentType REQUEST_BODY_CONTENT_TYPE = XContentType.JSON; - - private RequestConverters() { - // Contains only status utility methods - } - - static Request bulk(BulkRequest bulkRequest) throws IOException { - Request request = new Request(HttpPost.METHOD_NAME, "/_bulk"); - - Params parameters = new Params(); - parameters.withTimeout(bulkRequest.timeout()); - parameters.withRefreshPolicy(bulkRequest.getRefreshPolicy()); - parameters.withPipeline(bulkRequest.pipeline()); - parameters.withRouting(bulkRequest.routing()); - // Bulk API only supports newline delimited JSON or Smile. Before executing - // the bulk, we need to check that all requests have the same content-type - // and this content-type is supported by the Bulk API. 
- XContentType bulkContentType = null; - for (int i = 0; i < bulkRequest.numberOfActions(); i++) { - DocWriteRequest action = bulkRequest.requests().get(i); - - DocWriteRequest.OpType opType = action.opType(); - if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) { - bulkContentType = enforceSameContentType((IndexRequest) action, bulkContentType); - - } else if (opType == DocWriteRequest.OpType.UPDATE) { - UpdateRequest updateRequest = (UpdateRequest) action; - if (updateRequest.doc() != null) { - bulkContentType = enforceSameContentType(updateRequest.doc(), bulkContentType); - } - if (updateRequest.upsertRequest() != null) { - bulkContentType = enforceSameContentType(updateRequest.upsertRequest(), bulkContentType); - } - } - } - - if (bulkContentType == null) { - bulkContentType = XContentType.JSON; - } - - final byte separator = bulkContentType.xContent().streamSeparator(); - final ContentType requestContentType = createContentType(bulkContentType); - - ByteArrayOutputStream content = new ByteArrayOutputStream(); - for (DocWriteRequest action : bulkRequest.requests()) { - DocWriteRequest.OpType opType = action.opType(); - - try (XContentBuilder metadata = XContentBuilder.builder(bulkContentType.xContent())) { - metadata.startObject(); - { - metadata.startObject(opType.getLowercase()); - if (Strings.hasLength(action.index())) { - metadata.field("_index", action.index()); - } - if (Strings.hasLength(action.id())) { - metadata.field("_id", action.id()); - } - if (Strings.hasLength(action.routing())) { - metadata.field("routing", action.routing()); - } - if (action.version() != Versions.MATCH_ANY) { - metadata.field("version", action.version()); - } - - VersionType versionType = action.versionType(); - if (versionType != VersionType.INTERNAL) { - if (versionType == VersionType.EXTERNAL) { - metadata.field("version_type", "external"); - } else if (versionType == VersionType.EXTERNAL_GTE) { - metadata.field("version_type", "external_gte"); - } - } - - if (action.ifSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) { - metadata.field("if_seq_no", action.ifSeqNo()); - metadata.field("if_primary_term", action.ifPrimaryTerm()); - } - - if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) { - IndexRequest indexRequest = (IndexRequest) action; - if (Strings.hasLength(indexRequest.getPipeline())) { - metadata.field("pipeline", indexRequest.getPipeline()); - } - } else if (opType == DocWriteRequest.OpType.UPDATE) { - UpdateRequest updateRequest = (UpdateRequest) action; - if (updateRequest.retryOnConflict() > 0) { - metadata.field("retry_on_conflict", updateRequest.retryOnConflict()); - } - if (updateRequest.fetchSource() != null) { - metadata.field("_source", updateRequest.fetchSource()); - } - } - metadata.endObject(); - } - metadata.endObject(); - - BytesRef metadataSource = BytesReference.bytes(metadata).toBytesRef(); - content.write(metadataSource.bytes, metadataSource.offset, metadataSource.length); - content.write(separator); - } - - BytesRef source = null; - if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) { - IndexRequest indexRequest = (IndexRequest) action; - BytesReference indexSource = indexRequest.source(); - XContentType indexXContentType = indexRequest.getContentType(); - - try ( - XContentParser parser = XContentHelper.createParser( - /* - * EMPTY and THROW are fine here because we just call - * copyCurrentStructure which doesn't touch the - * registry or deprecation. 
- */ - NamedXContentRegistry.EMPTY, - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - indexSource, - indexXContentType - ) - ) { - try (XContentBuilder builder = XContentBuilder.builder(bulkContentType.xContent())) { - builder.copyCurrentStructure(parser); - source = BytesReference.bytes(builder).toBytesRef(); - } - } - } else if (opType == DocWriteRequest.OpType.UPDATE) { - source = XContentHelper.toXContent((UpdateRequest) action, bulkContentType, false).toBytesRef(); - } - - if (source != null) { - content.write(source.bytes, source.offset, source.length); - content.write(separator); - } - } - request.addParameters(parameters.asMap()); - request.setEntity(new NByteArrayEntity(content.toByteArray(), 0, content.size(), requestContentType)); - return request; - } - - static Request index(IndexRequest indexRequest) { - String method = Strings.hasLength(indexRequest.id()) ? HttpPut.METHOD_NAME : HttpPost.METHOD_NAME; - - String endpoint; - if (indexRequest.opType() == DocWriteRequest.OpType.CREATE) { - endpoint = endpoint(indexRequest.index(), "_create", indexRequest.id()); - } else { - endpoint = endpoint(indexRequest.index(), indexRequest.id()); - } - - Request request = new Request(method, endpoint); - - Params parameters = new Params(); - parameters.withRouting(indexRequest.routing()); - parameters.withTimeout(indexRequest.timeout()); - parameters.withVersion(indexRequest.version()); - parameters.withVersionType(indexRequest.versionType()); - parameters.withIfSeqNo(indexRequest.ifSeqNo()); - parameters.withIfPrimaryTerm(indexRequest.ifPrimaryTerm()); - parameters.withPipeline(indexRequest.getPipeline()); - parameters.withRefreshPolicy(indexRequest.getRefreshPolicy()); - parameters.withWaitForActiveShards(indexRequest.waitForActiveShards()); - parameters.withRequireAlias(indexRequest.isRequireAlias()); - - BytesRef source = indexRequest.source().toBytesRef(); - ContentType contentType = createContentType(indexRequest.getContentType()); - request.addParameters(parameters.asMap()); - request.setEntity(new NByteArrayEntity(source.bytes, source.offset, source.length, contentType)); - return request; - } - - /** - * Convert a {@linkplain SearchRequest} into a {@linkplain Request}. - * @param searchRequest the request to convert - * @param searchEndpoint the name of the search endpoint. {@literal _search} - * for standard searches and {@literal _rollup_search} for rollup - * searches. 
- */ - static Request search(SearchRequest searchRequest, String searchEndpoint) throws IOException { - Request request = new Request(HttpPost.METHOD_NAME, endpoint(searchRequest.indices(), searchEndpoint)); - - Params params = new Params(); - addSearchRequestParams(params, searchRequest); - - if (searchRequest.source() != null) { - request.setEntity(createEntity(searchRequest.source(), REQUEST_BODY_CONTENT_TYPE)); - } - request.addParameters(params.asMap()); - return request; - } - - private static void addSearchRequestParams(Params params, SearchRequest searchRequest) { - params.putParam(RestSearchAction.TYPED_KEYS_PARAM, "true"); - params.withRouting(searchRequest.routing()); - params.withPreference(searchRequest.preference()); - if (SearchRequest.DEFAULT_INDICES_OPTIONS.equals(searchRequest.indicesOptions()) == false) { - params.withIndicesOptions(searchRequest.indicesOptions()); - } - params.withSearchType(searchRequest.searchType().name().toLowerCase(Locale.ROOT)); - if (searchRequest.isCcsMinimizeRoundtrips() != SearchRequest.defaultCcsMinimizeRoundtrips(searchRequest)) { - params.putParam("ccs_minimize_roundtrips", Boolean.toString(searchRequest.isCcsMinimizeRoundtrips())); - } - if (searchRequest.getPreFilterShardSize() != null) { - params.putParam("pre_filter_shard_size", Integer.toString(searchRequest.getPreFilterShardSize())); - } - params.withMaxConcurrentShardRequests(searchRequest.getMaxConcurrentShardRequests()); - if (searchRequest.requestCache() != null) { - params.withRequestCache(searchRequest.requestCache()); - } - if (searchRequest.allowPartialSearchResults() != null) { - params.withAllowPartialResults(searchRequest.allowPartialSearchResults()); - } - params.withBatchedReduceSize(searchRequest.getBatchedReduceSize()); - if (searchRequest.scroll() != null) { - params.putParam("scroll", searchRequest.scroll().keepAlive()); - } - } - - private static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException { - return createEntity(toXContent, xContentType, ToXContent.EMPTY_PARAMS); - } - - private static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType, ToXContent.Params toXContentParams) - throws IOException { - BytesRef source = XContentHelper.toXContent(toXContent, xContentType, toXContentParams, false).toBytesRef(); - return new NByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType)); - } - - private static String endpoint(String index, String type, String id) { - return new EndpointBuilder().addPathPart(index, type, id).build(); - } - - private static String endpoint(String index, String id) { - return new EndpointBuilder().addPathPart(index, "_doc", id).build(); - } - - private static String endpoint(String[] indices, String endpoint) { - return new EndpointBuilder().addCommaSeparatedPathParts(indices).addPathPartAsIs(endpoint).build(); - } - - /** - * Returns a {@link ContentType} from a given {@link XContentType}. - * - * @param xContentType the {@link XContentType} - * @return the {@link ContentType} - */ - @SuppressForbidden(reason = "Only allowed place to convert a XContentType to a ContentType") - private static ContentType createContentType(final XContentType xContentType) { - return ContentType.create(xContentType.mediaTypeWithoutParameters(), (Charset) null); - } - - /** - * Utility class to help with common parameter names and patterns. Wraps - * a {@link Request} and adds the parameters to it directly. 
- */ - private static class Params { - private final Map parameters = new HashMap<>(); - - Params() {} - - Params putParam(String name, String value) { - if (Strings.hasLength(value)) { - parameters.put(name, value); - } - return this; - } - - Params putParam(String key, TimeValue value) { - if (value != null) { - return putParam(key, value.getStringRep()); - } - return this; - } - - Map asMap() { - return parameters; - } - - Params withPipeline(String pipeline) { - return putParam("pipeline", pipeline); - } - - Params withPreference(String preference) { - return putParam("preference", preference); - } - - Params withSearchType(String searchType) { - return putParam("search_type", searchType); - } - - Params withMaxConcurrentShardRequests(int maxConcurrentShardRequests) { - return putParam("max_concurrent_shard_requests", Integer.toString(maxConcurrentShardRequests)); - } - - Params withBatchedReduceSize(int batchedReduceSize) { - return putParam("batched_reduce_size", Integer.toString(batchedReduceSize)); - } - - Params withRequestCache(boolean requestCache) { - return putParam("request_cache", Boolean.toString(requestCache)); - } - - Params withAllowPartialResults(boolean allowPartialSearchResults) { - return putParam("allow_partial_search_results", Boolean.toString(allowPartialSearchResults)); - } - - Params withRefreshPolicy(RefreshPolicy refreshPolicy) { - if (refreshPolicy != RefreshPolicy.NONE) { - return putParam("refresh", refreshPolicy.getValue()); - } - return this; - } - - Params withRouting(String routing) { - return putParam("routing", routing); - } - - Params withTimeout(TimeValue timeout) { - return putParam("timeout", timeout); - } - - Params withVersion(long version) { - if (version != Versions.MATCH_ANY) { - return putParam("version", Long.toString(version)); - } - return this; - } - - Params withVersionType(VersionType versionType) { - if (versionType != VersionType.INTERNAL) { - return putParam("version_type", versionType.name().toLowerCase(Locale.ROOT)); - } - return this; - } - - Params withIfSeqNo(long ifSeqNo) { - if (ifSeqNo != SequenceNumbers.UNASSIGNED_SEQ_NO) { - return putParam("if_seq_no", Long.toString(ifSeqNo)); - } - return this; - } - - Params withIfPrimaryTerm(long ifPrimaryTerm) { - if (ifPrimaryTerm != SequenceNumbers.UNASSIGNED_PRIMARY_TERM) { - return putParam("if_primary_term", Long.toString(ifPrimaryTerm)); - } - return this; - } - - Params withWaitForActiveShards(ActiveShardCount activeShardCount) { - return withWaitForActiveShards(activeShardCount, ActiveShardCount.DEFAULT); - } - - Params withWaitForActiveShards(ActiveShardCount activeShardCount, ActiveShardCount defaultActiveShardCount) { - if (activeShardCount != null && activeShardCount != defaultActiveShardCount) { - return putParam("wait_for_active_shards", activeShardCount.toString().toLowerCase(Locale.ROOT)); - } - return this; - } - - Params withRequireAlias(boolean requireAlias) { - if (requireAlias) { - return putParam("require_alias", Boolean.toString(requireAlias)); - } - return this; - } - - Params withIndicesOptions(IndicesOptions indicesOptions) { - if (indicesOptions != null) { - withIgnoreUnavailable(indicesOptions.ignoreUnavailable()); - putParam("allow_no_indices", Boolean.toString(indicesOptions.allowNoIndices())); - String expandWildcards; - if (indicesOptions.expandWildcardExpressions() == false) { - expandWildcards = "none"; - } else { - StringJoiner joiner = new StringJoiner(","); - if (indicesOptions.expandWildcardsOpen()) { - joiner.add("open"); - } - if 
(indicesOptions.expandWildcardsClosed()) { - joiner.add("closed"); - } - expandWildcards = joiner.toString(); - } - putParam("expand_wildcards", expandWildcards); - putParam("ignore_throttled", Boolean.toString(indicesOptions.ignoreThrottled())); - } - return this; - } - - Params withIgnoreUnavailable(boolean ignoreUnavailable) { - // Always explicitly place the ignore_unavailable value. - putParam("ignore_unavailable", Boolean.toString(ignoreUnavailable)); - return this; - } - } - - /** - * Ensure that the {@link IndexRequest}'s content type is supported by the Bulk API and that it conforms - * to the current {@link BulkRequest}'s content type (if it's known at the time of this method get called). - * - * @return the {@link IndexRequest}'s content type - */ - private static XContentType enforceSameContentType(IndexRequest indexRequest, @Nullable XContentType xContentType) { - XContentType requestContentType = indexRequest.getContentType(); - if (requestContentType.canonical() != XContentType.JSON && requestContentType.canonical() != XContentType.SMILE) { - throw new IllegalArgumentException( - "Unsupported content-type found for request with content-type [" - + requestContentType - + "], only JSON and SMILE are supported" - ); - } - if (xContentType == null) { - return requestContentType; - } - if (requestContentType.canonical() != xContentType.canonical()) { - throw new IllegalArgumentException( - "Mismatching content-type found for request with content-type [" - + requestContentType - + "], previous requests have content-type [" - + xContentType - + "]" - ); - } - return xContentType; - } - - /** - * Utility class to build request's endpoint given its parts as strings - */ - private static class EndpointBuilder { - - private final StringJoiner joiner = new StringJoiner("/", "/", ""); - - EndpointBuilder addPathPart(String... parts) { - for (String part : parts) { - if (Strings.hasLength(part)) { - joiner.add(encodePart(part)); - } - } - return this; - } - - EndpointBuilder addCommaSeparatedPathParts(String[] parts) { - addPathPart(String.join(",", parts)); - return this; - } - - EndpointBuilder addPathPartAsIs(String... parts) { - for (String part : parts) { - if (Strings.hasLength(part)) { - joiner.add(part); - } - } - return this; - } - - private String build() { - return joiner.toString(); - } - - private static String encodePart(String pathPart) { - try { - // encode each part (e.g. index, type and id) separately before merging them into the path - // we prepend "/" to the path part to make this path absolute, otherwise there can be issues with - // paths that start with `-` or contain `:` - // the authority must be an empty string and not null, else paths that being with slashes could have them - // misinterpreted as part of the authority. - URI uri = new URI(null, "", "/" + pathPart, null, null); - // manually encode any slash that each part may contain - return uri.getRawPath().substring(1).replaceAll("/", "%2F"); - } catch (URISyntaxException e) { - throw new IllegalArgumentException("Path part [" + pathPart + "] couldn't be encoded", e); - } - } - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java deleted file mode 100644 index 5d779ea17f534..0000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ /dev/null @@ -1,969 +0,0 @@ -/* - * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.client; - -import org.apache.http.Header; -import org.apache.http.HttpEntity; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchStatusException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.bulk.BulkRequest; -import org.elasticsearch.action.bulk.BulkResponse; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.aggregations.bucket.adjacency.AdjacencyMatrixAggregationBuilder; -import org.elasticsearch.aggregations.bucket.adjacency.ParsedAdjacencyMatrix; -import org.elasticsearch.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder; -import org.elasticsearch.aggregations.bucket.histogram.ParsedAutoDateHistogram; -import org.elasticsearch.aggregations.bucket.timeseries.ParsedTimeSeries; -import org.elasticsearch.aggregations.bucket.timeseries.TimeSeriesAggregationBuilder; -import org.elasticsearch.aggregations.pipeline.DerivativePipelineAggregationBuilder; -import org.elasticsearch.client.analytics.ParsedStringStats; -import org.elasticsearch.client.analytics.ParsedTopMetrics; -import org.elasticsearch.client.analytics.StringStatsAggregationBuilder; -import org.elasticsearch.client.analytics.TopMetricsAggregationBuilder; -import org.elasticsearch.client.core.MainResponse; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.util.concurrent.FutureUtils; -import org.elasticsearch.common.util.concurrent.ListenableFuture; -import org.elasticsearch.core.CheckedConsumer; -import org.elasticsearch.core.CheckedFunction; -import org.elasticsearch.plugins.spi.NamedXContentProvider; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.search.aggregations.Aggregation; -import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.composite.ParsedComposite; -import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.filter.FiltersAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.filter.ParsedFilter; -import org.elasticsearch.search.aggregations.bucket.filter.ParsedFilters; -import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.geogrid.ParsedGeoHashGrid; -import org.elasticsearch.search.aggregations.bucket.geogrid.ParsedGeoTileGrid; -import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.global.ParsedGlobal; 
-import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.histogram.ParsedDateHistogram; -import org.elasticsearch.search.aggregations.bucket.histogram.ParsedHistogram; -import org.elasticsearch.search.aggregations.bucket.histogram.ParsedVariableWidthHistogram; -import org.elasticsearch.search.aggregations.bucket.histogram.VariableWidthHistogramAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.missing.MissingAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.missing.ParsedMissing; -import org.elasticsearch.search.aggregations.bucket.nested.NestedAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.nested.ParsedNested; -import org.elasticsearch.search.aggregations.bucket.nested.ParsedReverseNested; -import org.elasticsearch.search.aggregations.bucket.nested.ReverseNestedAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.range.DateRangeAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.range.GeoDistanceAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.range.IpRangeAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.range.ParsedBinaryRange; -import org.elasticsearch.search.aggregations.bucket.range.ParsedDateRange; -import org.elasticsearch.search.aggregations.bucket.range.ParsedGeoDistance; -import org.elasticsearch.search.aggregations.bucket.range.ParsedRange; -import org.elasticsearch.search.aggregations.bucket.range.RangeAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.sampler.InternalSampler; -import org.elasticsearch.search.aggregations.bucket.sampler.ParsedSampler; -import org.elasticsearch.search.aggregations.bucket.terms.DoubleTerms; -import org.elasticsearch.search.aggregations.bucket.terms.LongRareTerms; -import org.elasticsearch.search.aggregations.bucket.terms.LongTerms; -import org.elasticsearch.search.aggregations.bucket.terms.ParsedDoubleTerms; -import org.elasticsearch.search.aggregations.bucket.terms.ParsedLongRareTerms; -import org.elasticsearch.search.aggregations.bucket.terms.ParsedLongTerms; -import org.elasticsearch.search.aggregations.bucket.terms.ParsedSignificantLongTerms; -import org.elasticsearch.search.aggregations.bucket.terms.ParsedSignificantStringTerms; -import org.elasticsearch.search.aggregations.bucket.terms.ParsedStringRareTerms; -import org.elasticsearch.search.aggregations.bucket.terms.ParsedStringTerms; -import org.elasticsearch.search.aggregations.bucket.terms.SignificantLongTerms; -import org.elasticsearch.search.aggregations.bucket.terms.SignificantStringTerms; -import org.elasticsearch.search.aggregations.bucket.terms.StringRareTerms; -import org.elasticsearch.search.aggregations.bucket.terms.StringTerms; -import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.CardinalityAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.ExtendedStatsAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.GeoBoundsAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.GeoCentroidAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.InternalHDRPercentileRanks; -import org.elasticsearch.search.aggregations.metrics.InternalHDRPercentiles; -import 
org.elasticsearch.search.aggregations.metrics.InternalTDigestPercentileRanks; -import org.elasticsearch.search.aggregations.metrics.InternalTDigestPercentiles; -import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.MedianAbsoluteDeviationAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.ParsedAvg; -import org.elasticsearch.search.aggregations.metrics.ParsedCardinality; -import org.elasticsearch.search.aggregations.metrics.ParsedExtendedStats; -import org.elasticsearch.search.aggregations.metrics.ParsedGeoBounds; -import org.elasticsearch.search.aggregations.metrics.ParsedGeoCentroid; -import org.elasticsearch.search.aggregations.metrics.ParsedHDRPercentileRanks; -import org.elasticsearch.search.aggregations.metrics.ParsedHDRPercentiles; -import org.elasticsearch.search.aggregations.metrics.ParsedMax; -import org.elasticsearch.search.aggregations.metrics.ParsedMedianAbsoluteDeviation; -import org.elasticsearch.search.aggregations.metrics.ParsedMin; -import org.elasticsearch.search.aggregations.metrics.ParsedScriptedMetric; -import org.elasticsearch.search.aggregations.metrics.ParsedStats; -import org.elasticsearch.search.aggregations.metrics.ParsedSum; -import org.elasticsearch.search.aggregations.metrics.ParsedTDigestPercentileRanks; -import org.elasticsearch.search.aggregations.metrics.ParsedTDigestPercentiles; -import org.elasticsearch.search.aggregations.metrics.ParsedTopHits; -import org.elasticsearch.search.aggregations.metrics.ParsedValueCount; -import org.elasticsearch.search.aggregations.metrics.ParsedWeightedAvg; -import org.elasticsearch.search.aggregations.metrics.ScriptedMetricAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.StatsAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.TopHitsAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.ValueCountAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.WeightedAvgAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.ExtendedStatsBucketPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.InternalBucketMetricValue; -import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue; -import org.elasticsearch.search.aggregations.pipeline.ParsedBucketMetricValue; -import org.elasticsearch.search.aggregations.pipeline.ParsedDerivative; -import org.elasticsearch.search.aggregations.pipeline.ParsedExtendedStatsBucket; -import org.elasticsearch.search.aggregations.pipeline.ParsedPercentilesBucket; -import org.elasticsearch.search.aggregations.pipeline.ParsedSimpleValue; -import org.elasticsearch.search.aggregations.pipeline.ParsedStatsBucket; -import org.elasticsearch.search.aggregations.pipeline.PercentilesBucketPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.StatsBucketPipelineAggregationBuilder; -import org.elasticsearch.search.suggest.Suggest; -import org.elasticsearch.search.suggest.completion.CompletionSuggestion; -import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder; -import org.elasticsearch.search.suggest.phrase.PhraseSuggestion; -import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder; -import org.elasticsearch.search.suggest.term.TermSuggestion; -import 
org.elasticsearch.search.suggest.term.TermSuggestionBuilder; -import org.elasticsearch.xcontent.ContextParser; -import org.elasticsearch.xcontent.DeprecationHandler; -import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; -import org.elasticsearch.xcontent.XContentType; - -import java.io.Closeable; -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.ServiceLoader; -import java.util.Set; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; -import java.util.function.Function; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -import static java.util.Collections.emptySet; -import static java.util.stream.Collectors.toList; - -/** - * High level REST client that wraps an instance of the low level {@link RestClient} and allows to build requests and read responses. The - * {@link RestClient} instance is internally built based on the provided {@link RestClientBuilder} and it gets closed automatically when - * closing the {@link RestHighLevelClient} instance that wraps it. - * - * @deprecated The High Level Rest Client is deprecated in favor of the - * - * Elasticsearch Java API Client - */ -@Deprecated(since = "7.16.0", forRemoval = true) -@SuppressWarnings("removal") -public class RestHighLevelClient implements Closeable { - - private static final Logger logger = LogManager.getLogger(RestHighLevelClient.class); - /** - * Environment variable determining whether to send the 7.x compatibility header - */ - private static final String API_VERSIONING_ENV_VARIABLE = "ELASTIC_CLIENT_APIVERSIONING"; - - // To be called using performClientRequest and performClientRequestAsync to ensure version compatibility check - private final RestClient client; - private final XContentParserConfiguration parserConfig; - private final CheckedConsumer doClose; - private final boolean useAPICompatibility; - - /** Do not access directly but through getVersionValidationFuture() */ - private volatile ListenableFuture> versionValidationFuture; - - /** - * Creates a {@link RestHighLevelClient} given the low level {@link RestClient} that it should use to perform requests and - * a list of entries that allow to parse custom response sections added to Elasticsearch through plugins. - * This constructor can be called by subclasses in case an externally created low-level REST client needs to be provided. - * The consumer argument allows to control what needs to be done when the {@link #close()} method is called. - * Also subclasses can provide parsers for custom response sections added to Elasticsearch through plugins. - */ - protected RestHighLevelClient( - RestClient restClient, - CheckedConsumer doClose, - List namedXContentEntries - ) { - this(restClient, doClose, namedXContentEntries, null); - } - - /** - * Creates a {@link RestHighLevelClient} given the low level {@link RestClient} that it should use to perform requests and - * a list of entries that allow to parse custom response sections added to Elasticsearch through plugins. - * This constructor can be called by subclasses in case an externally created low-level REST client needs to be provided. 
- * The consumer argument allows to control what needs to be done when the {@link #close()} method is called. - * Also subclasses can provide parsers for custom response sections added to Elasticsearch through plugins. - */ - private RestHighLevelClient( - RestClient restClient, - CheckedConsumer doClose, - List namedXContentEntries, - Boolean useAPICompatibility - ) { - this.client = Objects.requireNonNull(restClient, "restClient must not be null"); - this.doClose = Objects.requireNonNull(doClose, "doClose consumer must not be null"); - NamedXContentRegistry registry = new NamedXContentRegistry( - Stream.of(getDefaultNamedXContents().stream(), getProvidedNamedXContents().stream(), namedXContentEntries.stream()) - .flatMap(Function.identity()) - .collect(toList()) - ); - /* - * Ignores deprecation warnings. This is appropriate because it is only - * used to parse responses from Elasticsearch. Any deprecation warnings - * emitted there just mean that you are talking to an old version of - * Elasticsearch. There isn't anything you can do about the deprecation. - */ - this.parserConfig = XContentParserConfiguration.EMPTY.withRegistry(registry) - .withDeprecationHandler(DeprecationHandler.IGNORE_DEPRECATIONS); - if (useAPICompatibility == null && "true".equals(System.getenv(API_VERSIONING_ENV_VARIABLE))) { - this.useAPICompatibility = true; - } else { - this.useAPICompatibility = Boolean.TRUE.equals(useAPICompatibility); - } - } - - /** - * Returns the low-level client that the current high-level client instance is using to perform requests - */ - public final RestClient getLowLevelClient() { - return client; - } - - public final XContentParserConfiguration getParserConfig() { - return parserConfig; - } - - @Override - public final void close() throws IOException { - doClose.accept(client); - } - - /** - * Asynchronously executes a bulk request using the Bulk API. - * See Bulk API on elastic.co - * @param bulkRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @param listener the listener to be notified upon request completion - * @return cancellable that may be used to cancel the request - */ - public final Cancellable bulkAsync(BulkRequest bulkRequest, RequestOptions options, ActionListener listener) { - return performRequestAsyncAndParseEntity( - bulkRequest, - RequestConverters::bulk, - options, - BulkResponse::fromXContent, - listener, - emptySet() - ); - } - - /** - * Index a document using the Index API. - * See Index API on elastic.co - * @param indexRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @return the response - */ - public final IndexResponse index(IndexRequest indexRequest, RequestOptions options) throws IOException { - return performRequestAndParseEntity(indexRequest, RequestConverters::index, options, IndexResponse::fromXContent, emptySet()); - } - - /** - * Asynchronously executes a search using the Search API. - * See Search API on elastic.co - * @param searchRequest the request - * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @param listener the listener to be notified upon request completion - * @return cancellable that may be used to cancel the request - */ - public final Cancellable searchAsync(SearchRequest searchRequest, RequestOptions options, ActionListener listener) { - return performRequestAsyncAndParseEntity( - searchRequest, - r -> RequestConverters.search(r, "_search"), - options, - SearchResponse::fromXContent, - listener, - emptySet() - ); - } - - /** - * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. - */ - @Deprecated - private Resp performRequestAndParseEntity( - Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction entityParser, - Set ignores - ) throws IOException { - return performRequest(request, requestConverter, options, response -> parseEntity(response.getEntity(), entityParser), ignores); - } - - /** - * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. - */ - @Deprecated - private Resp performRequest( - Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction responseConverter, - Set ignores - ) throws IOException { - ActionRequestValidationException validationException = request.validate(); - if (validationException != null && validationException.validationErrors().isEmpty() == false) { - throw validationException; - } - return internalPerformRequest(request, requestConverter, options, responseConverter, ignores); - } - - /** - * Provides common functionality for performing a request. - */ - private Resp internalPerformRequest( - Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction responseConverter, - Set ignores - ) throws IOException { - Request req = requestConverter.apply(request); - req.setOptions(options); - Response response; - try { - response = performClientRequest(req); - } catch (ResponseException e) { - if (ignores.contains(e.getResponse().getStatusLine().getStatusCode())) { - try { - return responseConverter.apply(e.getResponse()); - } catch (Exception innerException) { - // the exception is ignored as we now try to parse the response as an error. - // this covers cases like get where 404 can either be a valid document not found response, - // or an error for which parsing is completely different. We try to consider the 404 response as a valid one - // first. If parsing of the response breaks, we fall back to parsing it as an error. - throw parseResponseException(e); - } - } - throw parseResponseException(e); - } - - try { - return responseConverter.apply(response); - } catch (Exception e) { - throw new IOException("Unable to parse response body for " + response, e); - } - } - - /** - * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. 
- * @return Cancellable instance that may be used to cancel the request - */ - @Deprecated - private Cancellable performRequestAsyncAndParseEntity( - Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction entityParser, - ActionListener listener, - Set ignores - ) { - return performRequestAsync( - request, - requestConverter, - options, - response -> parseEntity(response.getEntity(), entityParser), - listener, - ignores - ); - } - - /** - * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. - * @return Cancellable instance that may be used to cancel the request - */ - @Deprecated - private Cancellable performRequestAsync( - Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction responseConverter, - ActionListener listener, - Set ignores - ) { - ActionRequestValidationException validationException = request.validate(); - if (validationException != null && validationException.validationErrors().isEmpty() == false) { - listener.onFailure(validationException); - return Cancellable.NO_OP; - } - return internalPerformRequestAsync(request, requestConverter, options, responseConverter, listener, ignores); - } - - /** - * Provides common functionality for asynchronously performing a request. - * @return Cancellable instance that may be used to cancel the request - */ - private Cancellable internalPerformRequestAsync( - Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction responseConverter, - ActionListener listener, - Set ignores - ) { - Request req; - try { - req = requestConverter.apply(request); - } catch (Exception e) { - listener.onFailure(e); - return Cancellable.NO_OP; - } - req.setOptions(options); - - ResponseListener responseListener = wrapResponseListener(responseConverter, listener, ignores); - return performClientRequestAsync(req, responseListener); - } - - private ResponseListener wrapResponseListener( - CheckedFunction responseConverter, - ActionListener actionListener, - Set ignores - ) { - return new ResponseListener() { - @Override - public void onSuccess(Response response) { - try { - actionListener.onResponse(responseConverter.apply(response)); - } catch (Exception e) { - IOException ioe = new IOException("Unable to parse response body for " + response, e); - onFailure(ioe); - } - } - - @Override - public void onFailure(Exception exception) { - if (exception instanceof ResponseException responseException) { - Response response = responseException.getResponse(); - if (ignores.contains(response.getStatusLine().getStatusCode())) { - try { - actionListener.onResponse(responseConverter.apply(response)); - } catch (Exception innerException) { - // the exception is ignored as we now try to parse the response as an error. - // this covers cases like get where 404 can either be a valid document not found response, - // or an error for which parsing is completely different. We try to consider the 404 response as a valid one - // first. If parsing of the response breaks, we fall back to parsing it as an error. - actionListener.onFailure(parseResponseException(responseException)); - } - } else { - actionListener.onFailure(parseResponseException(responseException)); - } - } else { - actionListener.onFailure(exception); - } - } - }; - } - - /** - * Converts a {@link ResponseException} obtained from the low level REST client into an {@link ElasticsearchException}. 
- * If a response body was returned, tries to parse it as an error returned from Elasticsearch. - * If no response body was returned or anything goes wrong while parsing the error, returns a new {@link ElasticsearchStatusException} - * that wraps the original {@link ResponseException}. The potential exception obtained while parsing is added to the returned - * exception as a suppressed exception. This method is guaranteed to not throw any exception eventually thrown while parsing. - */ - private ElasticsearchStatusException parseResponseException(ResponseException responseException) { - Response response = responseException.getResponse(); - HttpEntity entity = response.getEntity(); - ElasticsearchStatusException elasticsearchException; - RestStatus restStatus = RestStatus.fromCode(response.getStatusLine().getStatusCode()); - - if (entity == null) { - elasticsearchException = new ElasticsearchStatusException(responseException.getMessage(), restStatus, responseException); - } else { - try { - elasticsearchException = parseEntity(entity, RestResponse::errorFromXContent); - elasticsearchException.addSuppressed(responseException); - } catch (Exception e) { - elasticsearchException = new ElasticsearchStatusException("Unable to parse response body", restStatus, responseException); - elasticsearchException.addSuppressed(e); - } - } - return elasticsearchException; - } - - private Resp parseEntity(final HttpEntity entity, final CheckedFunction entityParser) - throws IOException { - if (entity == null) { - throw new IllegalStateException("Response body expected but not returned"); - } - if (entity.getContentType() == null) { - throw new IllegalStateException("Elasticsearch didn't return the [Content-Type] header, unable to parse response body"); - } - XContentType xContentType = XContentType.fromMediaType(entity.getContentType().getValue()); - if (xContentType == null) { - throw new IllegalStateException("Unsupported Content-Type: " + entity.getContentType().getValue()); - } - try (XContentParser parser = xContentType.xContent().createParser(parserConfig, entity.getContent())) { - return entityParser.apply(parser); - } - } - - private enum EntityType { - JSON() { - @Override - public String header() { - return "application/json"; - } - - @Override - public String compatibleHeader() { - return "application/vnd.elasticsearch+json; compatible-with=7"; - } - }, - NDJSON() { - @Override - public String header() { - return "application/x-ndjson"; - } - - @Override - public String compatibleHeader() { - return "application/vnd.elasticsearch+x-ndjson; compatible-with=7"; - } - }, - STAR() { - @Override - public String header() { - return "application/*"; - } - - @Override - public String compatibleHeader() { - return "application/vnd.elasticsearch+json; compatible-with=7"; - } - }, - YAML() { - @Override - public String header() { - return "application/yaml"; - } - - @Override - public String compatibleHeader() { - return "application/vnd.elasticsearch+yaml; compatible-with=7"; - } - }, - SMILE() { - @Override - public String header() { - return "application/smile"; - } - - @Override - public String compatibleHeader() { - return "application/vnd.elasticsearch+smile; compatible-with=7"; - } - }, - CBOR() { - @Override - public String header() { - return "application/cbor"; - } - - @Override - public String compatibleHeader() { - return "application/vnd.elasticsearch+cbor; compatible-with=7"; - } - }; - - public abstract String header(); - - public abstract String compatibleHeader(); - - @Override - public 
String toString() { - return header(); - } - } - - private Cancellable performClientRequestAsync(Request request, ResponseListener listener) { - // Add compatibility request headers if compatibility mode has been enabled - if (this.useAPICompatibility) { - modifyRequestForCompatibility(request); - } - - ListenableFuture> versionCheck = getVersionValidationFuture(); - - // Create a future that tracks cancellation of this method's result and forwards cancellation to the actual LLRC request. - CompletableFuture cancellationForwarder = new CompletableFuture<>(); - Cancellable result = new Cancellable() { - @Override - public void cancel() { - // Raise the flag by completing the future - FutureUtils.cancel(cancellationForwarder); - } - - @Override - void runIfNotCancelled(Runnable runnable) { - if (cancellationForwarder.isCancelled()) { - throw newCancellationException(); - } - runnable.run(); - } - }; - - // Send the request after we have done the version compatibility check. Note that if it has already happened, the listener will - // be called immediately on the same thread with no asynchronous scheduling overhead. - versionCheck.addListener(new ActionListener<>() { - @Override - public void onResponse(Optional validation) { - if (validation.isPresent() == false) { - // Send the request and propagate cancellation - Cancellable call = client.performRequestAsync(request, listener); - cancellationForwarder.whenComplete((r, t) -> - // Forward cancellation to the actual request (no need to check parameters as the - // only way for cancellationForwarder to be completed is by being cancelled). - call.cancel()); - } else { - // Version validation wasn't successful, fail the request with the validation result. - listener.onFailure(new ElasticsearchException(validation.get())); - } - } - - @Override - public void onFailure(Exception e) { - // Propagate validation request failure. This will be transient since `getVersionValidationFuture` clears the validation - // future if the request fails, leading to retries at the next HLRC request (see comments below). - listener.onFailure(e); - } - }); - - return result; - } - - /** - * Go through all the request's existing headers, looking for {@code headerName} headers and if they exist, - * changing them to use version compatibility. If no request headers are changed, modify the entity type header if appropriate - */ - private boolean addCompatibilityFor(RequestOptions.Builder newOptions, Header entityHeader, String headerName) { - // Modify any existing "Content-Type" headers on the request to use the version compatibility, if available - boolean contentTypeModified = false; - for (Header header : new ArrayList<>(newOptions.getHeaders())) { - if (headerName.equalsIgnoreCase(header.getName()) == false) { - continue; - } - contentTypeModified = contentTypeModified || modifyHeader(newOptions, header, headerName); - } - - // If there were no request-specific headers, modify the request entity's header to be compatible - if (entityHeader != null && contentTypeModified == false) { - contentTypeModified = modifyHeader(newOptions, entityHeader, headerName); - } - - return contentTypeModified; - } - - /** - * Modify the given header to be version compatible, if necessary. - * Returns true if a modification was made, false otherwise. 
- */ - private boolean modifyHeader(RequestOptions.Builder newOptions, Header header, String headerName) { - for (EntityType type : EntityType.values()) { - final String headerValue = header.getValue(); - if (headerValue.startsWith(type.header())) { - String newHeaderValue = headerValue.replace(type.header(), type.compatibleHeader()); - newOptions.removeHeader(header.getName()); - newOptions.addHeader(headerName, newHeaderValue); - return true; - } - } - return false; - } - - /** - * Make all necessary changes to support API compatibility for the given request. This includes - * modifying the "Content-Type" and "Accept" headers if present, or modifying the header based - * on the request's entity type. - */ - private void modifyRequestForCompatibility(Request request) { - final Header entityHeader = request.getEntity() == null ? null : request.getEntity().getContentType(); - final RequestOptions.Builder newOptions = request.getOptions().toBuilder(); - - addCompatibilityFor(newOptions, entityHeader, "Content-Type"); - if (request.getOptions().containsHeader("Accept")) { - addCompatibilityFor(newOptions, entityHeader, "Accept"); - } else { - // There is no entity, and no existing accept header, but we still need one - // with compatibility, so use the compatible JSON (default output) format - newOptions.addHeader("Accept", EntityType.JSON.compatibleHeader()); - } - request.setOptions(newOptions); - } - - private Response performClientRequest(Request request) throws IOException { - // Add compatibility request headers if compatibility mode has been enabled - if (this.useAPICompatibility) { - modifyRequestForCompatibility(request); - } - - Optional versionValidation; - try { - final var future = new PlainActionFuture>(); - getVersionValidationFuture().addListener(future); - versionValidation = future.get(); - } catch (InterruptedException | ExecutionException e) { - // Unlikely to happen - throw new ElasticsearchException(e); - } - - if (versionValidation.isPresent() == false) { - return client.performRequest(request); - } else { - throw new ElasticsearchException(versionValidation.get()); - } - } - - /** - * Returns a future that asynchronously validates the Elasticsearch product version. Its result is an optional string: if empty then - * validation was successful, if present it contains the validation error. API requests should be chained to this future and check - * the validation result before going further. - *
- * This future is a memoization of the first successful request to the "/" endpoint and the subsequent compatibility check - * ({@see #versionValidationFuture}). Further client requests reuse its result. - *
- * If the version check request fails (e.g. network error), {@link #versionValidationFuture} is cleared so that a new validation - * request is sent at the next HLRC request. This allows retries to happen while avoiding a busy retry loop (LLRC retries on the node - * pool still happen). - */ - private ListenableFuture> getVersionValidationFuture() { - ListenableFuture> currentFuture = this.versionValidationFuture; - if (currentFuture != null) { - return currentFuture; - } else { - synchronized (this) { - // Re-check in synchronized block - currentFuture = this.versionValidationFuture; - if (currentFuture != null) { - return currentFuture; - } - ListenableFuture> future = new ListenableFuture<>(); - this.versionValidationFuture = future; - - // Asynchronously call the info endpoint and complete the future with the version validation result. - Request req = new Request("GET", "/"); - // These status codes are nominal in the context of product version verification - req.addParameter("ignore", "401,403"); - client.performRequestAsync(req, new ResponseListener() { - @Override - public void onSuccess(Response response) { - Optional validation; - try { - validation = getVersionValidation(response); - } catch (Exception e) { - logger.error("Failed to parse info response", e); - validation = Optional.of( - "Failed to parse info response. Check logs for detailed information - " + e.getMessage() - ); - } - future.onResponse(validation); - } - - @Override - public void onFailure(Exception exception) { - - // Fail the requests (this one and the ones waiting for it) and clear the future - // so that we retry the next time the client executes a request. - versionValidationFuture = null; - future.onFailure(exception); - } - }); - - return future; - } - } - } - - /** - * Validates that the response info() is a compatible Elasticsearch version. - * - * @return an optional string. If empty, version is compatible. Otherwise, it's the message to return to the application. - */ - private Optional getVersionValidation(Response response) throws IOException { - // Let requests go through if the client doesn't have permissions for the info endpoint. - int statusCode = response.getStatusLine().getStatusCode(); - if (statusCode == 401 || statusCode == 403) { - return Optional.empty(); - } - - MainResponse mainResponse; - try { - mainResponse = parseEntity(response.getEntity(), MainResponse::fromXContent); - } catch (ResponseException e) { - throw parseResponseException(e); - } - - String version = mainResponse.getVersion().getNumber(); - if (Strings.hasLength(version) == false) { - return Optional.of("Missing version.number in info response"); - } - - String[] parts = version.split("\\."); - if (parts.length < 2) { - return Optional.of("Wrong version.number format in info response"); - } - - int major = Integer.parseInt(parts[0]); - int minor = Integer.parseInt(parts[1]); - - if (major < 6) { - return Optional.of("Elasticsearch version 6 or more is required"); - } - - if (major == 6 || (major == 7 && minor < 14)) { - if ("You Know, for Search".equalsIgnoreCase(mainResponse.getTagline()) == false) { - return Optional.of("Invalid or missing tagline [" + mainResponse.getTagline() + "]"); - } - - return Optional.empty(); - } - - String header = response.getHeader("X-Elastic-Product"); - if (header == null) { - return Optional.of( - "Missing [X-Elastic-Product] header. Please check that you are connecting to an Elasticsearch " - + "instance, and that any networking filters are preserving that header." 
- ); - } - - if ("Elasticsearch".equals(header) == false) { - return Optional.of("Invalid value [" + header + "] for [X-Elastic-Product] header."); - } - - return Optional.empty(); - } - - private static List getDefaultNamedXContents() { - Map> map = new HashMap<>(); - map.put(CardinalityAggregationBuilder.NAME, (p, c) -> ParsedCardinality.fromXContent(p, (String) c)); - map.put(InternalHDRPercentiles.NAME, (p, c) -> ParsedHDRPercentiles.fromXContent(p, (String) c)); - map.put(InternalHDRPercentileRanks.NAME, (p, c) -> ParsedHDRPercentileRanks.fromXContent(p, (String) c)); - map.put(InternalTDigestPercentiles.NAME, (p, c) -> ParsedTDigestPercentiles.fromXContent(p, (String) c)); - map.put(InternalTDigestPercentileRanks.NAME, (p, c) -> ParsedTDigestPercentileRanks.fromXContent(p, (String) c)); - map.put(PercentilesBucketPipelineAggregationBuilder.NAME, (p, c) -> ParsedPercentilesBucket.fromXContent(p, (String) c)); - map.put(MedianAbsoluteDeviationAggregationBuilder.NAME, (p, c) -> ParsedMedianAbsoluteDeviation.fromXContent(p, (String) c)); - map.put(MinAggregationBuilder.NAME, (p, c) -> ParsedMin.fromXContent(p, (String) c)); - map.put(MaxAggregationBuilder.NAME, (p, c) -> ParsedMax.fromXContent(p, (String) c)); - map.put(SumAggregationBuilder.NAME, (p, c) -> ParsedSum.fromXContent(p, (String) c)); - map.put(AvgAggregationBuilder.NAME, (p, c) -> ParsedAvg.fromXContent(p, (String) c)); - map.put(WeightedAvgAggregationBuilder.NAME, (p, c) -> ParsedWeightedAvg.fromXContent(p, (String) c)); - map.put(ValueCountAggregationBuilder.NAME, (p, c) -> ParsedValueCount.fromXContent(p, (String) c)); - map.put(InternalSimpleValue.NAME, (p, c) -> ParsedSimpleValue.fromXContent(p, (String) c)); - map.put(DerivativePipelineAggregationBuilder.NAME, (p, c) -> ParsedDerivative.fromXContent(p, (String) c)); - map.put(InternalBucketMetricValue.NAME, (p, c) -> ParsedBucketMetricValue.fromXContent(p, (String) c)); - map.put(StatsAggregationBuilder.NAME, (p, c) -> ParsedStats.fromXContent(p, (String) c)); - map.put(StatsBucketPipelineAggregationBuilder.NAME, (p, c) -> ParsedStatsBucket.fromXContent(p, (String) c)); - map.put(ExtendedStatsAggregationBuilder.NAME, (p, c) -> ParsedExtendedStats.fromXContent(p, (String) c)); - map.put(ExtendedStatsBucketPipelineAggregationBuilder.NAME, (p, c) -> ParsedExtendedStatsBucket.fromXContent(p, (String) c)); - map.put(GeoBoundsAggregationBuilder.NAME, (p, c) -> ParsedGeoBounds.fromXContent(p, (String) c)); - map.put(GeoCentroidAggregationBuilder.NAME, (p, c) -> ParsedGeoCentroid.fromXContent(p, (String) c)); - map.put(HistogramAggregationBuilder.NAME, (p, c) -> ParsedHistogram.fromXContent(p, (String) c)); - map.put(DateHistogramAggregationBuilder.NAME, (p, c) -> ParsedDateHistogram.fromXContent(p, (String) c)); - map.put(AutoDateHistogramAggregationBuilder.NAME, (p, c) -> ParsedAutoDateHistogram.fromXContent(p, (String) c)); - map.put(VariableWidthHistogramAggregationBuilder.NAME, (p, c) -> ParsedVariableWidthHistogram.fromXContent(p, (String) c)); - map.put(StringTerms.NAME, (p, c) -> ParsedStringTerms.fromXContent(p, (String) c)); - map.put(LongTerms.NAME, (p, c) -> ParsedLongTerms.fromXContent(p, (String) c)); - map.put(DoubleTerms.NAME, (p, c) -> ParsedDoubleTerms.fromXContent(p, (String) c)); - map.put(LongRareTerms.NAME, (p, c) -> ParsedLongRareTerms.fromXContent(p, (String) c)); - map.put(StringRareTerms.NAME, (p, c) -> ParsedStringRareTerms.fromXContent(p, (String) c)); - map.put(MissingAggregationBuilder.NAME, (p, c) -> ParsedMissing.fromXContent(p, (String) c)); - 
map.put(NestedAggregationBuilder.NAME, (p, c) -> ParsedNested.fromXContent(p, (String) c)); - map.put(ReverseNestedAggregationBuilder.NAME, (p, c) -> ParsedReverseNested.fromXContent(p, (String) c)); - map.put(GlobalAggregationBuilder.NAME, (p, c) -> ParsedGlobal.fromXContent(p, (String) c)); - map.put(FilterAggregationBuilder.NAME, (p, c) -> ParsedFilter.fromXContent(p, (String) c)); - map.put(InternalSampler.PARSER_NAME, (p, c) -> ParsedSampler.fromXContent(p, (String) c)); - map.put(GeoHashGridAggregationBuilder.NAME, (p, c) -> ParsedGeoHashGrid.fromXContent(p, (String) c)); - map.put(GeoTileGridAggregationBuilder.NAME, (p, c) -> ParsedGeoTileGrid.fromXContent(p, (String) c)); - map.put(RangeAggregationBuilder.NAME, (p, c) -> ParsedRange.fromXContent(p, (String) c)); - map.put(DateRangeAggregationBuilder.NAME, (p, c) -> ParsedDateRange.fromXContent(p, (String) c)); - map.put(GeoDistanceAggregationBuilder.NAME, (p, c) -> ParsedGeoDistance.fromXContent(p, (String) c)); - map.put(FiltersAggregationBuilder.NAME, (p, c) -> ParsedFilters.fromXContent(p, (String) c)); - map.put(AdjacencyMatrixAggregationBuilder.NAME, (p, c) -> ParsedAdjacencyMatrix.fromXContent(p, (String) c)); - map.put(SignificantLongTerms.NAME, (p, c) -> ParsedSignificantLongTerms.fromXContent(p, (String) c)); - map.put(SignificantStringTerms.NAME, (p, c) -> ParsedSignificantStringTerms.fromXContent(p, (String) c)); - map.put(ScriptedMetricAggregationBuilder.NAME, (p, c) -> ParsedScriptedMetric.fromXContent(p, (String) c)); - map.put(IpRangeAggregationBuilder.NAME, (p, c) -> ParsedBinaryRange.fromXContent(p, (String) c)); - map.put(TopHitsAggregationBuilder.NAME, (p, c) -> ParsedTopHits.fromXContent(p, (String) c)); - map.put(CompositeAggregationBuilder.NAME, (p, c) -> ParsedComposite.fromXContent(p, (String) c)); - map.put(StringStatsAggregationBuilder.NAME, (p, c) -> ParsedStringStats.PARSER.parse(p, (String) c)); - map.put(TopMetricsAggregationBuilder.NAME, (p, c) -> ParsedTopMetrics.PARSER.parse(p, (String) c)); - map.put(TimeSeriesAggregationBuilder.NAME, (p, c) -> ParsedTimeSeries.fromXContent(p, (String) (c))); - List entries = map.entrySet() - .stream() - .map(entry -> new NamedXContentRegistry.Entry(Aggregation.class, new ParseField(entry.getKey()), entry.getValue())) - .collect(Collectors.toList()); - entries.add( - new NamedXContentRegistry.Entry( - Suggest.Suggestion.class, - new ParseField(TermSuggestionBuilder.SUGGESTION_NAME), - (parser, context) -> TermSuggestion.fromXContent(parser, (String) context) - ) - ); - entries.add( - new NamedXContentRegistry.Entry( - Suggest.Suggestion.class, - new ParseField(PhraseSuggestionBuilder.SUGGESTION_NAME), - (parser, context) -> PhraseSuggestion.fromXContent(parser, (String) context) - ) - ); - entries.add( - new NamedXContentRegistry.Entry( - Suggest.Suggestion.class, - new ParseField(CompletionSuggestionBuilder.SUGGESTION_NAME), - (parser, context) -> CompletionSuggestion.fromXContent(parser, (String) context) - ) - ); - return entries; - } - - /** - * Loads and returns the {@link NamedXContentRegistry.Entry} parsers provided by plugins. 
- */ - private static List getProvidedNamedXContents() { - List entries = new ArrayList<>(); - for (NamedXContentProvider service : ServiceLoader.load(NamedXContentProvider.class)) { - entries.addAll(service.getNamedXContentParsers()); - } - return entries; - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/MainResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/MainResponse.java deleted file mode 100644 index bf7b1a665e098..0000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/MainResponse.java +++ /dev/null @@ -1,220 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.client.core; - -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.XContentParser; - -import java.util.Objects; - -public class MainResponse { - - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - MainResponse.class.getName(), - true, - args -> { - return new MainResponse((String) args[0], (Version) args[1], (String) args[2], (String) args[3], (String) args[4]); - } - ); - - static { - PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("name")); - PARSER.declareObject(ConstructingObjectParser.constructorArg(), Version.PARSER, new ParseField("version")); - PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("cluster_name")); - PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("cluster_uuid")); - PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("tagline")); - - } - - private final String nodeName; - private final Version version; - private final String clusterName; - private final String clusterUuid; - private final String tagline; - - public MainResponse(String nodeName, Version version, String clusterName, String clusterUuid, String tagline) { - this.nodeName = nodeName; - this.version = version; - this.clusterName = clusterName; - this.clusterUuid = clusterUuid; - this.tagline = tagline; - } - - public String getNodeName() { - return nodeName; - } - - public Version getVersion() { - return version; - } - - public String getClusterName() { - return clusterName; - } - - public String getClusterUuid() { - return clusterUuid; - } - - public String getTagline() { - return tagline; - } - - public static MainResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - MainResponse that = (MainResponse) o; - return nodeName.equals(that.nodeName) - && version.equals(that.version) - && clusterName.equals(that.clusterName) - && clusterUuid.equals(that.clusterUuid) - && tagline.equals(that.tagline); - } - - @Override - public int hashCode() { - return Objects.hash(nodeName, version, clusterName, clusterUuid, tagline); - } - - public static class Version { - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - Version.class.getName(), - true, - args -> { - 
return new Version( - (String) args[0], - (String) args[1], - (String) args[2], - (String) args[3], - (String) args[4], - (Boolean) args[5], - (String) args[6], - (String) args[7], - (String) args[8] - ); - } - ); - - static { - PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("number")); - PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), new ParseField("build_flavor")); - PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), new ParseField("build_type")); - PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("build_hash")); - PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("build_date")); - PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), new ParseField("build_snapshot")); - PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("lucene_version")); - PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("minimum_wire_compatibility_version")); - PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("minimum_index_compatibility_version")); - } - private final String number; - private final String buildFlavor; - private final String buildType; - private final String buildHash; - private final String buildDate; - private final boolean isSnapshot; - private final String luceneVersion; - private final String minimumWireCompatibilityVersion; - private final String minimumIndexCompatibilityVersion; - - public Version( - String number, - String buildFlavor, - String buildType, - String buildHash, - String buildDate, - boolean isSnapshot, - String luceneVersion, - String minimumWireCompatibilityVersion, - String minimumIndexCompatibilityVersion - ) { - this.number = number; - this.buildFlavor = buildFlavor; - this.buildType = buildType; - this.buildHash = buildHash; - this.buildDate = buildDate; - this.isSnapshot = isSnapshot; - this.luceneVersion = luceneVersion; - this.minimumWireCompatibilityVersion = minimumWireCompatibilityVersion; - this.minimumIndexCompatibilityVersion = minimumIndexCompatibilityVersion; - } - - public String getNumber() { - return number; - } - - public String getBuildFlavor() { - return buildFlavor; - } - - public String getBuildType() { - return buildType; - } - - public String getBuildHash() { - return buildHash; - } - - public String getBuildDate() { - return buildDate; - } - - public boolean isSnapshot() { - return isSnapshot; - } - - public String getLuceneVersion() { - return luceneVersion; - } - - public String getMinimumWireCompatibilityVersion() { - return minimumWireCompatibilityVersion; - } - - public String getMinimumIndexCompatibilityVersion() { - return minimumIndexCompatibilityVersion; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - Version version = (Version) o; - return isSnapshot == version.isSnapshot - && number.equals(version.number) - && Objects.equals(buildFlavor, version.buildFlavor) - && Objects.equals(buildType, version.buildType) - && buildHash.equals(version.buildHash) - && buildDate.equals(version.buildDate) - && luceneVersion.equals(version.luceneVersion) - && minimumWireCompatibilityVersion.equals(version.minimumWireCompatibilityVersion) - && minimumIndexCompatibilityVersion.equals(version.minimumIndexCompatibilityVersion); - } - - @Override - public int hashCode() { - return Objects.hash( - number, - buildFlavor, - buildType, - 
buildHash, - buildDate, - isSnapshot, - luceneVersion, - minimumWireCompatibilityVersion, - minimumIndexCompatibilityVersion - ); - } - } -} diff --git a/client/rest-high-level/testnode.crt b/client/rest-high-level/testnode.crt deleted file mode 100644 index 08c160bcea5ff..0000000000000 --- a/client/rest-high-level/testnode.crt +++ /dev/null @@ -1,23 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID0zCCArugAwIBAgIJALi5bDfjMszLMA0GCSqGSIb3DQEBCwUAMEgxDDAKBgNV -BAoTA29yZzEWMBQGA1UECxMNZWxhc3RpY3NlYXJjaDEgMB4GA1UEAxMXRWxhc3Rp -Y3NlYXJjaCBUZXN0IE5vZGUwHhcNMTUwOTIzMTg1MjU3WhcNMTkwOTIyMTg1MjU3 -WjBIMQwwCgYDVQQKEwNvcmcxFjAUBgNVBAsTDWVsYXN0aWNzZWFyY2gxIDAeBgNV -BAMTF0VsYXN0aWNzZWFyY2ggVGVzdCBOb2RlMIIBIjANBgkqhkiG9w0BAQEFAAOC -AQ8AMIIBCgKCAQEA3rGZ1QbsW0+MuyrSLmMfDFKtLBkIFW8V0gRuurFg1PUKKNR1 -Mq2tMVwjjYETAU/UY0iKZOzjgvYPKhDTYBTte/WHR1ZK4CYVv7TQX/gtFQG/ge/c -7u0sLch9p7fbd+/HZiLS/rBEZDIohvgUvzvnA8+OIYnw4kuxKo/5iboAIS41klMg -/lATm8V71LMY68inht71/ZkQoAHKgcR9z4yNYvQ1WqKG8DG8KROXltll3sTrKbl5 -zJhn660es/1ZnR6nvwt6xnSTl/mNHMjkfv1bs4rJ/py3qPxicdoSIn/KyojUcgHV -F38fuAy2CQTdjVG5fWj9iz+mQvLm3+qsIYQdFwIDAQABo4G/MIG8MAkGA1UdEwQC -MAAwHQYDVR0OBBYEFEMMWLWQi/g83PzlHYqAVnty5L7HMIGPBgNVHREEgYcwgYSC -CWxvY2FsaG9zdIIVbG9jYWxob3N0LmxvY2FsZG9tYWluggpsb2NhbGhvc3Q0ghds -b2NhbGhvc3Q0LmxvY2FsZG9tYWluNIIKbG9jYWxob3N0NoIXbG9jYWxob3N0Ni5s -b2NhbGRvbWFpbjaHBH8AAAGHEAAAAAAAAAAAAAAAAAAAAAEwDQYJKoZIhvcNAQEL -BQADggEBAMjGGXT8Nt1tbl2GkiKtmiuGE2Ej66YuZ37WSJViaRNDVHLlg87TCcHe -k2rdO+6sFqQbbzEfwQ05T7xGmVu7tm54HwKMRugoQ3wct0bQC5wEWYN+oMDvSyO6 -M28mZwWb4VtR2IRyWP+ve5DHwTM9mxWa6rBlGzsQqH6YkJpZojzqk/mQTug+Y8aE -mVoqRIPMHq9ob+S9qd5lp09+MtYpwPfTPx/NN+xMEooXWW/ARfpGhWPkg/FuCu4z -1tFmCqHgNcWirzMm3dQpF78muE9ng6OB2MXQwL4VgnVkxmlZNHbkR2v/t8MyZJxC -y4g6cTMM3S/UMt5/+aIB2JAuMKyuD+A= ------END CERTIFICATE----- diff --git a/client/rest-high-level/testnode.jks b/client/rest-high-level/testnode.jks deleted file mode 100644 index ebe6146124e8f..0000000000000 Binary files a/client/rest-high-level/testnode.jks and /dev/null differ diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index 7154a2be5bbd8..ed087bef0ac76 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -87,6 +87,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static java.util.Collections.singletonList; +import static org.elasticsearch.client.RestClient.IGNORE_RESPONSE_CODES_PARAM; /** * Client that connects to an Elasticsearch cluster through HTTP. @@ -106,6 +107,9 @@ * Requests can be traced by enabling trace logging for "tracer". The trace logger outputs requests and responses in curl format. 
*/ public class RestClient implements Closeable { + + public static final String IGNORE_RESPONSE_CODES_PARAM = "ignore"; + private static final Log logger = LogFactory.getLog(RestClient.class); private final CloseableHttpAsyncClient client; @@ -780,8 +784,8 @@ private class InternalRequest { this.request = request; Map params = new HashMap<>(request.getParameters()); params.putAll(request.getOptions().getParameters()); - // ignore is a special parameter supported by the clients, shouldn't be sent to es - String ignoreString = params.remove("ignore"); + // IGNORE_RESPONSE_CODES_PARAM is a special parameter supported by the clients, shouldn't be sent to es + String ignoreString = params.remove(IGNORE_RESPONSE_CODES_PARAM); this.ignoreErrorCodes = getIgnoreErrorCodes(ignoreString, request.getMethod()); URI uri = buildUri(pathPrefix, request.getEndpoint(), params); this.httpRequest = createHttpRequest(request.getMethod(), uri, request.getEntity(), compressionEnabled); diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java index a1c4d3fab076a..10d24242ae620 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java @@ -275,6 +275,7 @@ public void testErrorStatusCodes() throws Exception { try { Request request = new Request(method, "/" + errorStatusCode); if (false == ignoreParam.isEmpty()) { + // literal "ignore" rather than IGNORE_RESPONSE_CODES_PARAM since this is something on which callers might rely request.addParameter("ignore", ignoreParam); } Response response = restClient.performRequest(request); @@ -568,6 +569,7 @@ private HttpUriRequest performRandomRequest(String method) throws Exception { if (randomBoolean()) { ignore += "," + randomFrom(RestClientTestUtil.getAllErrorStatusCodes()); } + // literal "ignore" rather than IGNORE_RESPONSE_CODES_PARAM since this is something on which callers might rely request.addParameter("ignore", ignore); } URI uri = uriBuilder.build(); diff --git a/docs/build.gradle b/docs/build.gradle index 33e6cc6080a95..da3d83378e894 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -72,6 +72,9 @@ testClusters.matching { it.name == "yamlRestTest"}.configureEach { setting 'xpack.license.self_generated.type', 'trial' setting 'indices.lifecycle.history_index_enabled', 'false' keystorePassword 'keystore-password' + if (BuildParams.isSnapshotBuild() == false) { + requiresFeature 'es.failure_store_feature_flag_enabled', new Version(8, 12, 0) + } } // debug ccr test failures: diff --git a/docs/changelog/100921.yaml b/docs/changelog/100921.yaml new file mode 100644 index 0000000000000..e6e2caa93d465 --- /dev/null +++ b/docs/changelog/100921.yaml @@ -0,0 +1,5 @@ +pr: 100921 +summary: "Add support for Serbian Language Analyzer" +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/101409.yaml b/docs/changelog/101409.yaml new file mode 100644 index 0000000000000..82e7f339fdd89 --- /dev/null +++ b/docs/changelog/101409.yaml @@ -0,0 +1,5 @@ +pr: 101409 +summary: Adding a simulate ingest api +area: Ingest Node +type: feature +issues: [] diff --git a/docs/changelog/101845.yaml b/docs/changelog/101845.yaml new file mode 100644 index 0000000000000..0dd95bdabca57 --- /dev/null +++ b/docs/changelog/101845.yaml @@ -0,0 +1,5 @@ +pr: 101845 +summary: Introduce new endpoint to expose data stream lifecycle stats +area: 
Data streams +type: enhancement +issues: [] diff --git a/docs/changelog/102193.yaml b/docs/changelog/102193.yaml new file mode 100644 index 0000000000000..4d64493602ff2 --- /dev/null +++ b/docs/changelog/102193.yaml @@ -0,0 +1,5 @@ +pr: 102193 +summary: Fix cache invalidation on privilege modification +area: Authorization +type: bug +issues: [] diff --git a/docs/changelog/102220.yaml b/docs/changelog/102220.yaml new file mode 100644 index 0000000000000..d24dab1f91b31 --- /dev/null +++ b/docs/changelog/102220.yaml @@ -0,0 +1,5 @@ +pr: 102220 +summary: Upgrade xmlsec to 2.3.4 +area: Security +type: enhancement +issues: [] diff --git a/docs/changelog/102230.yaml b/docs/changelog/102230.yaml new file mode 100644 index 0000000000000..20e8d8d1f10a6 --- /dev/null +++ b/docs/changelog/102230.yaml @@ -0,0 +1,6 @@ +pr: 102230 +summary: Set region for the STS client via privileged calls in AWS SDK +area: Snapshot/Restore +type: bug +issues: + - 102173 diff --git a/docs/changelog/102240.yaml b/docs/changelog/102240.yaml new file mode 100644 index 0000000000000..5df0046ee92fc --- /dev/null +++ b/docs/changelog/102240.yaml @@ -0,0 +1,5 @@ +pr: 102240 +summary: Exclude stack traces from transform audit messages and health +area: Transform +type: bug +issues: [] diff --git a/docs/changelog/102244.yaml b/docs/changelog/102244.yaml new file mode 100644 index 0000000000000..3b160e033b57e --- /dev/null +++ b/docs/changelog/102244.yaml @@ -0,0 +1,5 @@ +pr: 102244 +summary: Expose reconciliation metrics via APM +area: Allocation +type: enhancement +issues: [] diff --git a/docs/changelog/102245.yaml b/docs/changelog/102245.yaml new file mode 100644 index 0000000000000..387540d96290c --- /dev/null +++ b/docs/changelog/102245.yaml @@ -0,0 +1,5 @@ +pr: 102245 +summary: Add non-green indicator names to `HealthPeriodicLogger` message +area: Health +type: enhancement +issues: [] diff --git a/docs/changelog/102250.yaml b/docs/changelog/102250.yaml new file mode 100644 index 0000000000000..755341d9a3a64 --- /dev/null +++ b/docs/changelog/102250.yaml @@ -0,0 +1,6 @@ +pr: 102250 +summary: "[ILM] Fix downsample to skip already downsampled indices" +area: ILM+SLM +type: bug +issues: + - 102249 diff --git a/docs/changelog/102259.yaml b/docs/changelog/102259.yaml new file mode 100644 index 0000000000000..3d8a1c6381f6d --- /dev/null +++ b/docs/changelog/102259.yaml @@ -0,0 +1,5 @@ +pr: 102259 +summary: "[Usage API] Count all the data streams that have lifecycle" +area: Data streams +type: bug +issues: [] diff --git a/docs/changelog/102273.yaml b/docs/changelog/102273.yaml new file mode 100644 index 0000000000000..78ecc8b2d2734 --- /dev/null +++ b/docs/changelog/102273.yaml @@ -0,0 +1,5 @@ +pr: 102273 +summary: Improve analyzer reload log message +area: Mapping +type: enhancement +issues: [] diff --git a/docs/changelog/102281.yaml b/docs/changelog/102281.yaml new file mode 100644 index 0000000000000..ac6c17591e013 --- /dev/null +++ b/docs/changelog/102281.yaml @@ -0,0 +1,5 @@ +pr: 102281 +summary: Improve failure handling in `ContinuousComputation` +area: Allocation +type: bug +issues: [] diff --git a/docs/changelog/102292.yaml b/docs/changelog/102292.yaml new file mode 100644 index 0000000000000..953c3ffdf6150 --- /dev/null +++ b/docs/changelog/102292.yaml @@ -0,0 +1,5 @@ +pr: 102292 +summary: Consider duplicate stacktraces in custom index +area: Application +type: enhancement +issues: [] diff --git a/docs/changelog/102317.yaml b/docs/changelog/102317.yaml new file mode 100644 index 0000000000000..89b2ae5432101 --- /dev/null 
+++ b/docs/changelog/102317.yaml @@ -0,0 +1,6 @@ +pr: 102317 +summary: "ESQL: Fix single value query" +area: ES|QL +type: bug +issues: + - 102298 diff --git a/docs/changelog/99134.yaml b/docs/changelog/99134.yaml new file mode 100644 index 0000000000000..10156b9b30066 --- /dev/null +++ b/docs/changelog/99134.yaml @@ -0,0 +1,5 @@ +pr: 99134 +summary: Add ability to create a data stream failure store +area: Data streams +type: feature +issues: [] diff --git a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc index 45cb725492f07..5273537389e3d 100644 --- a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc @@ -36,6 +36,7 @@ following types are supported: <>, <>, <>, +<>, <>, <>, <>, @@ -64,8 +65,8 @@ The following analyzers support setting custom `stem_exclusion` list: `arabic`, `armenian`, `basque`, `bengali`, `bulgarian`, `catalan`, `czech`, `dutch`, `english`, `finnish`, `french`, `galician`, `german`, `hindi`, `hungarian`, `indonesian`, `irish`, `italian`, `latvian`, -`lithuanian`, `norwegian`, `portuguese`, `romanian`, `russian`, `sorani`, -`spanish`, `swedish`, `turkish`. +`lithuanian`, `norwegian`, `portuguese`, `romanian`, `russian`, `serbian`, +`sorani`, `spanish`, `swedish`, `turkish`. ==== Reimplementing language analyzers @@ -1588,6 +1589,55 @@ PUT /russian_example <2> This filter should be removed unless there are words which should be excluded from stemming. +[[serbian-analyzer]] +===== `serbian` analyzer + +The `serbian` analyzer could be reimplemented as a `custom` analyzer as follows: + +[source,console] +---------------------------------------------------- +PUT /serbian_example +{ + "settings": { + "analysis": { + "filter": { + "serbian_stop": { + "type": "stop", + "stopwords": "_serbian_" <1> + }, + "serbian_keywords": { + "type": "keyword_marker", + "keywords": ["пример"] <2> + }, + "serbian_stemmer": { + "type": "stemmer", + "language": "serbian" + } + }, + "analyzer": { + "rebuilt_serbian": { + "tokenizer": "standard", + "filter": [ + "lowercase", + "serbian_stop", + "serbian_keywords", + "serbian_stemmer", + "serbian_normalization" + ] + } + } + } + } +} +---------------------------------------------------- +// TEST[s/"serbian_keywords",//] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: serbian_example, first: serbian, second: rebuilt_serbian}\nendyaml\n/] + +<1> The default stopwords can be overridden with the `stopwords` +or `stopwords_path` parameters. +<2> This filter should be removed unless there are words which should +be excluded from stemming. + [[sorani-analyzer]] ===== `sorani` analyzer diff --git a/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc index a76bc6f6c5254..57e402988cc5a 100644 --- a/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc @@ -9,7 +9,7 @@ A filter that stems words using a Snowball-generated stemmer. The values: `Arabic`, `Armenian`, `Basque`, `Catalan`, `Danish`, `Dutch`, `English`, `Estonian`, `Finnish`, `French`, `German`, `German2`, `Hungarian`, `Italian`, `Irish`, `Kp`, `Lithuanian`, `Lovins`, `Norwegian`, `Porter`, `Portuguese`, `Romanian`, -`Russian`, `Spanish`, `Swedish`, `Turkish`. +`Russian`, `Serbian`, `Spanish`, `Swedish`, `Turkish`. 
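To make the new `Serbian` snowball value concrete, here is a hedged sketch (the index name, analyzer name, host, and sample text are made up for illustration and are not part of the change above) that wires a Snowball Serbian stemmer into a custom analyzer through the low-level Java REST client and runs `_analyze` against it:

    import org.apache.http.HttpHost;
    import org.apache.http.util.EntityUtils;
    import org.elasticsearch.client.Request;
    import org.elasticsearch.client.Response;
    import org.elasticsearch.client.RestClient;

    public class SerbianSnowballSketch {
        public static void main(String[] args) throws Exception {
            try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
                // Create an index whose analyzer uses the snowball filter with the new "Serbian" language value.
                Request create = new Request("PUT", "/serbian_snowball_example");
                create.setJsonEntity("""
                    {
                      "settings": {
                        "analysis": {
                          "filter": {
                            "serbian_snowball": { "type": "snowball", "language": "Serbian" }
                          },
                          "analyzer": {
                            "my_serbian": {
                              "tokenizer": "standard",
                              "filter": [ "lowercase", "serbian_snowball" ]
                            }
                          }
                        }
                      }
                    }
                    """);
                client.performRequest(create);

                // Sanity check: run the analyzer over a sample term and print the produced tokens.
                Request analyze = new Request("GET", "/serbian_snowball_example/_analyze");
                analyze.setJsonEntity("{ \"analyzer\": \"my_serbian\", \"text\": \"пример\" }");
                Response response = client.performRequest(analyze);
                System.out.println(EntityUtils.toString(response.getEntity()));
            }
        }
    }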
For example: diff --git a/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc index 162164e12872d..b8d883b057823 100644 --- a/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc @@ -230,6 +230,9 @@ Russian:: https://snowballstem.org/algorithms/russian/stemmer.html[*`russian`*], https://doc.rero.ch/lm.php?url=1000%2C43%2C4%2C20091209094227-CA%2FDolamic_Ljiljana_-_Indexing_and_Searching_Strategies_for_the_Russian_20091209.pdf[`light_russian`] +Serbian:: +https://snowballstem.org/algorithms/serbian/stemmer.html[*`serbian`*] + Spanish:: https://www.ercim.eu/publication/ws-proceedings/CLEF2/savoy.pdf[*`light_spanish`*], https://snowballstem.org/algorithms/spanish/stemmer.html[`spanish`] diff --git a/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc index 12e0d76f9901b..abba633b643dc 100644 --- a/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc @@ -356,6 +356,10 @@ parameter and a link to their predefined stop words in Lucene. `_russian_`:: {lucene-stop-word-link}/snowball/russian_stop.txt[Russian stop words] +[[serbian-stop-words]] +`_serbian_`:: +{lucene-stop-word-link}/sr/stopwords.txt[Serbian stop words] + [[sorani-stop-words]] `_sorani_`:: {lucene-stop-word-link}/ckb/stopwords.txt[Sorani stop words] diff --git a/docs/reference/data-streams/change-mappings-and-settings.asciidoc b/docs/reference/data-streams/change-mappings-and-settings.asciidoc index 3922ef018a713..86d72cf52c9e9 100644 --- a/docs/reference/data-streams/change-mappings-and-settings.asciidoc +++ b/docs/reference/data-streams/change-mappings-and-settings.asciidoc @@ -601,7 +601,7 @@ stream's oldest backing index. // TESTRESPONSE[s/"index_uuid": "_eEfRrFHS9OyhqWntkgHAQ"/"index_uuid": $body.data_streams.0.indices.1.index_uuid/] // TESTRESPONSE[s/"index_name": ".ds-my-data-stream-2099.03.07-000001"/"index_name": $body.data_streams.0.indices.0.index_name/] // TESTRESPONSE[s/"index_name": ".ds-my-data-stream-2099.03.08-000002"/"index_name": $body.data_streams.0.indices.1.index_name/] -// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW"/] +// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_indices":[],"failure_store":false/] <1> First item in the `indices` array for `my-data-stream`. This item contains information about the stream's oldest backing index, @@ -704,4 +704,4 @@ Use the <> to update an existing data stream's aliases. Changing an existing data stream's aliases in its index pattern has no effect. 
-include::../alias.asciidoc[tag=alias-multiple-actions-example] \ No newline at end of file +include::../alias.asciidoc[tag=alias-multiple-actions-example] diff --git a/docs/reference/data-streams/data-stream-apis.asciidoc b/docs/reference/data-streams/data-stream-apis.asciidoc index d3580ca4448a7..3c2e703d264ff 100644 --- a/docs/reference/data-streams/data-stream-apis.asciidoc +++ b/docs/reference/data-streams/data-stream-apis.asciidoc @@ -25,6 +25,8 @@ preview:[] preview:[] * <> preview:[] +* <> +preview:[] The following API is available for <>: @@ -55,4 +57,6 @@ include::{es-repo-dir}/data-streams/lifecycle/apis/delete-lifecycle.asciidoc[] include::{es-repo-dir}/data-streams/lifecycle/apis/explain-lifecycle.asciidoc[] +include::{es-repo-dir}/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc[] + include::{es-repo-dir}/indices/downsample-data-stream.asciidoc[] diff --git a/docs/reference/data-streams/downsampling-manual.asciidoc b/docs/reference/data-streams/downsampling-manual.asciidoc index b7d46b6301884..5bdfaf428d169 100644 --- a/docs/reference/data-streams/downsampling-manual.asciidoc +++ b/docs/reference/data-streams/downsampling-manual.asciidoc @@ -17,7 +17,7 @@ DELETE _ingest/pipeline/my-timestamp-pipeline The recommended way to downsample a time series data stream (TSDS) is <>. However, if you're not using ILM, you can downsample a TSDS manually. This guide shows you -how, using typical Kubernetes cluster monitoring data. +how, using typical Kubernetes cluster monitoring data. To test out manual downsampling, follow these steps: @@ -32,13 +32,13 @@ To test out manual downsampling, follow these steps: ==== Prerequisites * Refer to the <>. -* It is not possible to downsample a data stream directly, nor +* It is not possible to downsample a data stream directly, nor multiple indices at once. It's only possible to downsample one time series index (TSDS backing index). * In order to downsample an index, it needs to be read-only. For a TSDS write index, this means it needs to be rolled over and made read-only first. * Downsampling uses UTC timestamps. -* Downsampling needs at least one metric field to exist in the time series +* Downsampling needs at least one metric field to exist in the time series index. [discrete] @@ -51,8 +51,8 @@ First, you'll create a TSDS. For simplicity, in the time series mapping all be used. The `time_series_metric` values determine the kind of statistical representations that are used during downsampling. -The index template includes a set of static -<>: `host`, `namespace`, +The index template includes a set of static +<>: `host`, `namespace`, `node`, and `pod`. The time series dimensions are not changed by the downsampling process. @@ -388,6 +388,7 @@ This returns: // TESTRESPONSE[s/"ltOJGmqgTVm4T-Buoe7Acg"/$body.data_streams.0.indices.0.index_uuid/] // TESTRESPONSE[s/"2023-07-26T09:26:42.000Z"/$body.data_streams.0.time_series.temporal_ranges.0.start/] // TESTRESPONSE[s/"2023-07-26T13:26:42.000Z"/$body.data_streams.0.time_series.temporal_ranges.0.end/] +// TESTRESPONSE[s/"replicated": false/"replicated": false,"failure_indices":[],"failure_store":false/] <1> The backing index for this data stream. 
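As a rough illustration of the manual downsampling prerequisites above, here is a minimal sketch (not part of the change) using the low-level Java REST client: roll the TSDS over, block writes on the old backing index so it becomes read-only, then downsample it. The host, the data stream name `my-tsds`, the backing index name, and the one-hour interval are all assumptions for the example; the backing index name would come from the rollover response in practice.

    import org.apache.http.HttpHost;
    import org.elasticsearch.client.Request;
    import org.elasticsearch.client.RestClient;

    public class ManualDownsampleSketch {
        public static void main(String[] args) throws Exception {
            try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
                // 1. Roll the data stream over so the previous write index can be made read-only.
                client.performRequest(new Request("POST", "/my-tsds/_rollover"));

                // 2. Block writes on the old backing index (placeholder name) so it is read-only.
                String backingIndex = ".ds-my-tsds-2023.11.15-000001"; // copy the real name from the rollover response
                client.performRequest(new Request("PUT", "/" + backingIndex + "/_block/write"));

                // 3. Downsample the now read-only index into one-hour buckets.
                Request downsample = new Request("POST", "/" + backingIndex + "/_downsample/" + backingIndex + "-downsample");
                downsample.setJsonEntity("{ \"fixed_interval\": \"1h\" }");
                client.performRequest(downsample);
            }
        }
    }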
Before a backing index can be downsampled, the TSDS needs to be rolled over and diff --git a/docs/reference/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc b/docs/reference/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc new file mode 100644 index 0000000000000..6fa82dc2a810c --- /dev/null +++ b/docs/reference/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc @@ -0,0 +1,93 @@ +[[data-streams-get-lifecycle-stats]] +=== Get data stream lifecycle stats +++++ +Get Data Stream Lifecycle +++++ + +preview::[] + +Gets stats about the execution of data stream lifecycle. + +[[get-lifecycle-stats-api-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have the `monitor` or +`manage` <> to use this API. + +[[data-streams-get-lifecycle-stats-request]] +==== {api-request-title} + +`GET _lifecycle/stats` + +[[data-streams-get-lifecycle-stats-desc]] +==== {api-description-title} + +Gets stats about the execution of the data stream lifecycle. The data stream level stats include only stats about data streams +managed by the data stream lifecycle. + +[[get-lifecycle-stats-api-response-body]] +==== {api-response-body-title} + +`last_run_duration_in_millis`:: +(Optional, long) +The duration of the last data stream lifecycle execution. +`time_between_starts_in_millis`:: +(Optional, long) +The time passed between the start of the last two data stream lifecycle executions. This should amount approximately to +<>. +`data_stream_count`:: +(integer) +The count of data streams currently being managed by the data stream lifecycle. +`data_streams`:: +(array of objects) +Contains information about the retrieved data stream lifecycles. ++ +.Properties of objects in `data_streams` +[%collapsible%open] +==== +`name`:: +(string) +The name of the data stream. +`backing_indices_in_total`:: +(integer) +The count of the backing indices of this data stream that are managed by the data stream lifecycle. +`backing_indices_in_error`:: +(integer) +The count of the backing indices of this data stream that are managed by the data stream lifecycle and have encountered an error. 
+==== + +[[data-streams-get-lifecycle-stats-example]] +==== {api-examples-title} + +Let's retrieve the data stream lifecycle stats of a cluster that has already executed the lifecycle more than once: + +[source,console] +-------------------------------------------------- +GET _lifecycle/stats?human&pretty +-------------------------------------------------- +// TEST[skip:this is for demonstration purposes only, we cannot ensure that DSL has run] + +The response will look like the following: + +[source,console-result] +-------------------------------------------------- +{ + "last_run_duration_in_millis": 2, + "last_run_duration": "2ms", + "time_between_starts_in_millis": 9998, + "time_between_starts": "9.99s", + "data_streams_count": 2, + "data_streams": [ + { + "name": "my-data-stream", + "backing_indices_in_total": 2, + "backing_indices_in_error": 0 + }, + { + "name": "my-other-stream", + "backing_indices_in_total": 2, + "backing_indices_in_error": 1 + } + ] +} +-------------------------------------------------- \ No newline at end of file diff --git a/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc b/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc index a6c13e5aae708..d5b80315375d3 100644 --- a/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc +++ b/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc @@ -1,16 +1,16 @@ [role="xpack"] [[tutorial-migrate-data-stream-from-ilm-to-dsl]] -=== Tutorial: Migrate ILM managed data stream to Data stream lifecycle +=== Tutorial: Migrate ILM managed data stream to Data stream lifecycle preview::[] -In this tutorial we'll look at migrating an existing data stream from {ilm-init} to -Data stream lifecycle. The existing {ilm-init} managed backing indices will continue +In this tutorial we'll look at migrating an existing data stream from {ilm-init} to +Data stream lifecycle. The existing {ilm-init} managed backing indices will continue to be managed by {ilm-init} until they age out and get deleted by {ilm-init}; however, -the new backing indices will be managed by Data stream lifecycle. -This way, a data stream is gradually migrated away from being managed by {ilm-cap} to +the new backing indices will be managed by Data stream lifecycle. +This way, a data stream is gradually migrated away from being managed by {ilm-cap} to being managed by Data stream lifecycle. As we'll see, {ilm-cap} and Data stream lifecycle -can co-manage a data stream; however, an index can only be managed by one system at +can co-manage a data stream; however, an index can only be managed by one system at a time. Let's first create a data stream with two backing indices managed by {ilm-cap}. 
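For completeness, a small sketch (assuming a cluster on localhost:9200 with data stream lifecycle in use; not a definitive client implementation) showing how the new `GET _lifecycle/stats` endpoint could be called from the low-level Java REST client. It simply prints the JSON body whose fields are documented above.

    import org.apache.http.HttpHost;
    import org.apache.http.util.EntityUtils;
    import org.elasticsearch.client.Request;
    import org.elasticsearch.client.Response;
    import org.elasticsearch.client.RestClient;

    public class LifecycleStatsSketch {
        public static void main(String[] args) throws Exception {
            try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
                // Ask for human-readable durations, as in the documentation example.
                Request request = new Request("GET", "/_lifecycle/stats");
                request.addParameter("human", "true");
                Response response = client.performRequest(request);
                // The body contains last_run_duration, time_between_starts and per-data-stream backing index counts.
                System.out.println(EntityUtils.toString(response.getEntity()));
            }
        }
    }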
@@ -77,7 +77,7 @@ POST dsl-data-stream/_rollover ---- // TEST[continued] -We'll use the <> API to inspect the state of +We'll use the <> API to inspect the state of the data stream: [source,console] @@ -87,7 +87,7 @@ GET _data_stream/dsl-data-stream // TEST[continued] Inspecting the response we'll see that both backing indices are managed by {ilm-init} -and that the next generation index will also be managed by {ilm-init}: +and that the next generation index will also be managed by {ilm-init}: [source,console-result] ---- @@ -100,7 +100,7 @@ and that the next generation index will also be managed by {ilm-init}: }, "indices": [ { - "index_name": ".ds-dsl-data-stream-2023.10.19-000001", <1> + "index_name": ".ds-dsl-data-stream-2023.10.19-000001", <1> "index_uuid": "xCEhwsp8Tey0-FLNFYVwSg", "prefer_ilm": true, <2> "ilm_policy": "pre-dsl-ilm-policy", <3> @@ -132,37 +132,37 @@ and that the next generation index will also be managed by {ilm-init}: // TESTRESPONSE[s/"index_uuid": "xCEhwsp8Tey0-FLNFYVwSg"/"index_uuid": $body.data_streams.0.indices.0.index_uuid/] // TESTRESPONSE[s/"index_name": ".ds-dsl-data-stream-2023.10.19-000002"/"index_name": $body.data_streams.0.indices.1.index_name/] // TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8DJ5gw"/"index_uuid": $body.data_streams.0.indices.1.index_uuid/] -// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW"/] +// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_indices":[],"failure_store":false/] <1> The name of the backing index. -<2> For each backing index we display the value of the <> +<2> For each backing index we display the value of the <> configuration which will indicate if {ilm-init} takes precedence over data stream lifecycle in case both systems are configured for an index. <3> The {ilm-init} policy configured for this index. -<4> The system that manages this index (possible values are "Index Lifecycle Management", +<4> The system that manages this index (possible values are "Index Lifecycle Management", "Data stream lifecycle", or "Unmanaged") -<5> The system that will manage the next generation index (the new write index of this -data stream, once the data stream is rolled over). The possible values are +<5> The system that will manage the next generation index (the new write index of this +data stream, once the data stream is rolled over). The possible values are "Index Lifecycle Management", "Data stream lifecycle", or "Unmanaged". <6> The <> value configured in the index template that's backing the data stream. This value will be configured for all the new backing indices. If it's not configured in the index template the backing indices will receive the `true` -default value ({ilm-init} takes precedence over data stream lifecycle by default as it's +default value ({ilm-init} takes precedence over data stream lifecycle by default as it's currently richer in features). -<7> The {ilm-init} policy configured in the index template that's backing this data -stream (which will be configured on all the new backing indices, as long as it exists +<7> The {ilm-init} policy configured in the index template that's backing this data +stream (which will be configured on all the new backing indices, as long as it exists in the index template). To migrate the `dsl-data-stream` to data stream lifecycle we'll have to execute two steps: -1. Update the index template that's backing the index template to configure <> -to `false`, and to configure data stream lifecycle. +1. 
Update the index template that's backing the index template to configure <> +to `false`, and to configure data stream lifecycle. 2. Configure the data stream lifecycle for the _existing_ `dsl-data-stream` using the <>. IMPORTANT: The data stream lifecycle configuration that's added to the index template, -being a data stream configuration, will only apply to **new** data streams. +being a data stream configuration, will only apply to **new** data streams. Our data stream exists already, so even though we added a data stream lifecycle configuration in the index template it will not be applied to `dsl-data-stream`. @@ -191,9 +191,9 @@ PUT _index_template/dsl-data-stream-template // TEST[continued] <1> The `prefer_ilm` setting will now be configured on the **new** backing indices -(created by rolling over the data stream) such that {ilm-init} does _not_ take +(created by rolling over the data stream) such that {ilm-init} does _not_ take precedence over Data stream lifecycle. -<2> We're configuring the data stream lifecycle so _new_ data streams will be +<2> We're configuring the data stream lifecycle so _new_ data streams will be managed by Data stream lifecycle. We've now made sure that new data streams will be managed by Data stream lifecycle. @@ -209,7 +209,7 @@ PUT _data_stream/dsl-data-stream/_lifecycle ---- // TEST[continued] -We can inspect the data stream to check that the next generation will indeed be +We can inspect the data stream to check that the next generation will indeed be managed by Data stream lifecycle: [source,console] @@ -229,10 +229,10 @@ GET _data_stream/dsl-data-stream }, "indices": [ { - "index_name": ".ds-dsl-data-stream-2023.10.19-000001", + "index_name": ".ds-dsl-data-stream-2023.10.19-000001", "index_uuid": "xCEhwsp8Tey0-FLNFYVwSg", - "prefer_ilm": true, - "ilm_policy": "pre-dsl-ilm-policy", + "prefer_ilm": true, + "ilm_policy": "pre-dsl-ilm-policy", "managed_by": "Index Lifecycle Management" <1> }, { @@ -250,7 +250,7 @@ GET _data_stream/dsl-data-stream "enabled": true, "data_retention": "7d" }, - "ilm_policy": "pre-dsl-ilm-policy", + "ilm_policy": "pre-dsl-ilm-policy", "next_generation_managed_by": "Data stream lifecycle", <3> "prefer_ilm": false, <4> "hidden": false, @@ -265,7 +265,7 @@ GET _data_stream/dsl-data-stream // TESTRESPONSE[s/"index_uuid": "xCEhwsp8Tey0-FLNFYVwSg"/"index_uuid": $body.data_streams.0.indices.0.index_uuid/] // TESTRESPONSE[s/"index_name": ".ds-dsl-data-stream-2023.10.19-000002"/"index_name": $body.data_streams.0.indices.1.index_name/] // TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8DJ5gw"/"index_uuid": $body.data_streams.0.indices.1.index_uuid/] -// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW"/] +// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_indices":[],"failure_store":false/] <1> The existing backing index will continue to be managed by {ilm-init} <2> The existing backing index will continue to be managed by {ilm-init} @@ -273,7 +273,7 @@ GET _data_stream/dsl-data-stream <4> The `prefer_ilm` setting value we configured in the index template is reflected and will be configured accordingly for new backing indices. 
-We'll now rollover the data stream to see the new generation index being managed by +We'll now rollover the data stream to see the new generation index being managed by Data stream lifecycle: [source,console] @@ -299,11 +299,11 @@ GET _data_stream/dsl-data-stream }, "indices": [ { - "index_name": ".ds-dsl-data-stream-2023.10.19-000001", + "index_name": ".ds-dsl-data-stream-2023.10.19-000001", "index_uuid": "xCEhwsp8Tey0-FLNFYVwSg", - "prefer_ilm": true, - "ilm_policy": "pre-dsl-ilm-policy", - "managed_by": "Index Lifecycle Management" <1> + "prefer_ilm": true, + "ilm_policy": "pre-dsl-ilm-policy", + "managed_by": "Index Lifecycle Management" <1> }, { "index_name": ".ds-dsl-data-stream-2023.10.19-000002", @@ -327,9 +327,9 @@ GET _data_stream/dsl-data-stream "enabled": true, "data_retention": "7d" }, - "ilm_policy": "pre-dsl-ilm-policy", - "next_generation_managed_by": "Data stream lifecycle", - "prefer_ilm": false, + "ilm_policy": "pre-dsl-ilm-policy", + "next_generation_managed_by": "Data stream lifecycle", + "prefer_ilm": false, "hidden": false, "system": false, "allow_custom_routing": false, @@ -344,7 +344,7 @@ GET _data_stream/dsl-data-stream // TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8DJ5gw"/"index_uuid": $body.data_streams.0.indices.1.index_uuid/] // TESTRESPONSE[s/"index_name": ".ds-dsl-data-stream-2023.10.19-000003"/"index_name": $body.data_streams.0.indices.2.index_name/] // TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8abcd1"/"index_uuid": $body.data_streams.0.indices.2.index_uuid/] -// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW"/] +// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_indices":[],"failure_store":false/] <1> The backing indices that existed before rollover will continue to be managed by {ilm-init} <2> The backing indices that existed before rollover will continue to be managed by {ilm-init} @@ -352,27 +352,27 @@ GET _data_stream/dsl-data-stream in the index template <4> The new write index is managed by `Data stream lifecycle` -We can easily change this data stream to be managed by {ilm-cap} because we didn't remove -the {ilm-cap} policy when we <>. +We can easily change this data stream to be managed by {ilm-cap} because we didn't remove +the {ilm-cap} policy when we <>. We can achieve this in two ways: 1. <> from the data streams 2. Disable Data stream lifecycle by configuring the `enabled` flag to `false`. 
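For option 1 above, the lifecycle would be removed from the data stream entirely, leaving the still-configured {ilm-init} policy to apply again. As a rough illustration (not part of this change), issuing that request from the Java low-level REST client could look like the sketch below; the `RestClient` construction is assumed and error handling is omitted:

[source,java]
----
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class RemoveLifecycleSketch {

    // Equivalent to: DELETE _data_stream/dsl-data-stream/_lifecycle
    static void removeLifecycle(RestClient client) throws Exception {
        Request request = new Request("DELETE", "/_data_stream/dsl-data-stream/_lifecycle");
        Response response = client.performRequest(request);
        // A successful call returns a 200 acknowledgement
        System.out.println(response.getStatusLine());
    }
}
----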
-Let's implement option 2 and disable the data stream lifecycle: +Let's implement option 2 and disable the data stream lifecycle: [source,console] ---- PUT _data_stream/dsl-data-stream/_lifecycle { "data_retention": "7d", - "enabled": false <1> + "enabled": false <1> } ---- // TEST[continued] -<1> The `enabled` flag can be ommitted and defaults to `true` however, here we +<1> The `enabled` flag can be omitted and defaults to `true`; however, here we explicitly configure it to `false` Let's check the state of the data stream: @@ -393,23 +393,23 @@ GET _data_stream/dsl-data-stream }, "indices": [ { - "index_name": ".ds-dsl-data-stream-2023.10.19-000001", + "index_name": ".ds-dsl-data-stream-2023.10.19-000001", "index_uuid": "xCEhwsp8Tey0-FLNFYVwSg", - "prefer_ilm": true, - "ilm_policy": "pre-dsl-ilm-policy", - "managed_by": "Index Lifecycle Management" + "prefer_ilm": true, + "ilm_policy": "pre-dsl-ilm-policy", + "managed_by": "Index Lifecycle Management" }, { "index_name": ".ds-dsl-data-stream-2023.10.19-000002", "index_uuid": "PA_JquKGSiKcAKBA8DJ5gw", "prefer_ilm": true, "ilm_policy": "pre-dsl-ilm-policy", - "managed_by": "Index Lifecycle Management" + "managed_by": "Index Lifecycle Management" }, { "index_name": ".ds-dsl-data-stream-2023.10.19-000003", "index_uuid": "PA_JquKGSiKcAKBA8abcd1", - "prefer_ilm": false, + "prefer_ilm": false, "ilm_policy": "pre-dsl-ilm-policy", "managed_by": "Index Lifecycle Management" <1> } @@ -421,9 +421,9 @@ GET _data_stream/dsl-data-stream "enabled": false, <2> "data_retention": "7d" }, - "ilm_policy": "pre-dsl-ilm-policy", - "next_generation_managed_by": "Index Lifecycle Management", <3> - "prefer_ilm": false, + "ilm_policy": "pre-dsl-ilm-policy", + "next_generation_managed_by": "Index Lifecycle Management", <3> + "prefer_ilm": false, "hidden": false, "system": false, "allow_custom_routing": false, @@ -438,13 +438,13 @@ GET _data_stream/dsl-data-stream // TESTRESPONSE[s/"index_uuid": "xCEhwsp8Tey0-FLNFYVwSg"/"index_uuid": $body.data_streams.0.indices.0.index_uuid/] // TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8DJ5gw"/"index_uuid": $body.data_streams.0.indices.1.index_uuid/] // TESTRESPONSE[s/"index_name": ".ds-dsl-data-stream-2023.10.19-000003"/"index_name": $body.data_streams.0.indices.2.index_name/] // TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8abcd1"/"index_uuid": $body.data_streams.0.indices.2.index_uuid/] -// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW"/] +// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_indices":[],"failure_store":false/] <1> The write index is now managed by {ilm-cap} -<2> The `lifecycle` configured on the data stream is now disabled. +<2> The `lifecycle` configured on the data stream is now disabled. <3> The next write index will be managed by {ilm-cap} Had we removed the {ilm-cap} policy from the index template when we <> -it, the write index of the data stream will now be `Unmanaged` because the index +it, the write index of the data stream would now be `Unmanaged` because the index wouldn't have the {ilm-cap} policy configured to fallback onto. ////////////////////////// diff --git a/docs/reference/esql/functions/ltrim.asciidoc b/docs/reference/esql/functions/ltrim.asciidoc index 6e6d30a73b865..e5230e4edd41a 100644 --- a/docs/reference/esql/functions/ltrim.asciidoc +++ b/docs/reference/esql/functions/ltrim.asciidoc @@ -1,6 +1,9 @@ [discrete] [[esql-ltrim]] === `LTRIM` +[.text-center] +image::esql/functions/signature/ltrim.svg[Embedded,opts=inline] + Removes leading whitespaces from strings.
[source.merge.styled,esql] @@ -11,3 +14,7 @@ include::{esql-specs}/string.csv-spec[tag=ltrim] |=== include::{esql-specs}/string.csv-spec[tag=ltrim-result] |=== + +Supported types: + +include::types/ltrim.asciidoc[] diff --git a/docs/reference/esql/functions/rtrim.asciidoc b/docs/reference/esql/functions/rtrim.asciidoc index 3224331e9ed6a..8eb0494e90d9e 100644 --- a/docs/reference/esql/functions/rtrim.asciidoc +++ b/docs/reference/esql/functions/rtrim.asciidoc @@ -1,6 +1,9 @@ [discrete] [[esql-rtrim]] === `RTRIM` +[.text-center] +image::esql/functions/signature/rtrim.svg[Embedded,opts=inline] + Removes trailing whitespaces from strings. [source.merge.styled,esql] @@ -11,3 +14,7 @@ include::{esql-specs}/string.csv-spec[tag=rtrim] |=== include::{esql-specs}/string.csv-spec[tag=rtrim-result] |=== + +Supported types: + +include::types/rtrim.asciidoc[] diff --git a/docs/reference/esql/functions/signature/case.svg b/docs/reference/esql/functions/signature/case.svg deleted file mode 100644 index 09e8f7efa2835..0000000000000 --- a/docs/reference/esql/functions/signature/case.svg +++ /dev/null @@ -1 +0,0 @@ -CASE(arg1,arg2) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/ltrim.svg b/docs/reference/esql/functions/signature/ltrim.svg index ad7a4da0248e6..327e75b92ca19 100644 --- a/docs/reference/esql/functions/signature/ltrim.svg +++ b/docs/reference/esql/functions/signature/ltrim.svg @@ -1 +1 @@ -LTRIM(arg1) \ No newline at end of file +LTRIM(str) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/rtrim.svg b/docs/reference/esql/functions/signature/rtrim.svg index 3d95ddf5ef6ef..b830bb59c5c31 100644 --- a/docs/reference/esql/functions/signature/rtrim.svg +++ b/docs/reference/esql/functions/signature/rtrim.svg @@ -1 +1 @@ -RTRIM(arg1) \ No newline at end of file +RTRIM(str) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/trim.svg b/docs/reference/esql/functions/signature/trim.svg index 6f1273142fa51..5fc865d306f11 100644 --- a/docs/reference/esql/functions/signature/trim.svg +++ b/docs/reference/esql/functions/signature/trim.svg @@ -1 +1 @@ -TRIM(arg1) \ No newline at end of file +TRIM(str) \ No newline at end of file diff --git a/docs/reference/esql/functions/types/ltrim.asciidoc b/docs/reference/esql/functions/types/ltrim.asciidoc index 11c02c8f0c3bb..26f4e7633d8ae 100644 --- a/docs/reference/esql/functions/types/ltrim.asciidoc +++ b/docs/reference/esql/functions/types/ltrim.asciidoc @@ -1,6 +1,6 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== -arg1 | result +str | result keyword | keyword text | text |=== diff --git a/docs/reference/esql/functions/types/rtrim.asciidoc b/docs/reference/esql/functions/types/rtrim.asciidoc index 11c02c8f0c3bb..26f4e7633d8ae 100644 --- a/docs/reference/esql/functions/types/rtrim.asciidoc +++ b/docs/reference/esql/functions/types/rtrim.asciidoc @@ -1,6 +1,6 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== -arg1 | result +str | result keyword | keyword text | text |=== diff --git a/docs/reference/esql/functions/types/trim.asciidoc b/docs/reference/esql/functions/types/trim.asciidoc index 11c02c8f0c3bb..26f4e7633d8ae 100644 --- a/docs/reference/esql/functions/types/trim.asciidoc +++ b/docs/reference/esql/functions/types/trim.asciidoc @@ -1,6 +1,6 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== -arg1 | result +str | result keyword | keyword text | text |=== diff --git a/docs/reference/indices/get-data-stream.asciidoc 
b/docs/reference/indices/get-data-stream.asciidoc index 1faee74ae953c..7701aa9f64cfe 100644 --- a/docs/reference/indices/get-data-stream.asciidoc +++ b/docs/reference/indices/get-data-stream.asciidoc @@ -156,8 +156,8 @@ Universally unique identifier (UUID) for the index. `prefer_ilm`:: (boolean) -Functionality in preview:[]. Indicates if this index is configured to prefer {ilm} -when both {ilm-cap} and <> are configured to +Functionality in preview:[]. Indicates if this index is configured to prefer {ilm} +when both {ilm-cap} and <> are configured to manage this index. `managed_by`:: @@ -223,8 +223,8 @@ Functionality in preview:[]. Indicates the system that will managed the next gen `prefer_ilm`:: (boolean) -Functionality in preview:[]. Indicates if the index template used to create the data -stream's backing indices is configured to prefer {ilm-cap} when both {ilm-cap} and +Functionality in preview:[]. Indicates if the index template used to create the data +stream's backing indices is configured to prefer {ilm-cap} when both {ilm-cap} and <> are configured to manage this index. `hidden`:: @@ -351,3 +351,4 @@ The API returns the following response: // TESTRESPONSE[s/"index_name": ".ds-my-data-stream-two-2099.03.08-000001"/"index_name": $body.data_streams.1.indices.0.index_name/] // TESTRESPONSE[s/"index_uuid": "3liBu2SYS5axasRt6fUIpA"/"index_uuid": $body.data_streams.1.indices.0.index_uuid/] // TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW"/] +// TESTRESPONSE[s/"replicated": false/"replicated": false,"failure_indices":[],"failure_store":false/] diff --git a/docs/reference/ingest/apis/index.asciidoc b/docs/reference/ingest/apis/index.asciidoc index 772c35d542c2f..04fcd500a9721 100644 --- a/docs/reference/ingest/apis/index.asciidoc +++ b/docs/reference/ingest/apis/index.asciidoc @@ -29,3 +29,4 @@ include::delete-pipeline.asciidoc[] include::geoip-stats-api.asciidoc[] include::get-pipeline.asciidoc[] include::simulate-pipeline.asciidoc[] +include::simulate-ingest.asciidoc[] diff --git a/docs/reference/ingest/apis/simulate-ingest.asciidoc b/docs/reference/ingest/apis/simulate-ingest.asciidoc new file mode 100644 index 0000000000000..36f1f089ce90e --- /dev/null +++ b/docs/reference/ingest/apis/simulate-ingest.asciidoc @@ -0,0 +1,361 @@ + +[[simulate-ingest-api]] +=== Simulate ingest API +++++ +Simulate ingest +++++ + +Executes ingest pipelines against a set of provided documents, optionally +with substitute pipeline definitions. This API is meant to be used for +troubleshooting or pipeline development, as it does not actually index any +data into {es}. 
+ +//// +[source,console] +---- +PUT /_ingest/pipeline/my-pipeline +{ + "description" : "example pipeline to simulate", + "processors": [ + { + "set" : { + "field" : "field1", + "value" : "value1" + } + } + ] +} + +PUT /_ingest/pipeline/my-final-pipeline +{ + "description" : "example final pipeline to simulate", + "processors": [ + { + "set" : { + "field" : "field2", + "value" : "value2" + } + } + ] +} + +PUT /my-index +{ + "settings": { + "index": { + "default_pipeline": "my-pipeline", + "final_pipeline": "my-final-pipeline" + } + } +} +---- +// TESTSETUP +//// + +[source,console] +---- +POST /_ingest/_simulate +{ + "docs": [ + { + "_index": "my-index", + "_id": "id", + "_source": { + "foo": "bar" + } + }, + { + "_index": "my-index", + "_id": "id", + "_source": { + "foo": "rab" + } + } + ], + "pipeline_substitutions": { <1> + "my-pipeline": { + "processors": [ + { + "set": { + "field": "field3", + "value": "value3" + } + } + ] + } + } +} +---- + +<1> This replaces the existing `my-pipeline` pipeline with the contents given here for the duration of this request. + +[[simulate-ingest-api-request]] +==== {api-request-title} + +`POST /_ingest/_simulate` + +`GET /_ingest/_simulate` + +`POST /_ingest//_simulate` + +`GET /_ingest//_simulate` + +[[simulate-ingest-api-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have the +`index` or `create` <> +to use this API. + +[[simulate-ingest-api-desc]] +==== {api-description-title} + +The simulate ingest API simulates ingesting data into an index. It +executes the default and final pipeline for that index against a set +of documents provided in the body of the request. If a pipeline +contains a <>, it follows that +reroute processor to the new index, executing that index's pipelines +as well the same way that a non-simulated ingest would. No data is +indexed into {es}. Instead, the transformed document is returned, +along with the list of pipelines that have been executed and the name +of the index where the document would have been indexed if this were +not a simulation. This differs from the +<> in that you specify a +single pipeline for that API, and it only runs that one pipeline. The +simulate pipeline API is more useful for developing a single pipeline, +while the simulate ingest API is more useful for troubleshooting the +interaction of the various pipelines that get applied when ingesting +into an index. + + +By default, the pipeline definitions that are currently in the system +are used. However, you can supply substitute pipeline definitions in the +body of the request. These will be used in place of the pipeline +definitions that are already in the system. This can be used to replace +existing pipeline definitions or to create new ones. The pipeline +substitutions are only used within this request. + +[[simulate-ingest-api-path-params]] +==== {api-path-parms-title} + +``:: +(Optional, string) +The index to simulate ingesting into. This can be overridden by specifying an index +on each document. If you provide a in the request path, it is used for any +documents that don’t explicitly specify an index argument. + +[[simulate-ingest-api-query-params]] +==== {api-query-parms-title} + +`pipeline`:: +(Optional, string) +Pipeline to use as the default pipeline. This can be used to override the default pipeline +of the index being ingested into. 
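As an illustration of combining the target in the request path with the `pipeline` query parameter (this example is not part of the change, and the index and pipeline names are made up), a call through the Java low-level REST client might look like the following sketch; the `RestClient` construction is assumed:

[source,java]
----
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class SimulateIngestSketch {

    // Simulates ingesting one document into my-index while overriding its default pipeline.
    static String simulateWithPipelineOverride(RestClient client) throws Exception {
        Request request = new Request("POST", "/_ingest/my-index/_simulate");
        request.addParameter("pipeline", "my-other-pipeline"); // overrides my-index's default pipeline
        request.setJsonEntity("""
            {
              "docs": [
                { "_id": "1", "_source": { "foo": "bar" } }
              ]
            }
            """);
        Response response = client.performRequest(request);
        // The body lists the transformed documents and the pipelines that were executed
        return EntityUtils.toString(response.getEntity());
    }
}
----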
+ + +[role="child_attributes"] +[[simulate-ingest-api-request-body]] +==== {api-request-body-title} + +`docs`:: +(Required, array of objects) +Sample documents to test in the pipeline. ++ +.Properties of `docs` objects +[%collapsible%open] +==== +`_id`:: +(Optional, string) +Unique identifier for the document. + +`_index`:: +(Optional, string) +Name of the index that the document will be ingested into. + +`_source`:: +(Required, object) +JSON body for the document. +==== + +`pipeline_substitutions`:: +(Optional, map of strings to objects) +Map of pipeline IDs to substitute pipeline definition objects. ++ +.Properties of pipeline definition objects +[%collapsible%open] +==== +include::put-pipeline.asciidoc[tag=pipeline-object] +==== + +[[simulate-ingest-api-example]] +==== {api-examples-title} + + +[[simulate-ingest-api-pre-existing-pipelines-ex]] +===== Use pre-existing pipeline definitions +In this example the index `index` has a default pipeline called `my-pipeline` and a final +pipeline called `my-final-pipeline`. Since both documents are being ingested into `index`, +both pipelines are executed using the pipeline definitions that are already in the system. + +[source,console] +---- +POST /_ingest/_simulate +{ + "docs": [ + { + "_index": "my-index", + "_id": "123", + "_source": { + "foo": "bar" + } + }, + { + "_index": "my-index", + "_id": "456", + "_source": { + "foo": "rab" + } + } + ] +} +---- + +The API returns the following response: + +[source,console-result] +---- +{ + "docs": [ + { + "doc": { + "_id": "123", + "_index": "my-index", + "_version": -3, + "_source": { + "field1": "value1", + "field2": "value2", + "foo": "bar" + }, + "executed_pipelines": [ + "my-pipeline", + "my-final-pipeline" + ] + } + }, + { + "doc": { + "_id": "456", + "_index": "my-index", + "_version": -3, + "_source": { + "field1": "value1", + "field2": "value2", + "foo": "rab" + }, + "executed_pipelines": [ + "my-pipeline", + "my-final-pipeline" + ] + } + } + ] +} +---- + +[[simulate-ingest-api-request-body-ex]] +===== Specify a pipeline substitution in the request body +In this example the index `index` has a default pipeline called `my-pipeline` and a final +pipeline called `my-final-pipeline`. But a substitute definition of `my-pipeline` is +provided in `pipeline_substitutions`. The substitute `my-pipeline` will be used in place of +the `my-pipeline` that is in the system, and then the `my-final-pipeline` that is already +defined in the system will be executed. 
+ +[source,console] +---- +POST /_ingest/_simulate +{ + "docs": [ + { + "_index": "my-index", + "_id": "123", + "_source": { + "foo": "bar" + } + }, + { + "_index": "my-index", + "_id": "456", + "_source": { + "foo": "rab" + } + } + ], + "pipeline_substitutions": { + "my-pipeline": { + "processors": [ + { + "uppercase": { + "field": "foo" + } + } + ] + } + } +} +---- + +The API returns the following response: + +[source,console-result] +---- +{ + "docs": [ + { + "doc": { + "_id": "123", + "_index": "my-index", + "_version": -3, + "_source": { + "field2": "value2", + "foo": "BAR" + }, + "executed_pipelines": [ + "my-pipeline", + "my-final-pipeline" + ] + } + }, + { + "doc": { + "_id": "456", + "_index": "my-index", + "_version": -3, + "_source": { + "field2": "value2", + "foo": "RAB" + }, + "executed_pipelines": [ + "my-pipeline", + "my-final-pipeline" + ] + } + } + ] +} +---- + +//// +[source,console] +---- +DELETE /my-index + +DELETE /_ingest/pipeline/* +---- + +[source,console-result] +---- +{ + "acknowledged": true +} +---- +//// diff --git a/docs/reference/release-notes.asciidoc b/docs/reference/release-notes.asciidoc index 65a5c741a83c5..011c44216cc0c 100644 --- a/docs/reference/release-notes.asciidoc +++ b/docs/reference/release-notes.asciidoc @@ -7,6 +7,7 @@ This section summarizes the changes in each release. * <> +* <> * <> * <> * <> @@ -55,6 +56,7 @@ This section summarizes the changes in each release. -- include::release-notes/8.12.0.asciidoc[] +include::release-notes/8.11.1.asciidoc[] include::release-notes/8.11.0.asciidoc[] include::release-notes/8.10.4.asciidoc[] include::release-notes/8.10.3.asciidoc[] diff --git a/docs/reference/release-notes/8.11.1.asciidoc b/docs/reference/release-notes/8.11.1.asciidoc new file mode 100644 index 0000000000000..f40df70971317 --- /dev/null +++ b/docs/reference/release-notes/8.11.1.asciidoc @@ -0,0 +1,38 @@ +[[release-notes-8.11.1]] +== {es} version 8.11.1 + +Also see <>. 
+ +[[bug-8.11.1]] +[float] +=== Bug fixes + +Allocation:: +* Avoid negative `DesiredBalanceStats#lastConvergedIndex` {es-pull}101998[#101998] + +Authentication:: +* Fix memory leak from JWT cache (and fix the usage of the JWT auth cache) {es-pull}101799[#101799] + +Machine Learning:: +* Fix inference timeout from the Inference Ingest Processor {es-pull}101971[#101971] + +Mapping:: +* Fix incorrect dynamic mapping for non-numeric-value arrays #101965 {es-pull}101967[#101967] + +Network:: +* Fail listener on exception in `TcpTransport#openConnection` {es-pull}101907[#101907] (issue: {es-issue}100510[#100510]) + +Search:: +* Dry up `AsyncTaskIndexService` memory management and fix inefficient circuit breaker use {es-pull}101892[#101892] + +Snapshot/Restore:: +* Respect regional AWS STS endpoints {es-pull}101705[#101705] (issue: {es-issue}89175[#89175]) + +[[enhancement-8.11.1]] +[float] +=== Enhancements + +Machine Learning:: +* Add inference counts by model to the machine learning usage stats {es-pull}101915[#101915] + + diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index e340efb0c6987..caf8c9f8c0cec 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -2894,14 +2894,9 @@ - - - - - - - - + + + diff --git a/libs/core/src/main/java/org/elasticsearch/core/CheckedConsumer.java b/libs/core/src/main/java/org/elasticsearch/core/CheckedConsumer.java index 6698b47f62f3c..56325dc21bb4a 100644 --- a/libs/core/src/main/java/org/elasticsearch/core/CheckedConsumer.java +++ b/libs/core/src/main/java/org/elasticsearch/core/CheckedConsumer.java @@ -8,12 +8,20 @@ package org.elasticsearch.core; -import java.util.function.Consumer; +import java.util.Objects; /** - * A {@link Consumer}-like interface which allows throwing checked exceptions. + * A {@link java.util.function.Consumer}-like interface which allows throwing checked exceptions. 
*/ @FunctionalInterface public interface CheckedConsumer { void accept(T t) throws E; + + default CheckedConsumer andThen(CheckedConsumer after) throws E { + Objects.requireNonNull(after); + return (T t) -> { + accept(t); + after.accept(t); + }; + } } diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index 90a8d3379775f..35face57b8294 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -91,6 +91,7 @@ import org.apache.lucene.analysis.ru.RussianAnalyzer; import org.apache.lucene.analysis.shingle.ShingleFilter; import org.apache.lucene.analysis.snowball.SnowballFilter; +import org.apache.lucene.analysis.sr.SerbianAnalyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.analysis.sv.SwedishAnalyzer; import org.apache.lucene.analysis.th.ThaiAnalyzer; @@ -197,6 +198,7 @@ public Map>> getAn analyzers.put("portuguese", PortugueseAnalyzerProvider::new); analyzers.put("romanian", RomanianAnalyzerProvider::new); analyzers.put("russian", RussianAnalyzerProvider::new); + analyzers.put("serbian", SerbianAnalyzerProvider::new); analyzers.put("sorani", SoraniAnalyzerProvider::new); analyzers.put("spanish", SpanishAnalyzerProvider::new); analyzers.put("swedish", SwedishAnalyzerProvider::new); @@ -447,6 +449,7 @@ public List getPreBuiltAnalyzerProviderFactorie analyzers.add(new PreBuiltAnalyzerProviderFactory("portuguese", CachingStrategy.LUCENE, PortugueseAnalyzer::new)); analyzers.add(new PreBuiltAnalyzerProviderFactory("romanian", CachingStrategy.LUCENE, RomanianAnalyzer::new)); analyzers.add(new PreBuiltAnalyzerProviderFactory("russian", CachingStrategy.LUCENE, RussianAnalyzer::new)); + analyzers.add(new PreBuiltAnalyzerProviderFactory("serbian", CachingStrategy.LUCENE, SerbianAnalyzer::new)); analyzers.add(new PreBuiltAnalyzerProviderFactory("sorani", CachingStrategy.LUCENE, SoraniAnalyzer::new)); analyzers.add(new PreBuiltAnalyzerProviderFactory("spanish", CachingStrategy.LUCENE, SpanishAnalyzer::new)); analyzers.add(new PreBuiltAnalyzerProviderFactory("swedish", CachingStrategy.LUCENE, SwedishAnalyzer::new)); diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SerbianAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SerbianAnalyzerProvider.java new file mode 100644 index 0000000000000..567502b75bced --- /dev/null +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SerbianAnalyzerProvider.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.analysis.common; + +import org.apache.lucene.analysis.CharArraySet; +import org.apache.lucene.analysis.sr.SerbianAnalyzer; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; + +public class SerbianAnalyzerProvider extends AbstractIndexAnalyzerProvider { + + private final SerbianAnalyzer analyzer; + + SerbianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + super(name, settings); + analyzer = new SerbianAnalyzer( + Analysis.parseStopWords(env, settings, SerbianAnalyzer.getDefaultStopSet()), + Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET) + ); + } + + @Override + public SerbianAnalyzer get() { + return this.analyzer; + } +} diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactory.java index 8f9a882e29d2a..7385987567fb0 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactory.java @@ -70,6 +70,7 @@ import org.tartarus.snowball.ext.PortugueseStemmer; import org.tartarus.snowball.ext.RomanianStemmer; import org.tartarus.snowball.ext.RussianStemmer; +import org.tartarus.snowball.ext.SerbianStemmer; import org.tartarus.snowball.ext.SpanishStemmer; import org.tartarus.snowball.ext.SwedishStemmer; import org.tartarus.snowball.ext.TurkishStemmer; @@ -237,6 +238,9 @@ public TokenStream create(TokenStream tokenStream) { } else if ("light_russian".equalsIgnoreCase(language) || "lightRussian".equalsIgnoreCase(language)) { return new RussianLightStemFilter(tokenStream); + } else if ("serbian".equalsIgnoreCase(language)) { + return new SnowballFilter(tokenStream, new SerbianStemmer()); + // Spanish stemmers } else if ("spanish".equalsIgnoreCase(language)) { return new SnowballFilter(tokenStream, new SpanishStemmer()); diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/20_analyzers.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/20_analyzers.yml index dcec02729a44e..c03bdb3111050 100644 --- a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/20_analyzers.yml +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/20_analyzers.yml @@ -988,6 +988,35 @@ - length: { tokens: 1 } - match: { tokens.0.token: вмест } +--- +"serbian": + - do: + indices.create: + index: test + body: + settings: + analysis: + analyzer: + my_analyzer: + type: serbian + + - do: + indices.analyze: + body: + text: будите шампиони + analyzer: serbian + - length: { tokens: 1 } + - match: { tokens.0.token: sampion } + + - do: + indices.analyze: + index: test + body: + text: будите шампиони + analyzer: my_analyzer + - length: { tokens: 1 } + - match: { tokens.0.token: sampion } + --- "sorani": - do: diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java index 4f1c33819fee9..0fd9a036b4a55 100644 --- 
a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java @@ -1795,7 +1795,9 @@ public ClusterState execute(ClusterState currentState) throws Exception { original.isSystem(), original.isAllowCustomRouting(), original.getIndexMode(), - original.getLifecycle() + original.getLifecycle(), + original.isFailureStore(), + original.getFailureIndices() ); brokenDataStreamHolder.set(broken); return ClusterState.builder(currentState) diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java index 5bbc007cfb272..7ac86c8aee614 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java @@ -622,35 +622,6 @@ public void testDataLifecycleServiceConfiguresTheMergePolicy() throws Exception }); } - private static List getBackingIndices(String dataStreamName) { - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName }); - GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) - .actionGet(); - assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); - assertThat(getDataStreamResponse.getDataStreams().get(0).getDataStream().getName(), equalTo(dataStreamName)); - return getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices().stream().map(Index::getName).toList(); - } - - static void indexDocs(String dataStream, int numDocs) { - BulkRequest bulkRequest = new BulkRequest(); - for (int i = 0; i < numDocs; i++) { - String value = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(System.currentTimeMillis()); - bulkRequest.add( - new IndexRequest(dataStream).opType(DocWriteRequest.OpType.CREATE) - .source(String.format(Locale.ROOT, "{\"%s\":\"%s\"}", DEFAULT_TIMESTAMP_FIELD, value), XContentType.JSON) - ); - } - BulkResponse bulkResponse = client().bulk(bulkRequest).actionGet(); - assertThat(bulkResponse.getItems().length, equalTo(numDocs)); - String backingIndexPrefix = DataStream.BACKING_INDEX_PREFIX + dataStream; - for (BulkItemResponse itemResponse : bulkResponse) { - assertThat(itemResponse.getFailureMessage(), nullValue()); - assertThat(itemResponse.status(), equalTo(RestStatus.CREATED)); - assertThat(itemResponse.getIndex(), startsWith(backingIndexPrefix)); - } - indicesAdmin().refresh(new RefreshRequest(dataStream)).actionGet(); - } - public void testReenableDataStreamLifecycle() throws Exception { // start with a lifecycle that's not enabled DataStreamLifecycle lifecycle = new DataStreamLifecycle(null, null, false); @@ -700,6 +671,35 @@ public void testReenableDataStreamLifecycle() throws Exception { }); } + private static List getBackingIndices(String dataStreamName) { + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName }); + GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) + .actionGet(); + assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); + 
assertThat(getDataStreamResponse.getDataStreams().get(0).getDataStream().getName(), equalTo(dataStreamName)); + return getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices().stream().map(Index::getName).toList(); + } + + static void indexDocs(String dataStream, int numDocs) { + BulkRequest bulkRequest = new BulkRequest(); + for (int i = 0; i < numDocs; i++) { + String value = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(System.currentTimeMillis()); + bulkRequest.add( + new IndexRequest(dataStream).opType(DocWriteRequest.OpType.CREATE) + .source(String.format(Locale.ROOT, "{\"%s\":\"%s\"}", DEFAULT_TIMESTAMP_FIELD, value), XContentType.JSON) + ); + } + BulkResponse bulkResponse = client().bulk(bulkRequest).actionGet(); + assertThat(bulkResponse.getItems().length, equalTo(numDocs)); + String backingIndexPrefix = DataStream.BACKING_INDEX_PREFIX + dataStream; + for (BulkItemResponse itemResponse : bulkResponse) { + assertThat(itemResponse.getFailureMessage(), nullValue()); + assertThat(itemResponse.status(), equalTo(RestStatus.CREATED)); + assertThat(itemResponse.getIndex(), startsWith(backingIndexPrefix)); + } + indicesAdmin().refresh(new RefreshRequest(dataStream)).actionGet(); + } + static void putComposableIndexTemplate( String id, @Nullable String mappings, diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleStatsIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleStatsIT.java new file mode 100644 index 0000000000000..cce9132d99d19 --- /dev/null +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleStatsIT.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ +package org.elasticsearch.datastreams.lifecycle; + +import org.elasticsearch.client.Request; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.datastreams.DisabledSecurityDataStreamTestCase; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; + +public class DataStreamLifecycleStatsIT extends DisabledSecurityDataStreamTestCase { + + @Before + public void updateClusterSettings() throws IOException { + updateClusterSettings( + Settings.builder() + .put("data_streams.lifecycle.poll_interval", "1s") + .put("cluster.lifecycle.default.rollover", "min_docs=1,max_docs=1") + .build() + ); + } + + @After + public void cleanUp() throws IOException { + adminClient().performRequest(new Request("DELETE", "_data_stream/*?expand_wildcards=hidden")); + } + + @SuppressWarnings("unchecked") + public void testStats() throws Exception { + // Check empty stats and wait until we have 2 executions + assertBusy(() -> { + Request request = new Request("GET", "/_lifecycle/stats"); + Map response = entityAsMap(client().performRequest(request)); + assertThat(response.get("data_stream_count"), is(0)); + assertThat(response.get("data_streams"), is(List.of())); + assertThat(response.containsKey("last_run_duration_in_millis"), is(true)); + assertThat(response.containsKey("time_between_starts_in_millis"), is(true)); + }); + + // Create a template + Request putComposableIndexTemplateRequest = new Request("POST", "/_index_template/1"); + putComposableIndexTemplateRequest.setJsonEntity(""" + { + "index_patterns": ["my-data-stream-*"], + "data_stream": {}, + "template": { + "lifecycle": {} + } + } + """); + assertOK(client().performRequest(putComposableIndexTemplateRequest)); + + // Create two data streams with one doc each + Request createDocRequest = new Request("POST", "/my-data-stream-1/_doc?refresh=true"); + createDocRequest.setJsonEntity("{ \"@timestamp\": \"2022-12-12\"}"); + assertOK(client().performRequest(createDocRequest)); + createDocRequest = new Request("POST", "/my-data-stream-2/_doc?refresh=true"); + createDocRequest.setJsonEntity("{ \"@timestamp\": \"2022-12-12\"}"); + assertOK(client().performRequest(createDocRequest)); + + Request request = new Request("GET", "/_lifecycle/stats"); + Map response = entityAsMap(client().performRequest(request)); + assertThat(response.get("data_stream_count"), is(2)); + List> dataStreams = (List>) response.get("data_streams"); + assertThat(dataStreams.get(0).get("name"), is("my-data-stream-1")); + assertThat((Integer) dataStreams.get(0).get("backing_indices_in_total"), greaterThanOrEqualTo(1)); + assertThat((Integer) dataStreams.get(0).get("backing_indices_in_error"), is(0)); + assertThat(dataStreams.get(1).get("name"), is("my-data-stream-2")); + assertThat((Integer) dataStreams.get(1).get("backing_indices_in_total"), greaterThanOrEqualTo(1)); + assertThat((Integer) dataStreams.get(0).get("backing_indices_in_error"), is(0)); + assertThat(response.containsKey("last_run_duration_in_millis"), is(true)); + assertThat(response.containsKey("time_between_starts_in_millis"), is(true)); + } +} diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java index 2cf44dc0e3218..dd8e13cf18408 100644 --- 
a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java @@ -40,11 +40,14 @@ import org.elasticsearch.datastreams.lifecycle.action.DeleteDataStreamLifecycleAction; import org.elasticsearch.datastreams.lifecycle.action.ExplainDataStreamLifecycleAction; import org.elasticsearch.datastreams.lifecycle.action.GetDataStreamLifecycleAction; +import org.elasticsearch.datastreams.lifecycle.action.GetDataStreamLifecycleStatsAction; import org.elasticsearch.datastreams.lifecycle.action.PutDataStreamLifecycleAction; import org.elasticsearch.datastreams.lifecycle.action.TransportDeleteDataStreamLifecycleAction; import org.elasticsearch.datastreams.lifecycle.action.TransportExplainDataStreamLifecycleAction; import org.elasticsearch.datastreams.lifecycle.action.TransportGetDataStreamLifecycleAction; +import org.elasticsearch.datastreams.lifecycle.action.TransportGetDataStreamLifecycleStatsAction; import org.elasticsearch.datastreams.lifecycle.action.TransportPutDataStreamLifecycleAction; +import org.elasticsearch.datastreams.lifecycle.rest.RestDataStreamLifecycleStatsAction; import org.elasticsearch.datastreams.lifecycle.rest.RestDeleteDataStreamLifecycleAction; import org.elasticsearch.datastreams.lifecycle.rest.RestExplainDataStreamLifecycleAction; import org.elasticsearch.datastreams.lifecycle.rest.RestGetDataStreamLifecycleAction; @@ -189,6 +192,7 @@ public Collection createComponents(PluginServices services) { actions.add(new ActionHandler<>(GetDataStreamLifecycleAction.INSTANCE, TransportGetDataStreamLifecycleAction.class)); actions.add(new ActionHandler<>(DeleteDataStreamLifecycleAction.INSTANCE, TransportDeleteDataStreamLifecycleAction.class)); actions.add(new ActionHandler<>(ExplainDataStreamLifecycleAction.INSTANCE, TransportExplainDataStreamLifecycleAction.class)); + actions.add(new ActionHandler<>(GetDataStreamLifecycleStatsAction.INSTANCE, TransportGetDataStreamLifecycleStatsAction.class)); return actions; } @@ -218,6 +222,7 @@ public List getRestHandlers( handlers.add(new RestGetDataStreamLifecycleAction()); handlers.add(new RestDeleteDataStreamLifecycleAction()); handlers.add(new RestExplainDataStreamLifecycleAction()); + handlers.add(new RestDataStreamLifecycleStatsAction()); return handlers; } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportAction.java index de81ca9bef18c..e44ee5107711f 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportAction.java @@ -136,19 +136,9 @@ static GetDataStreamAction.Response innerOperation( Map backingIndicesSettingsValues = new HashMap<>(); Metadata metadata = state.getMetadata(); - for (Index index : dataStream.getIndices()) { - IndexMetadata indexMetadata = metadata.index(index); - Boolean preferIlm = PREFER_ILM_SETTING.get(indexMetadata.getSettings()); - assert preferIlm != null : "must use the default prefer ilm setting value, if nothing else"; - ManagedBy managedBy; - if (metadata.isIndexManagedByILM(indexMetadata)) { - managedBy = ManagedBy.ILM; - } else if (dataStream.isIndexManagedByDataStreamLifecycle(index, metadata::index)) { - managedBy = ManagedBy.LIFECYCLE; - } else { - managedBy = 
ManagedBy.UNMANAGED; - } - backingIndicesSettingsValues.put(index, new IndexProperties(preferIlm, indexMetadata.getLifecyclePolicyName(), managedBy)); + collectIndexSettingsValues(dataStream, backingIndicesSettingsValues, metadata, dataStream.getIndices()); + if (DataStream.isFailureStoreEnabled() && dataStream.getFailureIndices().isEmpty() == false) { + collectIndexSettingsValues(dataStream, backingIndicesSettingsValues, metadata, dataStream.getFailureIndices()); } GetDataStreamAction.Response.TimeSeries timeSeries = null; @@ -213,6 +203,28 @@ static GetDataStreamAction.Response innerOperation( ); } + private static void collectIndexSettingsValues( + DataStream dataStream, + Map backingIndicesSettingsValues, + Metadata metadata, + List backingIndices + ) { + for (Index index : backingIndices) { + IndexMetadata indexMetadata = metadata.index(index); + Boolean preferIlm = PREFER_ILM_SETTING.get(indexMetadata.getSettings()); + assert preferIlm != null : "must use the default prefer ilm setting value, if nothing else"; + ManagedBy managedBy; + if (metadata.isIndexManagedByILM(indexMetadata)) { + managedBy = ManagedBy.ILM; + } else if (dataStream.isIndexManagedByDataStreamLifecycle(index, metadata::index)) { + managedBy = ManagedBy.LIFECYCLE; + } else { + managedBy = ManagedBy.UNMANAGED; + } + backingIndicesSettingsValues.put(index, new IndexProperties(preferIlm, indexMetadata.getLifecyclePolicyName(), managedBy)); + } + } + static List getDataStreams( ClusterState clusterState, IndexNameExpressionResolver iner, diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleErrorStore.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleErrorStore.java index 47589fd7276f4..01ccbdbe3ffec 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleErrorStore.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleErrorStore.java @@ -13,7 +13,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.core.Nullable; -import java.util.List; +import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.function.LongSupplier; @@ -87,7 +87,7 @@ public ErrorEntry getError(String indexName) { /** * Return an immutable view (a snapshot) of the tracked indices at the moment this method is called. 
*/ - public List getAllIndices() { - return List.copyOf(indexNameToError.keySet()); + public Set getAllIndices() { + return Set.copyOf(indexNameToError.keySet()); } } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java index 03d1340c14dbb..9f9a90704167d 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java @@ -175,6 +175,13 @@ public class DataStreamLifecycleService implements ClusterStateListener, Closeab */ private volatile int signallingErrorRetryInterval; + /** + * The following stats are tracking how the data stream lifecycle runs are performing time wise + */ + private volatile Long lastRunStartedAt = null; + private volatile Long lastRunDuration = null; + private volatile Long timeBetweenStarts = null; + private static final SimpleBatchedExecutor FORCE_MERGE_STATE_UPDATE_TASK_EXECUTOR = new SimpleBatchedExecutor<>() { @Override @@ -299,6 +306,11 @@ public void triggered(SchedulerEngine.Event event) { */ // default visibility for testing purposes void run(ClusterState state) { + long startTime = nowSupplier.getAsLong(); + if (lastRunStartedAt != null) { + timeBetweenStarts = startTime - lastRunStartedAt; + } + lastRunStartedAt = startTime; int affectedIndices = 0; int affectedDataStreams = 0; for (DataStream dataStream : state.metadata().dataStreams().values()) { @@ -396,8 +408,10 @@ void run(ClusterState state) { affectedIndices += indicesToExcludeForRemainingRun.size(); affectedDataStreams++; } + lastRunDuration = nowSupplier.getAsLong() - lastRunStartedAt; logger.trace( - "Data stream lifecycle service performed operations on [{}] indices, part of [{}] data streams", + "Data stream lifecycle service run for {} and performed operations on [{}] indices, part of [{}] data streams", + TimeValue.timeValueMillis(lastRunDuration).toHumanReadableString(2), affectedIndices, affectedDataStreams ); @@ -1193,6 +1207,22 @@ static TimeValue getRetentionConfiguration(DataStream dataStream) { return dataStream.getLifecycle().getEffectiveDataRetention(); } + /** + * @return the duration of the last run in millis or null if the service hasn't completed a run yet. + */ + @Nullable + public Long getLastRunDuration() { + return lastRunDuration; + } + + /** + * @return the time passed between the start times of the last two consecutive runs or null if the service hasn't started twice yet. + */ + @Nullable + public Long getTimeBetweenStarts() { + return timeBetweenStarts; + } + /** * Action listener that records the encountered failure using the provided recordError callback for the * provided target index. If the listener is notified of success it will clear the recorded entry for the provided diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleStatsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleStatsAction.java new file mode 100644 index 0000000000000..c3444a67b847c --- /dev/null +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleStatsAction.java @@ -0,0 +1,154 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ +package org.elasticsearch.datastreams.lifecycle.action; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.master.MasterNodeReadRequest; +import org.elasticsearch.common.collect.Iterators; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ChunkedToXContentObject; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.xcontent.ToXContent; + +import java.io.IOException; +import java.util.Iterator; +import java.util.List; +import java.util.Objects; + +/** + * This action retrieves the data stream lifecycle stats from the master node. + */ +public class GetDataStreamLifecycleStatsAction extends ActionType { + + public static final GetDataStreamLifecycleStatsAction INSTANCE = new GetDataStreamLifecycleStatsAction(); + public static final String NAME = "cluster:monitor/data_stream/lifecycle/stats"; + + private GetDataStreamLifecycleStatsAction() { + super(NAME, Response::new); + } + + public static class Request extends MasterNodeReadRequest { + + public Request(StreamInput in) throws IOException { + super(in); + } + + public Request() {} + + @Override + public ActionRequestValidationException validate() { + return null; + } + } + + public static class Response extends ActionResponse implements ChunkedToXContentObject { + + private final Long runDuration; + private final Long timeBetweenStarts; + private final List dataStreamStats; + + public Response(@Nullable Long runDuration, @Nullable Long timeBetweenStarts, List dataStreamStats) { + this.runDuration = runDuration; + this.timeBetweenStarts = timeBetweenStarts; + this.dataStreamStats = dataStreamStats; + } + + public Response(StreamInput in) throws IOException { + super(in); + this.runDuration = in.readOptionalVLong(); + this.timeBetweenStarts = in.readOptionalVLong(); + this.dataStreamStats = in.readCollectionAsImmutableList(DataStreamStats::read); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalVLong(runDuration); + out.writeOptionalVLong(timeBetweenStarts); + out.writeCollection(dataStreamStats, (o, v) -> v.writeTo(o)); + } + + public Long getRunDuration() { + return runDuration; + } + + public Long getTimeBetweenStarts() { + return timeBetweenStarts; + } + + public List getDataStreamStats() { + return dataStreamStats; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response other = (Response) o; + return Objects.equals(runDuration, other.runDuration) + && Objects.equals(timeBetweenStarts, other.timeBetweenStarts) + && Objects.equals(dataStreamStats, other.dataStreamStats); + } + + @Override + public int hashCode() { + return Objects.hash(runDuration, timeBetweenStarts, dataStreamStats); + } + + @Override + public Iterator toXContentChunked(ToXContent.Params outerParams) { + return Iterators.concat(Iterators.single((builder, params) -> { + 
builder.startObject(); + if (runDuration != null) { + builder.field("last_run_duration_in_millis", runDuration); + if (builder.humanReadable()) { + builder.field("last_run_duration", TimeValue.timeValueMillis(runDuration).toHumanReadableString(2)); + } + } + if (timeBetweenStarts != null) { + builder.field("time_between_starts_in_millis", timeBetweenStarts); + if (builder.humanReadable()) { + builder.field("time_between_starts", TimeValue.timeValueMillis(timeBetweenStarts).toHumanReadableString(2)); + } + } + builder.field("data_stream_count", dataStreamStats.size()); + builder.startArray("data_streams"); + return builder; + }), Iterators.map(dataStreamStats.iterator(), stat -> (builder, params) -> { + builder.startObject(); + builder.field("name", stat.dataStreamName); + builder.field("backing_indices_in_total", stat.backingIndicesInTotal); + builder.field("backing_indices_in_error", stat.backingIndicesInError); + builder.endObject(); + return builder; + }), Iterators.single((builder, params) -> { + builder.endArray(); + builder.endObject(); + return builder; + })); + } + + public record DataStreamStats(String dataStreamName, int backingIndicesInTotal, int backingIndicesInError) implements Writeable { + + public static DataStreamStats read(StreamInput in) throws IOException { + return new DataStreamStats(in.readString(), in.readVInt(), in.readVInt()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(dataStreamName); + out.writeVInt(backingIndicesInTotal); + out.writeVInt(backingIndicesInError); + } + } + } +} diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleStatsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleStatsAction.java new file mode 100644 index 0000000000000..03bc1d129eaba --- /dev/null +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleStatsAction.java @@ -0,0 +1,110 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ +package org.elasticsearch.datastreams.lifecycle.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService; +import org.elasticsearch.index.Index; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.ArrayList; +import java.util.Comparator; +import java.util.List; +import java.util.Set; + +/** + * Exposes stats about the latest lifecycle run and the error store. + */ +public class TransportGetDataStreamLifecycleStatsAction extends TransportMasterNodeReadAction< + GetDataStreamLifecycleStatsAction.Request, + GetDataStreamLifecycleStatsAction.Response> { + + private final DataStreamLifecycleService lifecycleService; + + @Inject + public TransportGetDataStreamLifecycleStatsAction( + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + DataStreamLifecycleService lifecycleService + ) { + super( + GetDataStreamLifecycleStatsAction.NAME, + transportService, + clusterService, + threadPool, + actionFilters, + GetDataStreamLifecycleStatsAction.Request::new, + indexNameExpressionResolver, + GetDataStreamLifecycleStatsAction.Response::new, + EsExecutors.DIRECT_EXECUTOR_SERVICE + ); + this.lifecycleService = lifecycleService; + } + + @Override + protected void masterOperation( + Task task, + GetDataStreamLifecycleStatsAction.Request request, + ClusterState state, + ActionListener listener + ) throws Exception { + listener.onResponse(collectStats(state)); + } + + // Visible for testing + GetDataStreamLifecycleStatsAction.Response collectStats(ClusterState state) { + Metadata metadata = state.metadata(); + Set indicesInErrorStore = lifecycleService.getErrorStore().getAllIndices(); + List dataStreamStats = new ArrayList<>(); + for (DataStream dataStream : state.metadata().dataStreams().values()) { + if (dataStream.getLifecycle() != null && dataStream.getLifecycle().isEnabled()) { + int total = 0; + int inError = 0; + for (Index index : dataStream.getIndices()) { + if (dataStream.isIndexManagedByDataStreamLifecycle(index, metadata::index)) { + total++; + if (indicesInErrorStore.contains(index.getName())) { + inError++; + } + } + } + dataStreamStats.add(new GetDataStreamLifecycleStatsAction.Response.DataStreamStats(dataStream.getName(), total, inError)); + } + } + return new GetDataStreamLifecycleStatsAction.Response( + lifecycleService.getLastRunDuration(), + lifecycleService.getTimeBetweenStarts(), + dataStreamStats.isEmpty() + ? 
dataStreamStats + : dataStreamStats.stream() + .sorted(Comparator.comparing(GetDataStreamLifecycleStatsAction.Response.DataStreamStats::dataStreamName)) + .toList() + ); + } + + @Override + protected ClusterBlockException checkBlock(GetDataStreamLifecycleStatsAction.Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + } +} diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDataStreamLifecycleStatsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDataStreamLifecycleStatsAction.java new file mode 100644 index 0000000000000..2daff2a05940c --- /dev/null +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDataStreamLifecycleStatsAction.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.datastreams.lifecycle.rest; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.datastreams.lifecycle.action.GetDataStreamLifecycleStatsAction; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; +import org.elasticsearch.rest.action.RestChunkedToXContentListener; + +import java.util.List; + +import static org.elasticsearch.rest.RestRequest.Method.GET; + +@ServerlessScope(Scope.PUBLIC) +public class RestDataStreamLifecycleStatsAction extends BaseRestHandler { + + @Override + public String getName() { + return "data_stream_lifecycle_stats_action"; + } + + @Override + public List routes() { + return List.of(new Route(GET, "/_lifecycle/stats")); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { + String masterNodeTimeout = restRequest.param("master_timeout"); + GetDataStreamLifecycleStatsAction.Request request = new GetDataStreamLifecycleStatsAction.Request(); + if (masterNodeTimeout != null) { + request.masterNodeTimeout(masterNodeTimeout); + } + return channel -> client.execute(GetDataStreamLifecycleStatsAction.INSTANCE, request, new RestChunkedToXContentListener<>(channel)); + } +} diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java index 23a86b657b82d..e622d16b5d4c9 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java @@ -315,7 +315,9 @@ public void testGetAdditionalIndexSettingsDataStreamAlreadyCreatedTimeSettingsMi ds.isSystem(), ds.isAllowCustomRouting(), IndexMode.TIME_SERIES, - ds.getLifecycle() + ds.getLifecycle(), + ds.isFailureStore(), + ds.getFailureIndices() ) ); Metadata metadata = mb.build(); diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeServiceTests.java 
b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeServiceTests.java index 989bebc68061d..c383991dba19c 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeServiceTests.java @@ -151,7 +151,9 @@ public void testUpdateTimeSeriesTemporalRange_NoUpdateBecauseReplicated() { d.isSystem(), d.isAllowCustomRouting(), d.getIndexMode(), - d.getLifecycle() + d.getLifecycle(), + d.isFailureStore(), + d.getFailureIndices() ) ) .build(); diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java index 12e1604d10c1f..5ebea62fc596a 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java @@ -33,6 +33,7 @@ import java.util.Map; import static org.elasticsearch.cluster.metadata.DataStream.getDefaultBackingIndexName; +import static org.elasticsearch.cluster.metadata.DataStream.getDefaultFailureStoreName; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; @@ -63,14 +64,16 @@ protected Response mutateInstance(Response instance) { @SuppressWarnings("unchecked") public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Exception { - // we'll test a data stream with 3 backing indices - two managed by ILM (having the ILM policy configured for them) - // and one without any ILM policy configured + // we'll test a data stream with 3 backing indices and a failure store - two backing indices managed by ILM (having the ILM policy + // configured for them) and the remainder without any ILM policy configured String dataStreamName = "logs"; Index firstGenerationIndex = new Index(getDefaultBackingIndexName(dataStreamName, 1), UUIDs.base64UUID()); Index secondGenerationIndex = new Index(getDefaultBackingIndexName(dataStreamName, 2), UUIDs.base64UUID()); Index writeIndex = new Index(getDefaultBackingIndexName(dataStreamName, 3), UUIDs.base64UUID()); + Index failureStoreIndex = new Index(getDefaultFailureStoreName(dataStreamName, 1, System.currentTimeMillis()), UUIDs.base64UUID()); List indices = List.of(firstGenerationIndex, secondGenerationIndex, writeIndex); + List failureStores = List.of(failureStoreIndex); { // data stream has an enabled lifecycle DataStream logs = new DataStream( @@ -83,7 +86,9 @@ public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Excepti false, true, IndexMode.STANDARD, - new DataStreamLifecycle() + new DataStreamLifecycle(), + true, + failureStores ); String ilmPolicyName = "rollover-30days"; @@ -93,6 +98,8 @@ public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Excepti secondGenerationIndex, new Response.IndexProperties(false, ilmPolicyName, ManagedBy.LIFECYCLE), writeIndex, + new Response.IndexProperties(false, null, ManagedBy.LIFECYCLE), + failureStoreIndex, new Response.IndexProperties(false, null, ManagedBy.LIFECYCLE) ); @@ -156,6 +163,18 @@ public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Excepti writeIndexRepresentation.get(Response.DataStreamInfo.MANAGED_BY.getPreferredName()), is(ManagedBy.LIFECYCLE.displayValue) ); + + List failureStoresRepresentation = 
(List) dataStreamMap.get( + DataStream.FAILURE_INDICES_FIELD.getPreferredName() + ); + Map failureStoreRepresentation = (Map) failureStoresRepresentation.get(0); + assertThat(failureStoreRepresentation.get("index_name"), is(failureStoreIndex.getName())); + assertThat(failureStoreRepresentation.get(Response.DataStreamInfo.PREFER_ILM.getPreferredName()), is(false)); + assertThat(failureStoreRepresentation.get(Response.DataStreamInfo.ILM_POLICY_FIELD.getPreferredName()), is(nullValue())); + assertThat( + failureStoreRepresentation.get(Response.DataStreamInfo.MANAGED_BY.getPreferredName()), + is(ManagedBy.LIFECYCLE.displayValue) + ); } } @@ -171,7 +190,9 @@ public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Excepti false, true, IndexMode.STANDARD, - new DataStreamLifecycle(null, null, false) + new DataStreamLifecycle(null, null, false), + true, + failureStores ); String ilmPolicyName = "rollover-30days"; @@ -181,6 +202,8 @@ public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Excepti secondGenerationIndex, new Response.IndexProperties(true, ilmPolicyName, ManagedBy.ILM), writeIndex, + new Response.IndexProperties(false, null, ManagedBy.UNMANAGED), + failureStoreIndex, new Response.IndexProperties(false, null, ManagedBy.UNMANAGED) ); @@ -233,6 +256,18 @@ public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Excepti writeIndexRepresentation.get(Response.DataStreamInfo.MANAGED_BY.getPreferredName()), is(ManagedBy.UNMANAGED.displayValue) ); + + List failureStoresRepresentation = (List) dataStreamMap.get( + DataStream.FAILURE_INDICES_FIELD.getPreferredName() + ); + Map failureStoreRepresentation = (Map) failureStoresRepresentation.get(0); + assertThat(failureStoreRepresentation.get("index_name"), is(failureStoreIndex.getName())); + assertThat(failureStoreRepresentation.get(Response.DataStreamInfo.PREFER_ILM.getPreferredName()), is(false)); + assertThat(failureStoreRepresentation.get(Response.DataStreamInfo.ILM_POLICY_FIELD.getPreferredName()), is(nullValue())); + assertThat( + failureStoreRepresentation.get(Response.DataStreamInfo.MANAGED_BY.getPreferredName()), + is(ManagedBy.UNMANAGED.displayValue) + ); } } } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleErrorStoreTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleErrorStoreTests.java index c1255cc9e3a72..9f1928374eb5f 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleErrorStoreTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleErrorStoreTests.java @@ -12,12 +12,13 @@ import org.elasticsearch.test.ESTestCase; import org.junit.Before; -import java.util.List; +import java.util.Set; import java.util.stream.Stream; import static org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleErrorStore.MAX_ERROR_MESSAGE_LENGTH; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -36,7 +37,7 @@ public void testRecordAndRetrieveError() { assertThat(existingRecordedError, is(nullValue())); assertThat(errorStore.getError("test"), is(notNullValue())); assertThat(errorStore.getAllIndices().size(), is(1)); - 
assertThat(errorStore.getAllIndices().get(0), is("test")); + assertThat(errorStore.getAllIndices(), hasItem("test")); existingRecordedError = errorStore.recordError("test", new IllegalStateException("bad state")); assertThat(existingRecordedError, is(notNullValue())); @@ -51,7 +52,7 @@ public void testRetrieveAfterClear() { public void testGetAllIndicesIsASnapshotViewOfTheStore() { Stream.iterate(0, i -> i + 1).limit(5).forEach(i -> errorStore.recordError("test" + i, new NullPointerException("testing"))); - List initialAllIndices = errorStore.getAllIndices(); + Set initialAllIndices = errorStore.getAllIndices(); assertThat(initialAllIndices.size(), is(5)); assertThat( initialAllIndices, diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java index 0ee168d130986..2445e6b0d72df 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java @@ -94,6 +94,7 @@ import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.cluster.metadata.IndexMetadata.APIBlock.WRITE; @@ -119,6 +120,7 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Mockito.mock; public class DataStreamLifecycleServiceTests extends ESTestCase { @@ -280,7 +282,9 @@ public void testRetentionNotExecutedForTSIndicesWithinTimeBounds() { dataStream.isSystem(), dataStream.isAllowCustomRouting(), dataStream.getIndexMode(), - DataStreamLifecycle.newBuilder().dataRetention(0L).build() + DataStreamLifecycle.newBuilder().dataRetention(0L).build(), + dataStream.isFailureStore(), + dataStream.getFailureIndices() ) ); clusterState = ClusterState.builder(clusterState).metadata(builder).build(); @@ -1376,6 +1380,31 @@ public void testTimeSeriesIndicesStillWithinTimeBounds() { } } + public void testTrackingTimeStats() { + AtomicLong now = new AtomicLong(0); + long delta = randomLongBetween(10, 10000); + DataStreamLifecycleService service = new DataStreamLifecycleService( + Settings.EMPTY, + getTransportRequestsRecordingClient(), + clusterService, + Clock.systemUTC(), + threadPool, + () -> now.getAndAdd(delta), + new DataStreamLifecycleErrorStore(() -> Clock.systemUTC().millis()), + mock(AllocationService.class) + ); + assertThat(service.getLastRunDuration(), is(nullValue())); + assertThat(service.getTimeBetweenStarts(), is(nullValue())); + + service.run(ClusterState.EMPTY_STATE); + assertThat(service.getLastRunDuration(), is(delta)); + assertThat(service.getTimeBetweenStarts(), is(nullValue())); + + service.run(ClusterState.EMPTY_STATE); + assertThat(service.getLastRunDuration(), is(delta)); + assertThat(service.getTimeBetweenStarts(), is(2 * delta)); + } + /* * Creates a test cluster state with the given indexName. If customDataStreamLifecycleMetadata is not null, it is added as the value * of the index's custom metadata named "data_stream_lifecycle". 
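
The testTrackingTimeStats case above feeds DataStreamLifecycleService a fake time supplier and expects getLastRunDuration() and getTimeBetweenStarts() to stay null until the first and second run respectively, then to report the supplier's delta and twice the delta. A minimal sketch of how such tracking can hang off an injected LongSupplier is shown below; RunStatsTracker and its field names are hypothetical, chosen only to mirror the behaviour the test asserts, and are not the actual DataStreamLifecycleService code.

```java
import java.util.function.LongSupplier;

// Illustrative only: one call to the supplier at the start of a run and one at the end,
// exactly as the test drives it by advancing an AtomicLong by a fixed delta per call.
class RunStatsTracker {
    private final LongSupplier nowSupplier;
    private Long lastRunStartedAt = null;   // null until the first run starts
    private Long lastRunDuration = null;    // null until the first run completes
    private Long timeBetweenStarts = null;  // null until a second run starts

    RunStatsTracker(LongSupplier nowSupplier) {
        this.nowSupplier = nowSupplier;
    }

    void run(Runnable lifecycleWork) {
        long startedAt = nowSupplier.getAsLong();
        if (lastRunStartedAt != null) {
            timeBetweenStarts = startedAt - lastRunStartedAt;
        }
        lastRunStartedAt = startedAt;
        lifecycleWork.run();
        lastRunDuration = nowSupplier.getAsLong() - startedAt;
    }

    Long getLastRunDuration() { return lastRunDuration; }
    Long getTimeBetweenStarts() { return timeBetweenStarts; }
}
```

Injecting the supplier instead of reading System.currentTimeMillis() directly is what lets the test advance time deterministically: with a delta of d, the first run reports a duration of d and no time between starts, and the second run reports a duration of d and a gap of 2d, matching the assertions above.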
diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/action/DataStreamLifecycleStatsResponseTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/action/DataStreamLifecycleStatsResponseTests.java new file mode 100644 index 0000000000000..b8e4b252645dd --- /dev/null +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/action/DataStreamLifecycleStatsResponseTests.java @@ -0,0 +1,154 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.datastreams.lifecycle.action; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.stream.IntStream; + +import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS; +import static org.hamcrest.Matchers.is; + +public class DataStreamLifecycleStatsResponseTests extends AbstractWireSerializingTestCase { + + @Override + protected GetDataStreamLifecycleStatsAction.Response createTestInstance() { + boolean hasRun = usually(); + var runDuration = hasRun ? randomLongBetween(10, 100000000) : null; + var timeBetweenStarts = hasRun && usually() ? randomLongBetween(10, 100000000) : null; + var dataStreams = IntStream.range(0, randomInt(10)) + .mapToObj( + ignored -> new GetDataStreamLifecycleStatsAction.Response.DataStreamStats( + randomAlphaOfLength(10), + randomIntBetween(1, 1000), + randomIntBetween(0, 100) + ) + ) + .toList(); + return new GetDataStreamLifecycleStatsAction.Response(runDuration, timeBetweenStarts, dataStreams); + } + + @Override + protected GetDataStreamLifecycleStatsAction.Response mutateInstance(GetDataStreamLifecycleStatsAction.Response instance) { + var runDuration = instance.getRunDuration(); + var timeBetweenStarts = instance.getTimeBetweenStarts(); + var dataStreams = instance.getDataStreamStats(); + switch (randomInt(2)) { + case 0 -> runDuration = runDuration != null && randomBoolean() + ? null + : randomValueOtherThan(runDuration, () -> randomLongBetween(10, 100000000)); + case 1 -> timeBetweenStarts = timeBetweenStarts != null && randomBoolean() + ? 
null + : randomValueOtherThan(timeBetweenStarts, () -> randomLongBetween(10, 100000000)); + default -> dataStreams = mutateDataStreamStats(dataStreams); + } + return new GetDataStreamLifecycleStatsAction.Response(runDuration, timeBetweenStarts, dataStreams); + } + + private List mutateDataStreamStats( + List dataStreamStats + ) { + // change the stats of a data stream + List mutated = new ArrayList<>(dataStreamStats); + if (randomBoolean() && dataStreamStats.isEmpty() == false) { + int i = randomInt(dataStreamStats.size() - 1); + GetDataStreamLifecycleStatsAction.Response.DataStreamStats instance = dataStreamStats.get(i); + mutated.set(i, switch (randomInt(2)) { + case 0 -> new GetDataStreamLifecycleStatsAction.Response.DataStreamStats( + instance.dataStreamName() + randomAlphaOfLength(2), + instance.backingIndicesInTotal(), + instance.backingIndicesInError() + ); + case 1 -> new GetDataStreamLifecycleStatsAction.Response.DataStreamStats( + instance.dataStreamName(), + instance.backingIndicesInTotal() + randomIntBetween(1, 10), + instance.backingIndicesInError() + ); + default -> new GetDataStreamLifecycleStatsAction.Response.DataStreamStats( + instance.dataStreamName(), + instance.backingIndicesInTotal(), + instance.backingIndicesInError() + randomIntBetween(1, 10) + ); + + }); + } else if (dataStreamStats.isEmpty() || randomBoolean()) { + mutated.add( + new GetDataStreamLifecycleStatsAction.Response.DataStreamStats( + randomAlphaOfLength(10), + randomIntBetween(1, 1000), + randomIntBetween(0, 100) + ) + ); + } else { + mutated.remove(randomInt(dataStreamStats.size() - 1)); + } + return mutated; + } + + @SuppressWarnings("unchecked") + public void testXContentSerialization() throws IOException { + GetDataStreamLifecycleStatsAction.Response testInstance = createTestInstance(); + try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { + builder.humanReadable(true); + testInstance.toXContentChunked(ToXContent.EMPTY_PARAMS).forEachRemaining(xcontent -> { + try { + xcontent.toXContent(builder, EMPTY_PARAMS); + } catch (IOException e) { + logger.error(e.getMessage(), e); + fail(e.getMessage()); + } + }); + Map xContentMap = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2(); + assertThat(xContentMap.get("last_run_duration_in_millis"), is(testInstance.getRunDuration().intValue())); + assertThat( + xContentMap.get("last_run_duration"), + is(TimeValue.timeValueMillis(testInstance.getRunDuration()).toHumanReadableString(2)) + ); + assertThat(xContentMap.get("time_between_starts_in_millis"), is(testInstance.getTimeBetweenStarts().intValue())); + assertThat( + xContentMap.get("time_between_starts"), + is(TimeValue.timeValueMillis(testInstance.getTimeBetweenStarts()).toHumanReadableString(2)) + ); + assertThat(xContentMap.get("data_stream_count"), is(testInstance.getDataStreamStats().size())); + List> dataStreams = (List>) xContentMap.get("data_streams"); + if (testInstance.getDataStreamStats().isEmpty()) { + assertThat(dataStreams.isEmpty(), is(true)); + } else { + assertThat(dataStreams.size(), is(testInstance.getDataStreamStats().size())); + for (int i = 0; i < dataStreams.size(); i++) { + assertThat(dataStreams.get(i).get("name"), is(testInstance.getDataStreamStats().get(i).dataStreamName())); + assertThat( + dataStreams.get(i).get("backing_indices_in_total"), + is(testInstance.getDataStreamStats().get(i).backingIndicesInTotal()) + ); + assertThat( + dataStreams.get(i).get("backing_indices_in_error"), + 
is(testInstance.getDataStreamStats().get(i).backingIndicesInError()) + ); + } + } + } + } + + @Override + protected Writeable.Reader instanceReader() { + return GetDataStreamLifecycleStatsAction.Response::new; + } +} diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleStatsActionTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleStatsActionTests.java new file mode 100644 index 0000000000000..8c423107ea2f4 --- /dev/null +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleStatsActionTests.java @@ -0,0 +1,153 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.datastreams.lifecycle.action; + +import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition; +import org.elasticsearch.action.admin.indices.rollover.RolloverInfo; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamLifecycle; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleErrorStore; +import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.junit.Before; + +import java.time.Clock; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.newInstance; +import static org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleFixtures.createDataStream; +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class TransportGetDataStreamLifecycleStatsActionTests extends ESTestCase { + + private final DataStreamLifecycleService dataStreamLifecycleService = mock(DataStreamLifecycleService.class); + private final DataStreamLifecycleErrorStore errorStore = mock(DataStreamLifecycleErrorStore.class); + private final TransportGetDataStreamLifecycleStatsAction action = new TransportGetDataStreamLifecycleStatsAction( + mock(TransportService.class), + mock(ClusterService.class), + mock(ThreadPool.class), + mock(ActionFilters.class), + mock(IndexNameExpressionResolver.class), + dataStreamLifecycleService + ); + private Long lastRunDuration; + private Long timeBetweenStarts; + + @Before + public void setUp() throws Exception { + super.setUp(); + lastRunDuration = randomBoolean() ? 
randomLongBetween(0, 100000) : null; + timeBetweenStarts = randomBoolean() ? randomLongBetween(0, 100000) : null; + when(dataStreamLifecycleService.getLastRunDuration()).thenReturn(lastRunDuration); + when(dataStreamLifecycleService.getTimeBetweenStarts()).thenReturn(timeBetweenStarts); + when(dataStreamLifecycleService.getErrorStore()).thenReturn(errorStore); + when(errorStore.getAllIndices()).thenReturn(Set.of()); + } + + public void testEmptyClusterState() { + GetDataStreamLifecycleStatsAction.Response response = action.collectStats(ClusterState.EMPTY_STATE); + assertThat(response.getRunDuration(), is(lastRunDuration)); + assertThat(response.getTimeBetweenStarts(), is(timeBetweenStarts)); + assertThat(response.getDataStreamStats().isEmpty(), is(true)); + } + + public void testMixedDataStreams() { + Set indicesInError = new HashSet<>(); + int numBackingIndices = 3; + Metadata.Builder builder = Metadata.builder(); + DataStream ilmDataStream = createDataStream( + builder, + "ilm-managed-index", + numBackingIndices, + Settings.builder() + .put(IndexMetadata.LIFECYCLE_NAME, "ILM_policy") + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()), + null, + Clock.systemUTC().millis() + ); + builder.put(ilmDataStream); + DataStream dslDataStream = createDataStream( + builder, + "dsl-managed-index", + numBackingIndices, + settings(IndexVersion.current()), + DataStreamLifecycle.newBuilder().dataRetention(TimeValue.timeValueDays(10)).build(), + Clock.systemUTC().millis() + ); + indicesInError.add(dslDataStream.getIndices().get(randomInt(numBackingIndices - 1)).getName()); + builder.put(dslDataStream); + { + String dataStreamName = "mixed"; + final List backingIndices = new ArrayList<>(); + for (int k = 1; k <= 2; k++) { + IndexMetadata.Builder indexMetaBuilder = IndexMetadata.builder(DataStream.getDefaultBackingIndexName(dataStreamName, k)) + .settings( + Settings.builder() + .put(IndexMetadata.LIFECYCLE_NAME, "ILM_policy") + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + ) + .numberOfShards(1) + .numberOfReplicas(1) + .creationDate(Clock.systemUTC().millis()); + + IndexMetadata indexMetadata = indexMetaBuilder.build(); + builder.put(indexMetadata, false); + backingIndices.add(indexMetadata.getIndex()); + } + // DSL managed write index + IndexMetadata.Builder indexMetaBuilder = IndexMetadata.builder(DataStream.getDefaultBackingIndexName(dataStreamName, 3)) + .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())) + .numberOfShards(1) + .numberOfReplicas(1) + .creationDate(Clock.systemUTC().millis()); + MaxAgeCondition rolloverCondition = new MaxAgeCondition(TimeValue.timeValueMillis(Clock.systemUTC().millis() - 2000L)); + indexMetaBuilder.putRolloverInfo( + new RolloverInfo(dataStreamName, List.of(rolloverCondition), Clock.systemUTC().millis() - 2000L) + ); + IndexMetadata indexMetadata = indexMetaBuilder.build(); + builder.put(indexMetadata, false); + backingIndices.add(indexMetadata.getIndex()); + builder.put(newInstance(dataStreamName, backingIndices, 3, null, false, DataStreamLifecycle.newBuilder().build())); + } + ClusterState state = ClusterState.builder(ClusterName.DEFAULT).metadata(builder).build(); + when(errorStore.getAllIndices()).thenReturn(indicesInError); + GetDataStreamLifecycleStatsAction.Response response = action.collectStats(state); + assertThat(response.getRunDuration(), is(lastRunDuration)); + assertThat(response.getTimeBetweenStarts(), is(timeBetweenStarts)); + 
assertThat(response.getDataStreamStats().size(), is(2)); + for (GetDataStreamLifecycleStatsAction.Response.DataStreamStats stats : response.getDataStreamStats()) { + if (stats.dataStreamName().equals("dsl-managed-index")) { + assertThat(stats.backingIndicesInTotal(), is(3)); + assertThat(stats.backingIndicesInError(), is(1)); + } + if (stats.dataStreamName().equals("mixed")) { + assertThat(stats.backingIndicesInTotal(), is(1)); + assertThat(stats.backingIndicesInError(), is(0)); + } + } + } +} diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml index b420e8421bfba..6496930764ab8 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml @@ -206,6 +206,103 @@ setup: - do: indices.delete_index_template: name: my-template3 + +--- +"Create data stream with failure store": + - skip: + version: " - 8.10.99" + reason: "data stream failure stores only creatable in 8.11+" + + - do: + allowed_warnings: + - "index template [my-template4] has index patterns [failure-data-stream1, failure-data-stream2] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template4] will take precedence during new index creation" + indices.put_index_template: + name: my-template4 + body: + index_patterns: [ failure-data-stream1, failure-data-stream2 ] + data_stream: + failure_store: true + + - do: + indices.create_data_stream: + name: failure-data-stream1 + - is_true: acknowledged + + - do: + indices.create_data_stream: + name: failure-data-stream2 + - is_true: acknowledged + + - do: + cluster.health: + wait_for_status: green + + - do: + indices.get_data_stream: + name: "*" + - match: { data_streams.0.name: failure-data-stream1 } + - match: { data_streams.0.timestamp_field.name: '@timestamp' } + - match: { data_streams.0.generation: 1 } + - length: { data_streams.0.indices: 1 } + - match: { data_streams.0.indices.0.index_name: '/\.ds-failure-data-stream1-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { data_streams.0.status: 'GREEN' } + - match: { data_streams.0.template: 'my-template4' } + - match: { data_streams.0.hidden: false } + - match: { data_streams.0.failure_store: true } + - length: { data_streams.0.failure_indices: 1 } + - match: { data_streams.0.failure_indices.0.index_name: '/\.fs-failure-data-stream1-(\d{4}\.\d{2}\.\d{2}-)?000001/'} + + - match: { data_streams.1.name: failure-data-stream2 } + - match: { data_streams.1.timestamp_field.name: '@timestamp' } + - match: { data_streams.1.generation: 1 } + - length: { data_streams.1.indices: 1 } + - match: { data_streams.1.indices.0.index_name: '/\.ds-failure-data-stream2-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { data_streams.1.template: 'my-template4' } + - match: { data_streams.1.hidden: false } + - match: { data_streams.1.failure_store: true } + - length: { data_streams.1.failure_indices: 1 } + - match: { data_streams.1.failure_indices.0.index_name: '/\.fs-failure-data-stream2-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + + # save the backing index names for later use + - set: { data_streams.0.indices.0.index_name: idx0name } + - set: { data_streams.0.failure_indices.0.index_name: fsidx0name } + - set: { data_streams.1.indices.0.index_name: idx1name } + - set: { data_streams.1.failure_indices.0.index_name: fsidx1name } + + - do: 
+ indices.get_mapping: + index: $idx0name + expand_wildcards: hidden + - match: { .$idx0name.mappings.properties.@timestamp.type: 'date' } + + - do: + indices.get_mapping: + index: $fsidx0name + expand_wildcards: hidden + - match: { .$fsidx0name.mappings.properties.@timestamp.type: 'date' } + + - do: + indices.get_mapping: + index: $idx1name + expand_wildcards: hidden + - match: { .$idx1name.mappings.properties.@timestamp.type: 'date' } + + - do: + indices.get_mapping: + index: $fsidx1name + expand_wildcards: hidden + - match: { .$fsidx1name.mappings.properties.@timestamp.type: 'date' } + + - do: + indices.delete_data_stream: + name: failure-data-stream1 + - is_true: acknowledged + + - do: + indices.delete_data_stream: + name: failure-data-stream2 + - is_true: acknowledged + --- "Create data stream with invalid name": - skip: diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/30_auto_create_data_stream.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/30_auto_create_data_stream.yml index 2a6beb4330e68..303a584555f8f 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/30_auto_create_data_stream.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/30_auto_create_data_stream.yml @@ -46,3 +46,56 @@ indices.delete_data_stream: name: logs-foobar - is_true: acknowledged + +--- +"Put index template with failure store": + - skip: + version: " - 8.10.99" + reason: "data stream failure stores only creatable in 8.11+" + features: allowed_warnings + + - do: + allowed_warnings: + - "index template [generic_logs_template] has index patterns [logs-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [generic_logs_template] will take precedence during new index creation" + indices.put_index_template: + name: generic_logs_template + body: + index_patterns: logs-* + data_stream: + failure_store: true + template: + settings: + number_of_shards: 1 + number_of_replicas: 1 + + - do: + index: + index: logs-foobar + refresh: true + body: + '@timestamp': '2020-12-12' + foo: bar + + - do: + search: + index: logs-foobar + body: { query: { match_all: {} } } + - length: { hits.hits: 1 } + - match: { hits.hits.0._index: "/\\.ds-logs-foobar-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000001/" } + - match: { hits.hits.0._source.foo: 'bar' } + + - do: + indices.get_data_stream: + name: logs-foobar + - match: { data_streams.0.name: logs-foobar } + - match: { data_streams.0.timestamp_field.name: '@timestamp' } + - length: { data_streams.0.indices: 1 } + - match: { data_streams.0.indices.0.index_name: '/\.ds-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { data_streams.0.failure_store: true } + - length: { data_streams.0.failure_indices: 1 } + - match: { data_streams.0.failure_indices.0.index_name: '/\.fs-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + + - do: + indices.delete_data_stream: + name: logs-foobar + - is_true: acknowledged diff --git a/modules/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java b/modules/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java index 362c0c1887261..48cb155ac2970 100644 --- a/modules/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java +++ b/modules/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java @@ -14,10 +14,10 @@ import 
org.apache.tika.metadata.Office; import org.apache.tika.metadata.TikaCoreProperties; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.ingest.AbstractProcessor; import org.elasticsearch.ingest.IngestDocument; import org.elasticsearch.ingest.Processor; @@ -226,15 +226,6 @@ public static final class Factory implements Processor.Factory { static final Set DEFAULT_PROPERTIES = EnumSet.allOf(Property.class); - static { - if (Version.CURRENT.major >= 9) { - throw new IllegalStateException( - "[poison pill] update the [remove_binary] default to be 'true' assuming " - + "enough time has passed. Deprecated in September 2022." - ); - } - } - @Override public AttachmentProcessor create( Map registry, @@ -249,6 +240,7 @@ public AttachmentProcessor create( int indexedChars = readIntProperty(TYPE, processorTag, config, "indexed_chars", NUMBER_OF_CHARS_INDEXED); boolean ignoreMissing = readBooleanProperty(TYPE, processorTag, config, "ignore_missing", false); String indexedCharsField = readOptionalStringProperty(TYPE, processorTag, config, "indexed_chars_field"); + @UpdateForV9 // update the [remove_binary] default to be 'true' assuming enough time has passed. Deprecated in September 2022. Boolean removeBinary = readOptionalBooleanProperty(TYPE, processorTag, config, "remove_binary"); if (removeBinary == null) { DEPRECATION_LOGGER.warn( diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/310_reroute_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/310_reroute_processor.yml index 191b92806b6ce..e2f4e32777a1f 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/310_reroute_processor.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/310_reroute_processor.yml @@ -23,6 +23,9 @@ teardown: --- "Test first matching router terminates pipeline": + - skip: + version: all + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/102144" - do: ingest.put_pipeline: id: "pipeline-with-two-data-stream-processors" diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java index 65b2e257de0b1..05a935229246d 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java @@ -84,6 +84,7 @@ import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.lucene.queries.BlendedTermQuery; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.index.IndexVersionUtils; import org.elasticsearch.xcontent.XContentFactory; @@ -215,93 +216,95 @@ public void testDuel() throws Exception { } Collections.sort(intValues); - SearchExecutionContext context = createSearchContext(indexService).getSearchExecutionContext(); - MappedFieldType intFieldType = mapperService.fieldType("int_field"); - - List> queryFunctions = new ArrayList<>(); - queryFunctions.add(MatchNoDocsQuery::new); - queryFunctions.add(MatchAllDocsQuery::new); 
- queryFunctions.add(() -> new TermQuery(new Term("unknown_field", "value"))); - String field1 = randomFrom(stringFields); - queryFunctions.add(() -> new TermQuery(new Term(field1, randomFrom(stringContent.get(field1))))); - String field2 = randomFrom(stringFields); - queryFunctions.add(() -> new TermQuery(new Term(field2, randomFrom(stringContent.get(field2))))); - queryFunctions.add(() -> intFieldType.termQuery(randomFrom(intValues), context)); - queryFunctions.add(() -> intFieldType.termsQuery(Arrays.asList(randomFrom(intValues), randomFrom(intValues)), context)); - queryFunctions.add( - () -> intFieldType.rangeQuery( - intValues.get(4), - intValues.get(intValues.size() - 4), - true, - true, - ShapeRelation.WITHIN, - null, - null, - context - ) - ); - queryFunctions.add( - () -> new TermInSetQuery( - field1, - new BytesRef(randomFrom(stringContent.get(field1))), - new BytesRef(randomFrom(stringContent.get(field1))) - ) - ); - queryFunctions.add( - () -> new TermInSetQuery( - field2, - new BytesRef(randomFrom(stringContent.get(field1))), - new BytesRef(randomFrom(stringContent.get(field1))) - ) - ); - // many iterations with boolean queries, which are the most complex queries to deal with when nested - int numRandomBoolQueries = 1000; - for (int i = 0; i < numRandomBoolQueries; i++) { - queryFunctions.add(() -> createRandomBooleanQuery(1, stringFields, stringContent, intFieldType, intValues, context)); - } - queryFunctions.add(() -> { - int numClauses = randomIntBetween(1, 1 << randomIntBetween(2, 4)); - List clauses = new ArrayList<>(); - for (int i = 0; i < numClauses; i++) { - String field = randomFrom(stringFields); - clauses.add(new TermQuery(new Term(field, randomFrom(stringContent.get(field))))); - } - return new DisjunctionMaxQuery(clauses, 0.01f); - }); - queryFunctions.add(() -> { - Float minScore = randomBoolean() ? 
null : (float) randomIntBetween(1, 1000); - Query innerQuery; - if (randomBoolean()) { - innerQuery = new TermQuery(new Term(field1, randomFrom(stringContent.get(field1)))); - } else { - innerQuery = new PhraseQuery(field1, randomFrom(stringContent.get(field1)), randomFrom(stringContent.get(field1))); + try (SearchContext searchContext = createSearchContext(indexService)) { + SearchExecutionContext context = searchContext.getSearchExecutionContext(); + MappedFieldType intFieldType = mapperService.fieldType("int_field"); + + List> queryFunctions = new ArrayList<>(); + queryFunctions.add(MatchNoDocsQuery::new); + queryFunctions.add(MatchAllDocsQuery::new); + queryFunctions.add(() -> new TermQuery(new Term("unknown_field", "value"))); + String field1 = randomFrom(stringFields); + queryFunctions.add(() -> new TermQuery(new Term(field1, randomFrom(stringContent.get(field1))))); + String field2 = randomFrom(stringFields); + queryFunctions.add(() -> new TermQuery(new Term(field2, randomFrom(stringContent.get(field2))))); + queryFunctions.add(() -> intFieldType.termQuery(randomFrom(intValues), context)); + queryFunctions.add(() -> intFieldType.termsQuery(Arrays.asList(randomFrom(intValues), randomFrom(intValues)), context)); + queryFunctions.add( + () -> intFieldType.rangeQuery( + intValues.get(4), + intValues.get(intValues.size() - 4), + true, + true, + ShapeRelation.WITHIN, + null, + null, + context + ) + ); + queryFunctions.add( + () -> new TermInSetQuery( + field1, + new BytesRef(randomFrom(stringContent.get(field1))), + new BytesRef(randomFrom(stringContent.get(field1))) + ) + ); + queryFunctions.add( + () -> new TermInSetQuery( + field2, + new BytesRef(randomFrom(stringContent.get(field1))), + new BytesRef(randomFrom(stringContent.get(field1))) + ) + ); + // many iterations with boolean queries, which are the most complex queries to deal with when nested + int numRandomBoolQueries = 1000; + for (int i = 0; i < numRandomBoolQueries; i++) { + queryFunctions.add(() -> createRandomBooleanQuery(1, stringFields, stringContent, intFieldType, intValues, context)); } - return new FunctionScoreQuery(innerQuery, minScore, 1f); - }); - - List documents = new ArrayList<>(); - for (Supplier queryFunction : queryFunctions) { - Query query = queryFunction.get(); - addQuery(query, documents); - } + queryFunctions.add(() -> { + int numClauses = randomIntBetween(1, 1 << randomIntBetween(2, 4)); + List clauses = new ArrayList<>(); + for (int i = 0; i < numClauses; i++) { + String field = randomFrom(stringFields); + clauses.add(new TermQuery(new Term(field, randomFrom(stringContent.get(field))))); + } + return new DisjunctionMaxQuery(clauses, 0.01f); + }); + queryFunctions.add(() -> { + Float minScore = randomBoolean() ? null : (float) randomIntBetween(1, 1000); + Query innerQuery; + if (randomBoolean()) { + innerQuery = new TermQuery(new Term(field1, randomFrom(stringContent.get(field1)))); + } else { + innerQuery = new PhraseQuery(field1, randomFrom(stringContent.get(field1)), randomFrom(stringContent.get(field1))); + } + return new FunctionScoreQuery(innerQuery, minScore, 1f); + }); - indexWriter.addDocuments(documents); - indexWriter.close(); - directoryReader = DirectoryReader.open(directory); - IndexSearcher shardSearcher = newSearcher(directoryReader); - // Disable query cache, because ControlQuery cannot be cached... 
- shardSearcher.setQueryCache(null); + List documents = new ArrayList<>(); + for (Supplier queryFunction : queryFunctions) { + Query query = queryFunction.get(); + addQuery(query, documents); + } - LuceneDocument document = new LuceneDocument(); - for (Map.Entry> entry : stringContent.entrySet()) { - String value = entry.getValue().stream().collect(Collectors.joining(" ")); - document.add(new TextField(entry.getKey(), value, Field.Store.NO)); - } - for (Integer intValue : intValues) { - NumberFieldMapper.NumberType.INTEGER.addFields(document, "int_field", intValue, true, true, false); + indexWriter.addDocuments(documents); + indexWriter.close(); + directoryReader = DirectoryReader.open(directory); + IndexSearcher shardSearcher = newSearcher(directoryReader); + // Disable query cache, because ControlQuery cannot be cached... + shardSearcher.setQueryCache(null); + + LuceneDocument document = new LuceneDocument(); + for (Map.Entry> entry : stringContent.entrySet()) { + String value = entry.getValue().stream().collect(Collectors.joining(" ")); + document.add(new TextField(entry.getKey(), value, Field.Store.NO)); + } + for (Integer intValue : intValues) { + NumberFieldMapper.NumberType.INTEGER.addFields(document, "int_field", intValue, true, true, false); + } + MemoryIndex memoryIndex = MemoryIndex.fromDocument(document, new WhitespaceAnalyzer()); + duelRun(queryStore, memoryIndex, shardSearcher); } - MemoryIndex memoryIndex = MemoryIndex.fromDocument(document, new WhitespaceAnalyzer()); - duelRun(queryStore, memoryIndex, shardSearcher); } private BooleanQuery createRandomBooleanQuery( @@ -376,53 +379,55 @@ public void testDuel2() throws Exception { ranges.add(new int[] { 0, 10 }); ranges.add(new int[] { 15, 50 }); - SearchExecutionContext context = createSearchContext(indexService).getSearchExecutionContext(); - List documents = new ArrayList<>(); - { - addQuery(new TermQuery(new Term("string_field", randomFrom(stringValues))), documents); - } - { - addQuery(new PhraseQuery(0, "string_field", stringValues.toArray(new String[0])), documents); - } - { - int[] range = randomFrom(ranges); - Query rangeQuery = intFieldType.rangeQuery(range[0], range[1], true, true, null, null, null, context); - addQuery(rangeQuery, documents); - } - { - int numBooleanQueries = randomIntBetween(1, 5); - for (int i = 0; i < numBooleanQueries; i++) { - Query randomBQ = randomBQ(1, stringValues, ranges, intFieldType, context); - addQuery(randomBQ, documents); + try (SearchContext searchContext = createSearchContext(indexService)) { + SearchExecutionContext context = searchContext.getSearchExecutionContext(); + List documents = new ArrayList<>(); + { + addQuery(new TermQuery(new Term("string_field", randomFrom(stringValues))), documents); + } + { + addQuery(new PhraseQuery(0, "string_field", stringValues.toArray(new String[0])), documents); + } + { + int[] range = randomFrom(ranges); + Query rangeQuery = intFieldType.rangeQuery(range[0], range[1], true, true, null, null, null, context); + addQuery(rangeQuery, documents); + } + { + int numBooleanQueries = randomIntBetween(1, 5); + for (int i = 0; i < numBooleanQueries; i++) { + Query randomBQ = randomBQ(1, stringValues, ranges, intFieldType, context); + addQuery(randomBQ, documents); + } + } + { + addQuery(new MatchNoDocsQuery(), documents); + } + { + addQuery(new MatchAllDocsQuery(), documents); } - } - { - addQuery(new MatchNoDocsQuery(), documents); - } - { - addQuery(new MatchAllDocsQuery(), documents); - } - - indexWriter.addDocuments(documents); - 
indexWriter.close(); - directoryReader = DirectoryReader.open(directory); - IndexSearcher shardSearcher = newSearcher(directoryReader); - // Disable query cache, because ControlQuery cannot be cached... - shardSearcher.setQueryCache(null); - LuceneDocument document = new LuceneDocument(); - for (String value : stringValues) { - document.add(new TextField("string_field", value, Field.Store.NO)); - logger.info("Test with document: {}" + document); - MemoryIndex memoryIndex = MemoryIndex.fromDocument(document, new WhitespaceAnalyzer()); - duelRun(queryStore, memoryIndex, shardSearcher); - } + indexWriter.addDocuments(documents); + indexWriter.close(); + directoryReader = DirectoryReader.open(directory); + IndexSearcher shardSearcher = newSearcher(directoryReader); + // Disable query cache, because ControlQuery cannot be cached... + shardSearcher.setQueryCache(null); + + LuceneDocument document = new LuceneDocument(); + for (String value : stringValues) { + document.add(new TextField("string_field", value, Field.Store.NO)); + logger.info("Test with document: {}" + document); + MemoryIndex memoryIndex = MemoryIndex.fromDocument(document, new WhitespaceAnalyzer()); + duelRun(queryStore, memoryIndex, shardSearcher); + } - for (int[] range : ranges) { - NumberFieldMapper.NumberType.INTEGER.addFields(document, "int_field", between(range[0], range[1]), true, true, false); - logger.info("Test with document: {}" + document); - MemoryIndex memoryIndex = MemoryIndex.fromDocument(document, new WhitespaceAnalyzer()); - duelRun(queryStore, memoryIndex, shardSearcher); + for (int[] range : ranges) { + NumberFieldMapper.NumberType.INTEGER.addFields(document, "int_field", between(range[0], range[1]), true, true, false); + logger.info("Test with document: {}" + document); + MemoryIndex memoryIndex = MemoryIndex.fromDocument(document, new WhitespaceAnalyzer()); + duelRun(queryStore, memoryIndex, shardSearcher); + } } } diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java index b47364e3b1a08..ad9a4e27207ac 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java @@ -81,6 +81,7 @@ import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.DummyQueryParserPlugin; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.xcontent.XContentBuilder; @@ -268,76 +269,79 @@ public void testExtractTerms() throws Exception { } public void testExtractRanges() throws Exception { - SearchExecutionContext context = createSearchContext(indexService).getSearchExecutionContext(); - addQueryFieldMappings(); - BooleanQuery.Builder bq = new BooleanQuery.Builder(); - Query rangeQuery1 = mapperService.fieldType("number_field1").rangeQuery(10, 20, true, true, null, null, null, context); - bq.add(rangeQuery1, Occur.MUST); - Query rangeQuery2 = mapperService.fieldType("number_field1").rangeQuery(15, 20, true, true, null, null, null, context); - bq.add(rangeQuery2, Occur.MUST); - - DocumentMapper documentMapper = mapperService.documentMapper(); - PercolatorFieldMapper fieldMapper = (PercolatorFieldMapper) documentMapper.mappers().getMapper(fieldName); - 
DocumentParserContext documentParserContext = new TestDocumentParserContext(); - fieldMapper.processQuery(bq.build(), documentParserContext); - LuceneDocument document = documentParserContext.doc(); - - PercolatorFieldMapper.PercolatorFieldType percolatorFieldType = (PercolatorFieldMapper.PercolatorFieldType) fieldMapper.fieldType(); - assertThat(document.getField(percolatorFieldType.extractionResultField.name()).stringValue(), equalTo(EXTRACTION_PARTIAL)); - List fields = new ArrayList<>(document.getFields(percolatorFieldType.rangeField.name())); - fields.sort(Comparator.comparing(IndexableField::binaryValue)); - assertThat( - fields, - transformedItemsMatch( - b -> b.binaryValue().bytes, - contains( - allOf( - transformedMatch(b -> IntPoint.decodeDimension(b, 12), equalTo(10)), - transformedMatch(b -> IntPoint.decodeDimension(b, 28), equalTo(20)) - ), - allOf( - transformedMatch(b -> IntPoint.decodeDimension(b, 12), equalTo(15)), - transformedMatch(b -> IntPoint.decodeDimension(b, 28), equalTo(20)) + try (SearchContext searchContext = createSearchContext(indexService)) { + SearchExecutionContext context = searchContext.getSearchExecutionContext(); + addQueryFieldMappings(); + BooleanQuery.Builder bq = new BooleanQuery.Builder(); + Query rangeQuery1 = mapperService.fieldType("number_field1").rangeQuery(10, 20, true, true, null, null, null, context); + bq.add(rangeQuery1, Occur.MUST); + Query rangeQuery2 = mapperService.fieldType("number_field1").rangeQuery(15, 20, true, true, null, null, null, context); + bq.add(rangeQuery2, Occur.MUST); + + DocumentMapper documentMapper = mapperService.documentMapper(); + PercolatorFieldMapper fieldMapper = (PercolatorFieldMapper) documentMapper.mappers().getMapper(fieldName); + DocumentParserContext documentParserContext = new TestDocumentParserContext(); + fieldMapper.processQuery(bq.build(), documentParserContext); + LuceneDocument document = documentParserContext.doc(); + + PercolatorFieldMapper.PercolatorFieldType percolatorFieldType = (PercolatorFieldMapper.PercolatorFieldType) fieldMapper + .fieldType(); + assertThat(document.getField(percolatorFieldType.extractionResultField.name()).stringValue(), equalTo(EXTRACTION_PARTIAL)); + List fields = new ArrayList<>(document.getFields(percolatorFieldType.rangeField.name())); + fields.sort(Comparator.comparing(IndexableField::binaryValue)); + assertThat( + fields, + transformedItemsMatch( + b -> b.binaryValue().bytes, + contains( + allOf( + transformedMatch(b -> IntPoint.decodeDimension(b, 12), equalTo(10)), + transformedMatch(b -> IntPoint.decodeDimension(b, 28), equalTo(20)) + ), + allOf( + transformedMatch(b -> IntPoint.decodeDimension(b, 12), equalTo(15)), + transformedMatch(b -> IntPoint.decodeDimension(b, 28), equalTo(20)) + ) ) ) - ) - ); - - fields = new ArrayList<>(document.getFields(percolatorFieldType.minimumShouldMatchField.name())); - assertThat(fields, transformedItemsMatch(IndexableField::numericValue, contains(1L))); - - // Range queries on different fields: - bq = new BooleanQuery.Builder(); - bq.add(rangeQuery1, Occur.MUST); - rangeQuery2 = mapperService.fieldType("number_field2").rangeQuery(15, 20, true, true, null, null, null, context); - bq.add(rangeQuery2, Occur.MUST); - - documentParserContext = new TestDocumentParserContext(); - fieldMapper.processQuery(bq.build(), documentParserContext); - document = documentParserContext.doc(); + ); - assertThat(document.getField(percolatorFieldType.extractionResultField.name()).stringValue(), equalTo(EXTRACTION_PARTIAL)); - fields = new 
ArrayList<>(document.getFields(percolatorFieldType.rangeField.name())); - fields.sort(Comparator.comparing(IndexableField::binaryValue)); - assertThat( - fields, - transformedItemsMatch( - b -> b.binaryValue().bytes, - contains( - allOf( - transformedMatch(b -> IntPoint.decodeDimension(b, 12), equalTo(10)), - transformedMatch(b -> IntPoint.decodeDimension(b, 28), equalTo(20)) - ), - allOf( - transformedMatch(b -> LongPoint.decodeDimension(b, 8), equalTo(15L)), - transformedMatch(b -> LongPoint.decodeDimension(b, 24), equalTo(20L)) + fields = new ArrayList<>(document.getFields(percolatorFieldType.minimumShouldMatchField.name())); + assertThat(fields, transformedItemsMatch(IndexableField::numericValue, contains(1L))); + + // Range queries on different fields: + bq = new BooleanQuery.Builder(); + bq.add(rangeQuery1, Occur.MUST); + rangeQuery2 = mapperService.fieldType("number_field2").rangeQuery(15, 20, true, true, null, null, null, context); + bq.add(rangeQuery2, Occur.MUST); + + documentParserContext = new TestDocumentParserContext(); + fieldMapper.processQuery(bq.build(), documentParserContext); + document = documentParserContext.doc(); + + assertThat(document.getField(percolatorFieldType.extractionResultField.name()).stringValue(), equalTo(EXTRACTION_PARTIAL)); + fields = new ArrayList<>(document.getFields(percolatorFieldType.rangeField.name())); + fields.sort(Comparator.comparing(IndexableField::binaryValue)); + assertThat( + fields, + transformedItemsMatch( + b -> b.binaryValue().bytes, + contains( + allOf( + transformedMatch(b -> IntPoint.decodeDimension(b, 12), equalTo(10)), + transformedMatch(b -> IntPoint.decodeDimension(b, 28), equalTo(20)) + ), + allOf( + transformedMatch(b -> LongPoint.decodeDimension(b, 8), equalTo(15L)), + transformedMatch(b -> LongPoint.decodeDimension(b, 24), equalTo(20L)) + ) ) ) - ) - ); + ); - fields = new ArrayList<>(document.getFields(percolatorFieldType.minimumShouldMatchField.name())); - assertThat(fields, transformedItemsMatch(IndexableField::numericValue, contains(2L))); + fields = new ArrayList<>(document.getFields(percolatorFieldType.minimumShouldMatchField.name())); + assertThat(fields, transformedItemsMatch(IndexableField::numericValue, contains(2L))); + } } public void testExtractTermsAndRanges_failed() throws Exception { diff --git a/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java b/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java index ffb5fd71f0c09..b2df41c69eda7 100644 --- a/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java +++ b/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java @@ -71,7 +71,7 @@ protected SecureSettings credentials() { @Override protected void createRepository(final String repoName) { - AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository("test-repo") + AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository(repoName) .setType("gcs") .setSettings( Settings.builder() diff --git a/modules/repository-s3/build.gradle b/modules/repository-s3/build.gradle index 87dda19368d5a..3fb236e1d867f 100644 --- a/modules/repository-s3/build.gradle +++ b/modules/repository-s3/build.gradle @@ -322,6 +322,41 @@ if (useFixture) { } } +// Sanity test for STS Regional Endpoints +if 
(useFixture) { + tasks.register("yamlRestTestRegionalSTS", RestIntegTestTask.class) { + description = "Runs tests with the Regional STS Endpoint" + SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); + SourceSet yamlRestTestSourceSet = sourceSets.getByName(LegacyYamlRestTestPlugin.SOURCE_SET_NAME) + setTestClassesDirs(yamlRestTestSourceSet.getOutput().getClassesDirs()) + setClasspath(yamlRestTestSourceSet.getRuntimeClasspath()) + // Run just the basic sanity test to make sure ES starts up and loads the S3 repository with + // a regional endpoint without an error. It would be great to make actual requests against + // a test fixture, but setting the region means using a production endpoint + systemProperty 'tests.rest.blacklist', [ + 'repository_s3/20_repository_permanent_credentials/*', + 'repository_s3/30_repository_temporary_credentials/*', + 'repository_s3/40_repository_ec2_credentials/*', + 'repository_s3/50_repository_ecs_credentials/*', + 'repository_s3/60_repository_sts_credentials/*' + ].join(",") + } + tasks.named("check").configure { dependsOn("yamlRestTestRegionalSTS") } + + testClusters.matching { it.name == "yamlRestTestRegionalSTS" }.configureEach { + module tasks.named("explodedBundlePlugin") + + File awsWebIdentityTokenExternalLocation = file('src/test/resources/aws-web-identity-token-file') + extraConfigFile 'repository-s3/aws-web-identity-token-file', awsWebIdentityTokenExternalLocation + environment 'AWS_WEB_IDENTITY_TOKEN_FILE', "$awsWebIdentityTokenExternalLocation" + environment 'AWS_ROLE_ARN', 'arn:aws:iam::123456789012:role/FederatedWebIdentityRole' + environment 'AWS_ROLE_SESSION_NAME', 'sts-fixture-test' + // Force the repository to set a regional production endpoint + environment 'AWS_STS_REGIONAL_ENDPOINTS', 'regional' + environment 'AWS_REGION', 'ap-southeast-2' + } +} + // 3rd Party Tests TaskProvider s3ThirdPartyTest = tasks.register("s3ThirdPartyTest", Test) { SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java index b9cb2f62f8cfc..afa52dd56ea6a 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java @@ -109,7 +109,7 @@ protected void createRepository(String repoName) { settings.put("storage_class", storageClass); } } - AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository("test-repo") + AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository(repoName) .setType("s3") .setSettings(settings) .get(); diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java index c0b64c5c672f6..87b3c17bfd91c 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java @@ -595,13 +595,17 @@ void run(BytesReference expected, BytesReference updated, ActionListenerandThen((l, currentValue) -> ActionListener.completeWith(l, () -> { if 
(currentValue.isPresent() && currentValue.bytesReference().equals(expected)) { + logger.trace("[{}] completing upload [{}]", blobKey, uploadId); completeMultipartUpload(uploadId, partETag); } else { // Best-effort attempt to clean up after ourselves. + logger.trace("[{}] aborting upload [{}]", blobKey, uploadId); safeAbortMultipartUpload(uploadId); } return currentValue; @@ -635,6 +641,7 @@ void run(BytesReference expected, BytesReference updated, ActionListener { // Best-effort attempt to clean up after ourselves. + logger.trace(() -> Strings.format("[%s] aborting upload [%s] on exception", blobKey, uploadId), e); safeAbortMultipartUpload(uploadId); l.onFailure(e); })); @@ -651,7 +658,10 @@ void run(BytesReference expected, BytesReference updated, ActionListener upload.getInitiated().after(expiryDate))) { + logger.trace("[{}] fresh preexisting uploads vs {}", blobKey, expiryDate); return true; } @@ -674,9 +685,23 @@ private boolean hasPreexistingUploads() { safeAbortMultipartUpload(upload.getUploadId()); } + logger.trace("[{}] stale preexisting uploads vs {}", blobKey, expiryDate); return false; } + private void logUploads(String description, List uploads) { + if (logger.isTraceEnabled()) { + logger.trace( + "[{}] {}: [{}]", + blobKey, + description, + uploads.stream() + .map(multipartUpload -> multipartUpload.getUploadId() + ": " + multipartUpload.getInitiated()) + .collect(Collectors.joining(",")) + ); + } + } + private List listMultipartUploads() { final var listRequest = new ListMultipartUploadsRequest(bucket); listRequest.setPrefix(blobKey); @@ -776,6 +801,7 @@ private void ensureOtherUploadsComplete( } private void cancelOtherUploads(String uploadId, List currentUploads, ActionListener listener) { + logger.trace("[{}] upload [{}] cancelling other uploads", blobKey, uploadId); final var executor = blobStore.getSnapshotExecutor(); try (var listeners = new RefCountingListener(listener)) { for (final var currentUpload : currentUploads) { @@ -826,6 +852,7 @@ public void compareAndExchangeRegister( ) { final var clientReference = blobStore.clientReference(); ActionListener.run(ActionListener.releaseAfter(listener.delegateResponse((delegate, e) -> { + logger.trace(() -> Strings.format("[%s]: compareAndExchangeRegister failed", key), e); if (e instanceof AmazonS3Exception amazonS3Exception && amazonS3Exception.getStatusCode() == 404) { // an uncaught 404 means that our multipart upload was aborted by a concurrent operation before we could complete it delegate.onResponse(OptionalBytesReference.MISSING); @@ -853,6 +880,7 @@ public void getRegister(OperationPurpose purpose, String key, ActionListener Strings.format("[%s]: getRegister failed", key), e); if (e.getStatusCode() == 404) { return OptionalBytesReference.EMPTY; } else { diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java index 97c065e771ffd..4042d414048d9 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java @@ -8,6 +8,7 @@ package org.elasticsearch.repositories.s3; +import com.amazonaws.regions.RegionUtils; import com.amazonaws.util.json.Jackson; import org.apache.lucene.util.SetOnce; @@ -49,6 +50,8 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin, Relo // ClientConfiguration clinit has some classloader 
problems // TODO: fix that Class.forName("com.amazonaws.ClientConfiguration"); + // Pre-load region metadata to avoid looking them up dynamically without privileges enabled + RegionUtils.initialize(); } catch (final ClassNotFoundException e) { throw new RuntimeException(e); } diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java index 25bba12db6952..e33bfbff141b2 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java @@ -370,7 +370,7 @@ static class CustomWebIdentityTokenCredentialsProvider implements AWSCredentials // https://github.com/aws/amazon-eks-pod-identity-webhook/pull/41 stsRegion = systemEnvironment.getEnv(SDKGlobalConfiguration.AWS_REGION_ENV_VAR); if (stsRegion != null) { - stsClientBuilder.withRegion(stsRegion); + SocketAccess.doPrivilegedVoid(() -> stsClientBuilder.withRegion(stsRegion)); } else { LOGGER.warn("Unable to use regional STS endpoints because the AWS_REGION environment variable is not set"); } diff --git a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java index 275a41849d353..9c5415f1d5ea9 100644 --- a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java +++ b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java @@ -17,31 +17,18 @@ import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.tests.util.TimeUnits; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; -import org.elasticsearch.action.bulk.BulkProcessor2; -import org.elasticsearch.action.bulk.BulkRequest; -import org.elasticsearch.action.bulk.BulkResponse; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.aggregations.pipeline.DerivativePipelineAggregationBuilder; import org.elasticsearch.client.Request; -import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; -import org.elasticsearch.client.RestClient; -import org.elasticsearch.client.RestHighLevelClient; -import org.elasticsearch.client.asyncsearch.AsyncSearchResponse; +import org.elasticsearch.client.ResponseListener; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.CheckedFunction; -import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.InnerHitBuilder; import org.elasticsearch.index.query.MatchQueryBuilder; @@ -55,9 +42,7 @@ import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; -import 
org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.BucketOrder; -import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; @@ -77,22 +62,22 @@ import org.elasticsearch.search.suggest.SuggestBuilder; import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder; import org.elasticsearch.search.suggest.phrase.DirectCandidateGeneratorBuilder; -import org.elasticsearch.search.suggest.phrase.PhraseSuggestion; import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder; -import org.elasticsearch.search.suggest.term.TermSuggestion; import org.elasticsearch.search.suggest.term.TermSuggestionBuilder; import org.elasticsearch.test.NotEqualMessageBuilder; import org.elasticsearch.test.XContentTestUtils; import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.ObjectPath; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; -import org.junit.AfterClass; +import org.hamcrest.Matchers; import org.junit.Before; import java.io.IOException; +import java.io.UncheckedIOException; import java.net.URLEncoder; import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; @@ -100,7 +85,6 @@ import java.time.format.DateTimeFormatter; import java.util.Arrays; import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Locale; @@ -110,16 +94,14 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Consumer; -import static java.util.stream.Collectors.toList; +import static org.hamcrest.CoreMatchers.anyOf; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.Matchers.empty; +import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; -import static org.hamcrest.Matchers.not; /** * This test class executes twice, first against the remote cluster, and then against another cluster that has the remote cluster @@ -137,13 +119,13 @@ public class CCSDuelIT extends ESRestTestCase { private static final String REMOTE_INDEX_NAME = "my_remote_cluster:" + INDEX_NAME; private static final String[] TAGS = new String[] { "java", "xml", "sql", "html", "php", "ruby", "python", "perl" }; - private static RestHighLevelClient restHighLevelClient; + private static boolean init = false; @Before public void init() throws Exception { super.initClient(); - if (restHighLevelClient == null) { - restHighLevelClient = new HighLevelClient(client()); + if (init == false) { + init = true; String destinationCluster = System.getProperty("tests.rest.suite"); // we index docs with private randomness otherwise the two clusters end up with exactly the same documents // given that this test class is run twice with same seed. 
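The CCSDuelIT hunks that follow replace the removed RestHighLevelClient plumbing with the low-level REST client plus ObjectPath assertions over the raw JSON response. A minimal sketch of that pattern, assuming it runs inside one of these test methods (the query body, index, and assertions are illustrative only, not part of the patch; Request, Response and ObjectPath are covered by the imports changed above):

// Illustrative sketch only: issue the search through the low-level client and assert on
// the parsed JSON body via ObjectPath instead of SearchResponse getters.
Request searchRequest = new Request("POST", "/" + INDEX_NAME + "/_search");
searchRequest.setJsonEntity("{ \"query\": { \"match\": { \"tags\": \"java\" } } }"); // hypothetical query
Response rawResponse = client().performRequest(searchRequest);
ObjectPath response = ObjectPath.createFromResponse(rawResponse);
int totalHits = response.evaluate("hits.total.value"); // JSON path lookup replaces getHits().getTotalHits()
assertThat(totalHits, greaterThan(0));
assertThat(response.evaluateArraySize("hits.hits"), greaterThan(0));
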
@@ -155,18 +137,6 @@ public void init() throws Exception { } } - private static class HighLevelClient extends RestHighLevelClient { - private HighLevelClient(RestClient restClient) { - super(restClient, (client) -> {}, Collections.emptyList()); - } - } - - @AfterClass - public static void cleanupClient() throws IOException { - IOUtils.close(restHighLevelClient); - restHighLevelClient = null; - } - @Override protected boolean preserveIndicesUponCompletion() { return true; @@ -177,14 +147,13 @@ protected boolean preserveDataStreamsUponCompletion() { return true; } - private static void indexDocuments(String idPrefix) throws IOException, InterruptedException { + private void indexDocuments(String idPrefix) throws IOException, InterruptedException { // this index with a single document is used to test partial failures - IndexRequest indexRequest = new IndexRequest(INDEX_NAME + "_err"); - indexRequest.id("id"); - indexRequest.source("id", "id", "creationDate", "err"); - indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL); - IndexResponse indexResponse = restHighLevelClient.index(indexRequest, RequestOptions.DEFAULT); - assertEquals(201, indexResponse.status().getStatus()); + Request request = new Request("POST", "/" + INDEX_NAME + "_err/_doc"); + request.addParameter("refresh", "wait_for"); + request.setJsonEntity("{ \"id\" : \"id\", \"creationDate\" : \"err\" }"); + Response response = client().performRequest(request); + assertEquals(201, response.getStatusLine().getStatusCode()); ElasticsearchAssertions.assertAcked(createIndex(INDEX_NAME + "_empty")); @@ -209,82 +178,98 @@ private static void indexDocuments(String idPrefix) throws IOException, Interrup }"""; ElasticsearchAssertions.assertAcked(createIndex(INDEX_NAME, settings, mapping)); - BulkProcessor2 bulkProcessor = BulkProcessor2.builder( - (r, l) -> restHighLevelClient.bulkAsync(r, RequestOptions.DEFAULT, l), - new BulkProcessor2.Listener() { - @Override - public void beforeBulk(long executionId, BulkRequest request) {} - - @Override - public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { - assertFalse(response.hasFailures()); - } - - @Override - public void afterBulk(long executionId, BulkRequest request, Exception failure) { - throw new AssertionError("Failed to execute bulk", failure); - } - }, - new DeterministicTaskQueue(random()).getThreadPool() - ).build(); + CountDownLatch latch = new CountDownLatch(2); int numQuestions = randomIntBetween(50, 100); - for (int i = 0; i < numQuestions; i++) { - bulkProcessor.add(buildIndexRequest(idPrefix + i, "question", null)); + { + StringBuilder builder = new StringBuilder(); + for (int i = 0; i < numQuestions; i++) { + buildIndexRequest(builder, idPrefix + i, "question", null); + } + executeBulkAsync(builder.toString(), latch); } - int numAnswers = randomIntBetween(100, 150); - for (int i = 0; i < numAnswers; i++) { - bulkProcessor.add(buildIndexRequest(idPrefix + (i + 1000), "answer", idPrefix + randomIntBetween(0, numQuestions - 1))); + { + StringBuilder builder = new StringBuilder(); + int numAnswers = randomIntBetween(100, 150); + for (int i = 0; i < numAnswers; i++) { + buildIndexRequest(builder, idPrefix + (i + 1000), "answer", idPrefix + randomIntBetween(0, numQuestions - 1)); + } + executeBulkAsync(builder.toString(), latch); } - assertTrue(bulkProcessor.awaitClose(30, TimeUnit.SECONDS)); + + assertTrue(latch.await(30, TimeUnit.SECONDS)); RefreshResponse refreshResponse = refresh(INDEX_NAME); 
ElasticsearchAssertions.assertNoFailures(refreshResponse); } - private static IndexRequest buildIndexRequest(String id, String type, String questionId) { - IndexRequest indexRequest = new IndexRequest(INDEX_NAME); - indexRequest.id(id); + private void executeBulkAsync(String body, CountDownLatch latch) { + Request bulk = new Request("POST", "/_bulk"); + bulk.setJsonEntity(body); + client().performRequestAsync(bulk, new ResponseListener() { + @Override + public void onSuccess(Response response) { + try { + ObjectPath objectPath = ObjectPath.createFromResponse(response); + assertThat(objectPath.evaluate("errors"), Matchers.equalTo(false)); + } catch (IOException ioException) { + throw new UncheckedIOException(ioException); + } finally { + latch.countDown(); + } + } + + @Override + public void onFailure(Exception exception) { + try { + fail(exception.getMessage()); + } finally { + latch.countDown(); + } + } + }); + } + + private static void buildIndexRequest(StringBuilder buffer, String id, String type, String questionId) { + // { "index" : { "_index" : "test", "_id" : "1" } }/n + buffer.append("{ \"index\" : { \"_index\" : \"").append(INDEX_NAME).append("\", \"_id\" : \"").append(id).append("\""); if (questionId != null) { - indexRequest.routing(questionId); + buffer.append(", \"routing\" : \"").append(questionId).append("\""); } - indexRequest.create(true); + buffer.append(" } }\n"); int numTags = randomIntBetween(1, 3); Set tags = new HashSet<>(); if (questionId == null) { for (int i = 0; i < numTags; i++) { - tags.add(randomFrom(TAGS)); + tags.add("\"" + randomFrom(TAGS) + "\""); } } String[] tagsArray = tags.toArray(new String[0]); String date = LocalDate.of(2019, 1, randomIntBetween(1, 31)).format(DateTimeFormatter.ofPattern("yyyy/MM/dd", Locale.ROOT)); - Map joinField = new HashMap<>(); - joinField.put("name", type); + + buffer.append("{ "); + buffer.append("\"id\" : \"").append(id).append("\","); + buffer.append("\"type\" : \"").append(type).append("\","); + buffer.append("\"votes\" : ").append(randomIntBetween(0, 30)).append(","); if (questionId != null) { - joinField.put("parent", questionId); - } - indexRequest.source( - XContentType.JSON, - "id", - id, - "type", - type, - "votes", - randomIntBetween(0, 30), - "questionId", - questionId, - "tags", - tagsArray, - "user", - "user" + randomIntBetween(1, 10), - "suggest", - Collections.singletonMap("input", tagsArray), - "creationDate", - date, - "join", - joinField - ); - return indexRequest; + buffer.append("\"questionId\" : \"").append(questionId).append("\","); + } else { + buffer.append("\"questionId\" : ").append(questionId).append(","); + } + buffer.append("\"tags\" : [").append(String.join(",", Arrays.asList(tagsArray))).append("],"); + buffer.append("\"user\" : \"").append("user").append(randomIntBetween(1, 10)).append("\","); + buffer.append("\"suggest\" : ") + .append("{") + .append("\"input\" : [") + .append(String.join(",", Arrays.asList(tagsArray))) + .append("]},"); + buffer.append("\"creationDate\" : \"").append(date).append("\","); + buffer.append("\"join\" : {"); + buffer.append("\"name\" : \"").append(type).append("\""); + if (questionId != null) { + buffer.append(", \"parent\" : \"").append(questionId).append("\""); + } + buffer.append("}}\n"); } public void testMatchAll() throws Exception { @@ -376,9 +361,9 @@ public void testHighlighting() throws Exception { SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); sourceBuilder.highlighter(new HighlightBuilder().field("tags")); 
sourceBuilder.query(QueryBuilders.matchQuery("tags", "xml")); - Consumer responseChecker = response -> { + CheckedConsumer responseChecker = response -> { assertHits(response); - assertFalse(response.getHits().getHits()[0].getHighlightFields().isEmpty()); + assertFalse(response.evaluateMapKeys("hits.hits.0.highlight").isEmpty()); }; { SearchRequest searchRequest = initLocalAndRemoteSearchRequest(); @@ -398,9 +383,9 @@ public void testFetchSource() throws Exception { sourceBuilder.fetchSource(new String[] { "tags" }, Strings.EMPTY_ARRAY); sourceBuilder.query(QueryBuilders.matchQuery("tags", "ruby")); - Consumer responseChecker = response -> { + CheckedConsumer responseChecker = response -> { assertHits(response); - assertEquals(1, response.getHits().getHits()[0].getSourceAsMap().size()); + assertThat(response.evaluateMapKeys("hits.hits.0._source").size(), equalTo(1)); }; { SearchRequest searchRequest = initLocalAndRemoteSearchRequest(); @@ -419,10 +404,10 @@ public void testDocValueFields() throws Exception { SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); sourceBuilder.docValueField("user.keyword"); sourceBuilder.query(QueryBuilders.matchQuery("tags", "xml")); - Consumer responseChecker = response -> { + CheckedConsumer responseChecker = response -> { assertHits(response); - assertEquals(1, response.getHits().getHits()[0].getFields().size()); - assertNotNull(response.getHits().getHits()[0].getFields().get("user.keyword")); + assertThat(response.evaluateMapKeys("hits.hits.0.fields").size(), equalTo(1)); + assertTrue(response.evaluateMapKeys("hits.hits.0.fields").contains("user.keyword")); }; { SearchRequest searchRequest = initLocalAndRemoteSearchRequest(); @@ -440,10 +425,10 @@ public void testScriptFields() throws Exception { assumeMultiClusterSetup(); SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); sourceBuilder.scriptField("parent", new Script(ScriptType.INLINE, "painless", "doc['join#question']", Collections.emptyMap())); - Consumer responseChecker = response -> { + CheckedConsumer responseChecker = response -> { assertHits(response); - assertEquals(1, response.getHits().getHits()[0].getFields().size()); - assertNotNull(response.getHits().getHits()[0].getFields().get("parent")); + assertThat(response.evaluateMapKeys("hits.hits.0.fields").size(), equalTo(1)); + assertTrue(response.evaluateMapKeys("hits.hits.0.fields").contains("parent")); }; { SearchRequest searchRequest = initLocalAndRemoteSearchRequest(); @@ -462,9 +447,9 @@ public void testExplain() throws Exception { SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); sourceBuilder.explain(true); sourceBuilder.query(QueryBuilders.matchQuery("tags", "sql")); - Consumer responseChecker = response -> { + CheckedConsumer responseChecker = response -> { assertHits(response); - assertNotNull(response.getHits().getHits()[0].getExplanation()); + assertNotNull(response.evaluate("hits.hits.0._explanation")); }; { SearchRequest searchRequest = initLocalAndRemoteSearchRequest(); @@ -486,7 +471,6 @@ public void testRescore() throws Exception { rescorerBuilder.setScoreMode(QueryRescoreMode.Multiply); rescorerBuilder.setRescoreQueryWeight(5); sourceBuilder.addRescorer(rescorerBuilder); - { SearchRequest searchRequest = initLocalAndRemoteSearchRequest(); searchRequest.source(sourceBuilder); @@ -541,13 +525,18 @@ public void testProfile() throws Exception { SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); sourceBuilder.profile(true); sourceBuilder.query(QueryBuilders.matchQuery("tags", "html")); 
- Consumer responseChecker = response -> { + CheckedConsumer responseChecker = response -> { assertHits(response); - assertFalse(response.getProfileResults().isEmpty()); - assertThat( - response.getProfileResults().values().stream().filter(sr -> sr.getFetchPhase() != null).collect(toList()), - not(empty()) - ); + assertFalse(response.evaluateMapKeys("profile").isEmpty()); + int size = response.evaluateArraySize("profile.shards"); + boolean fail = true; + for (int i = 0; i < size; i++) { + if (response.evaluate("profile.shards." + i + ".fetch") != null) { + fail = false; + break; + } + } + assertFalse("profile might be incomplete", fail); }; { SearchRequest searchRequest = initLocalAndRemoteSearchRequest(); @@ -570,10 +559,11 @@ public void testSortByField() throws Exception { sourceBuilder.sort("type.keyword", SortOrder.ASC); sourceBuilder.sort("creationDate", SortOrder.DESC); sourceBuilder.sort("user.keyword", SortOrder.ASC); - Consumer responseChecker = response -> { + CheckedConsumer responseChecker = response -> { assertHits(response, 30); - if (response.getHits().getTotalHits().value > 30) { - assertEquals(3, response.getHits().getHits()[0].getSortValues().length); + int total = response.evaluate("hits.total.value"); + if (total > 30) { + assertThat(response.evaluateArraySize("hits.hits.0.sort"), equalTo(3)); } }; { @@ -597,16 +587,16 @@ public void testSortByFieldOneClusterHasNoResults() throws Exception { sourceBuilder.sort("type.keyword", SortOrder.ASC); sourceBuilder.sort("creationDate", SortOrder.DESC); sourceBuilder.sort("user.keyword", SortOrder.ASC); - Consumer responseChecker = response -> { + CheckedConsumer responseChecker = response -> { assertHits(response); - SearchHit[] hits = response.getHits().getHits(); - for (SearchHit hit : hits) { - assertEquals(3, hit.getSortValues().length); - assertEquals(INDEX_NAME, hit.getIndex()); + int size = response.evaluateArraySize("hits.hits"); + for (int i = 0; i < size; i++) { + String hit = "hits.hits." + i; + assertThat(response.evaluateArraySize(hit + ".sort"), equalTo(3)); if (onlyRemote) { - assertEquals("my_remote_cluster", hit.getClusterAlias()); + assertThat(response.evaluate(hit + "._index"), equalTo(REMOTE_INDEX_NAME)); } else { - assertNull(hit.getClusterAlias()); + assertThat(response.evaluate(hit + "._index"), equalTo(INDEX_NAME)); } } }; @@ -621,14 +611,15 @@ public void testFieldCollapsingOneClusterHasNoResults() throws Exception { boolean onlyRemote = randomBoolean(); sourceBuilder.query(new TermQueryBuilder("_index", onlyRemote ? REMOTE_INDEX_NAME : INDEX_NAME)); sourceBuilder.collapse(new CollapseBuilder("user.keyword")); - Consumer responseChecker = response -> { + CheckedConsumer responseChecker = response -> { assertHits(response); - for (SearchHit hit : response.getHits().getHits()) { - assertEquals(INDEX_NAME, hit.getIndex()); + int size = response.evaluateArraySize("hits.hits"); + for (int i = 0; i < size; i++) { + String hit = "hits.hits." 
+ i; if (onlyRemote) { - assertEquals("my_remote_cluster", hit.getClusterAlias()); + assertThat(response.evaluate(hit + "._index"), equalTo(REMOTE_INDEX_NAME)); } else { - assertNull(hit.getClusterAlias()); + assertThat(response.evaluate(hit + "._index"), equalTo(INDEX_NAME)); } } }; @@ -661,9 +652,9 @@ public void testFieldCollapsingSortByField() throws Exception { sourceBuilder.sort("creationDate", SortOrder.DESC); sourceBuilder.sort(new ScoreSortBuilder()); sourceBuilder.collapse(new CollapseBuilder("user.keyword")); - Consumer responseChecker = response -> { + CheckedConsumer responseChecker = response -> { assertHits(response); - assertEquals(2, response.getHits().getHits()[0].getSortValues().length); + assertThat(response.evaluateArraySize("hits.hits.0.sort"), equalTo(2)); }; { SearchRequest searchRequest = initLocalAndRemoteSearchRequest(); @@ -804,7 +795,7 @@ public void testPipelineAggs() throws Exception { searchRequest.source(sourceBuilder); duelRequest(searchRequest, response -> { assertAggs(response); - assertNotNull(response.getAggregations().get("most_voted")); + assertTrue(response.evaluateMapKeys("aggregations").contains("bucket_metric_value#most_voted")); }); duelRequest(searchRequest, CCSDuelIT::assertAggs); } @@ -813,7 +804,7 @@ public void testPipelineAggs() throws Exception { searchRequest.source(sourceBuilder); duelRequest(searchRequest, response -> { assertAggs(response); - assertNotNull(response.getAggregations().get("most_voted")); + assertTrue(response.evaluateMapKeys("aggregations").contains("bucket_metric_value#most_voted")); }); duelRequest(searchRequest, CCSDuelIT::assertAggs); } @@ -847,12 +838,12 @@ public void testTopHits() throws Exception { public void testTermsLookup() throws Exception { assumeMultiClusterSetup(); - IndexRequest indexRequest = new IndexRequest("lookup_index"); - indexRequest.id("id"); - indexRequest.source("tags", new String[] { "java", "sql", "html", "jax-ws" }); - indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL); - IndexResponse indexResponse = restHighLevelClient.index(indexRequest, RequestOptions.DEFAULT); - assertEquals(201, indexResponse.status().getStatus()); + Request request = new Request("POST", "/lookup_index/_doc/id"); + request.addParameter("refresh", "wait_for"); + request.setJsonEntity("{ \"tags\" : [ \"java\", \"sql\", \"html\", \"jax-ws\" ] }"); + Response response = client().performRequest(request); + assertEquals(201, response.getStatusLine().getStatusCode()); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); TermsQueryBuilder termsQueryBuilder = new TermsQueryBuilder("tags", new TermsLookup("lookup_index", "id", "tags")); sourceBuilder.query(termsQueryBuilder); @@ -879,11 +870,11 @@ public void testShardFailures() throws Exception { boolean compareAsyncAndSyncResponses = false; duelRequest(searchRequest, response -> { assertMultiClusterSearchResponse(response); - assertThat(response.getHits().getTotalHits().value, greaterThan(0L)); - assertNull(response.getAggregations()); - assertNull(response.getSuggest()); - assertThat(response.getHits().getHits().length, greaterThan(0)); - assertThat(response.getFailedShards(), greaterThanOrEqualTo(2)); + assertThat(response.evaluate("hits.total.value"), greaterThan(0)); + assertNull(response.evaluate("aggregations")); + assertNull(response.evaluate("suggest")); + assertThat(response.evaluateArraySize("hits.hits"), greaterThan(0)); + assertThat(response.evaluate("_shards.failed"), greaterThanOrEqualTo(2)); }, compareAsyncAndSyncResponses); } @@ 
-894,24 +885,21 @@ public void testTermSuggester() throws Exception { suggestBuilder.setGlobalText("jva hml"); suggestBuilder.addSuggestion("tags", new TermSuggestionBuilder("tags").suggestMode(TermSuggestionBuilder.SuggestMode.POPULAR)); sourceBuilder.suggest(suggestBuilder); - Consumer responseChecker = response -> { - assertEquals(1, response.getSuggest().size()); - TermSuggestion tags = response.getSuggest().getSuggestion("tags"); - assertThat(tags.getEntries().size(), greaterThan(0)); + CheckedConsumer responseChecker = response -> { + assertThat(response.evaluateMapKeys("suggest").size(), equalTo(1)); + assertThat(response.evaluateArraySize("suggest.term#tags"), greaterThan(0)); }; { SearchRequest searchRequest = initLocalAndRemoteSearchRequest(); searchRequest.source(sourceBuilder); - responseChecker.andThen(CCSDuelIT::assertMultiClusterSearchResponse); // suggest-only queries are not supported by _async_search, so only test against sync search API - duelSearchSync(searchRequest, responseChecker); + duelSearchSync(searchRequest, responseChecker.andThen(CCSDuelIT::assertMultiClusterSearchResponse)); } { SearchRequest searchRequest = initRemoteOnlySearchRequest(); searchRequest.source(sourceBuilder); - responseChecker.andThen(CCSDuelIT::assertSingleRemoteClusterSearchResponse); // suggest-only queries are not supported by _async_search, so only test against sync search API - duelSearchSync(searchRequest, responseChecker); + duelSearchSync(searchRequest, responseChecker.andThen(CCSDuelIT::assertSingleRemoteClusterSearchResponse)); } } @@ -926,24 +914,21 @@ public void testPhraseSuggester() throws Exception { .highlight("", "") ); sourceBuilder.suggest(suggestBuilder); - Consumer responseChecker = response -> { - assertEquals(1, response.getSuggest().size()); - PhraseSuggestion tags = response.getSuggest().getSuggestion("tags"); - assertThat(tags.getEntries().size(), greaterThan(0)); + CheckedConsumer responseChecker = response -> { + assertEquals(1, response.evaluateMapKeys("suggest").size()); + assertThat(response.evaluateArraySize("suggest.phrase#tags"), greaterThan(0)); }; { SearchRequest searchRequest = initLocalAndRemoteSearchRequest(); searchRequest.source(sourceBuilder); // suggest-only queries are not supported by _async_search, so only test against sync search API - responseChecker.andThen(CCSDuelIT::assertMultiClusterSearchResponse); - duelSearchSync(searchRequest, responseChecker); + duelSearchSync(searchRequest, responseChecker.andThen(CCSDuelIT::assertMultiClusterSearchResponse)); } { SearchRequest searchRequest = initRemoteOnlySearchRequest(); searchRequest.source(sourceBuilder); - responseChecker.andThen(CCSDuelIT::assertSingleRemoteClusterSearchResponse); // suggest-only queries are not supported by _async_search, so only test against sync search API - duelSearchSync(searchRequest, responseChecker); + duelSearchSync(searchRequest, responseChecker.andThen(CCSDuelIT::assertSingleRemoteClusterSearchResponse)); } } @@ -955,25 +940,23 @@ public void testCompletionSuggester() throws Exception { suggestBuilder.addSuggestion("java", new CompletionSuggestionBuilder("suggest").size(20).text("jav")); suggestBuilder.addSuggestion("ruby", new CompletionSuggestionBuilder("suggest").size(30).text("rub")); sourceBuilder.suggest(suggestBuilder); - Consumer responseChecker = response -> { - assertEquals(Strings.toString(response, true, true), 3, response.getSuggest().size()); - assertThat(response.getSuggest().getSuggestion("python").getEntries().size(), greaterThan(0)); - 
assertThat(response.getSuggest().getSuggestion("java").getEntries().size(), greaterThan(0)); - assertThat(response.getSuggest().getSuggestion("ruby").getEntries().size(), greaterThan(0)); + CheckedConsumer responseChecker = response -> { + assertThat(response.evaluateMapKeys("suggest").size(), equalTo(3)); + assertThat(response.evaluateArraySize("suggest.completion#python"), greaterThan(0)); + assertThat(response.evaluateArraySize("suggest.completion#java"), greaterThan(0)); + assertThat(response.evaluateArraySize("suggest.completion#ruby"), greaterThan(0)); }; { SearchRequest searchRequest = initLocalAndRemoteSearchRequest(); searchRequest.source(sourceBuilder); - responseChecker.andThen(CCSDuelIT::assertMultiClusterSearchResponse); // suggest-only queries are not supported by _async_search, so only test against sync search API - duelSearchSync(searchRequest, responseChecker); + duelSearchSync(searchRequest, responseChecker.andThen(CCSDuelIT::assertMultiClusterSearchResponse)); } { SearchRequest searchRequest = initRemoteOnlySearchRequest(); searchRequest.source(sourceBuilder); - responseChecker.andThen(CCSDuelIT::assertSingleRemoteClusterSearchResponse); // suggest-only queries are not supported by _async_search, so only test against sync search API - duelSearchSync(searchRequest, responseChecker); + duelSearchSync(searchRequest, responseChecker.andThen(CCSDuelIT::assertSingleRemoteClusterSearchResponse)); } } @@ -992,7 +975,7 @@ private static SearchRequest initLocalAndRemoteSearchRequest() { } private static SearchRequest initRemoteOnlySearchRequest() { - List indices = Arrays.asList("my_remote_cluster:" + INDEX_NAME); + List indices = List.of("my_remote_cluster:" + INDEX_NAME); final SearchRequest request = new SearchRequest(indices.toArray(new String[0])); if (randomBoolean()) { request.setPreFilterShardSize(between(1, 20)); @@ -1000,12 +983,15 @@ private static SearchRequest initRemoteOnlySearchRequest() { return request; } - private void duelRequest(SearchRequest searchRequest, Consumer responseChecker) throws Exception { + private void duelRequest(SearchRequest searchRequest, CheckedConsumer responseChecker) throws Exception { duelRequest(searchRequest, responseChecker, true); } - private void duelRequest(SearchRequest searchRequest, Consumer responseChecker, boolean compareAsyncToSyncResponses) - throws Exception { + private void duelRequest( + SearchRequest searchRequest, + CheckedConsumer responseChecker, + boolean compareAsyncToSyncResponses + ) throws Exception { Map syncResponseMap = duelSearchSync(searchRequest, responseChecker); Map asyncResponseMap = duelSearchAsync(searchRequest, responseChecker); if (compareAsyncToSyncResponses) { @@ -1016,26 +1002,17 @@ private void duelRequest(SearchRequest searchRequest, Consumer r /** * @return responseMap from one of the Synchronous Search Requests */ - private static Map duelSearchSync(SearchRequest searchRequest, Consumer responseChecker) + private static Map duelSearchSync(SearchRequest searchRequest, CheckedConsumer responseChecker) throws Exception { CountDownLatch latch = new CountDownLatch(2); AtomicReference exception1 = new AtomicReference<>(); - AtomicReference minimizeRoundtripsResponse = new AtomicReference<>(); + AtomicReference minimizeRoundtripsResponse = new AtomicReference<>(); searchRequest.setCcsMinimizeRoundtrips(true); - restHighLevelClient.searchAsync( - searchRequest, - RequestOptions.DEFAULT, - new LatchedActionListener<>(ActionListener.wrap(minimizeRoundtripsResponse::set, exception1::set), latch) - ); - + 
submitSyncSearch(searchRequest, minimizeRoundtripsResponse, exception1, latch); AtomicReference exception2 = new AtomicReference<>(); - AtomicReference fanOutResponse = new AtomicReference<>(); + AtomicReference fanOutResponse = new AtomicReference<>(); searchRequest.setCcsMinimizeRoundtrips(false); - restHighLevelClient.searchAsync( - searchRequest, - RequestOptions.DEFAULT, - new LatchedActionListener<>(ActionListener.wrap(fanOutResponse::set, exception2::set), latch) - ); + submitSyncSearch(searchRequest, fanOutResponse, exception2, latch); latch.await(); @@ -1049,167 +1026,170 @@ private static Map duelSearchSync(SearchRequest searchRequest, C if (exception2.get() != null) { throw new AssertionError("one of the two requests returned an exception", exception2.get()); } - SearchResponse minimizeRoundtripsSearchResponse = minimizeRoundtripsResponse.get(); - SearchResponse fanOutSearchResponse = null; - try { - responseChecker.accept(minimizeRoundtripsSearchResponse); - - // if only the remote cluster was searched, then only one reduce phase is expected - int expectedReducePhasesMinRoundTrip = 1; - if (searchRequest.indices().length > 1) { - expectedReducePhasesMinRoundTrip = searchRequest.indices().length + 1; - } + ObjectPath minimizeRoundtripsSearchResponse = ObjectPath.createFromResponse(minimizeRoundtripsResponse.get()); + responseChecker.accept(minimizeRoundtripsSearchResponse); - assertEquals(expectedReducePhasesMinRoundTrip, minimizeRoundtripsSearchResponse.getNumReducePhases()); - fanOutSearchResponse = fanOutResponse.get(); - responseChecker.accept(fanOutSearchResponse); - assertEquals(1, fanOutSearchResponse.getNumReducePhases()); + // if only the remote cluster was searched, then only one reduce phase is expected + int expectedReducePhasesMinRoundTrip = 1; + if (searchRequest.indices().length > 1) { + expectedReducePhasesMinRoundTrip = searchRequest.indices().length + 1; + } + if (expectedReducePhasesMinRoundTrip == 1) { + assertThat( + minimizeRoundtripsSearchResponse.evaluate("num_reduce_phases"), + anyOf(equalTo(expectedReducePhasesMinRoundTrip), nullValue()) + ); + } else { + assertThat(minimizeRoundtripsSearchResponse.evaluate("num_reduce_phases"), equalTo(expectedReducePhasesMinRoundTrip)); + } + ObjectPath fanOutSearchResponse = ObjectPath.createFromResponse(fanOutResponse.get()); + responseChecker.accept(fanOutSearchResponse); + assertThat(fanOutSearchResponse.evaluate("num_reduce_phases"), anyOf(equalTo(1), nullValue())); // default value is 1? 
- // compare Clusters objects - SearchResponse.Clusters clustersMRT = minimizeRoundtripsSearchResponse.getClusters(); - SearchResponse.Clusters clustersMRTFalse = fanOutSearchResponse.getClusters(); + // compare Clusters objects + assertThat( + minimizeRoundtripsSearchResponse.evaluate("_cluster.total"), + equalTo(fanOutSearchResponse.evaluate("_cluster.total")) + ); + assertThat( + minimizeRoundtripsSearchResponse.evaluate("_cluster.successful"), + equalTo(fanOutSearchResponse.evaluate("_cluster.successful")) + ); + assertThat( + minimizeRoundtripsSearchResponse.evaluate("_cluster.skipped"), + equalTo(fanOutSearchResponse.evaluate("_cluster.skipped")) + ); + assertThat( + minimizeRoundtripsSearchResponse.evaluate("_cluster.running"), + equalTo(fanOutSearchResponse.evaluate("_cluster.running")) + ); + assertThat( + minimizeRoundtripsSearchResponse.evaluate("_cluster.partial"), + equalTo(fanOutSearchResponse.evaluate("_cluster.partial")) + ); + assertThat( + minimizeRoundtripsSearchResponse.evaluate("_cluster.failed"), + equalTo(fanOutSearchResponse.evaluate("_cluster.failed")) + ); - assertEquals(clustersMRT.getTotal(), clustersMRTFalse.getTotal()); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL) - ); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED) - ); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING) - ); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL) - ); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.FAILED) + Map minimizeRoundtripsResponseMap = responseToMap(minimizeRoundtripsSearchResponse); + if (minimizeRoundtripsSearchResponse.evaluate("_clusters") != null && fanOutSearchResponse.evaluate("_clusters") != null) { + Map fanOutResponseMap = responseToMap(fanOutSearchResponse); + compareResponseMaps(minimizeRoundtripsResponseMap, fanOutResponseMap, "Comparing sync_search minimizeRoundTrip vs. 
fanOut"); + assertThat( + minimizeRoundtripsSearchResponse.evaluate("_shards.skipped"), + lessThanOrEqualTo((Integer) fanOutSearchResponse.evaluate("_shards.skipped")) ); + } + return minimizeRoundtripsResponseMap; + } + } + + private static void submitSyncSearch( + SearchRequest searchRequest, + AtomicReference responseRef, + AtomicReference exceptionRef, + CountDownLatch latch + ) throws IOException { + String indices = Strings.collectionToDelimitedString(List.of(searchRequest.indices()), ","); + final Request request = new Request("POST", URLEncoder.encode(indices, StandardCharsets.UTF_8) + "/_search"); + request.addParameter("ccs_minimize_roundtrips", Boolean.toString(searchRequest.isCcsMinimizeRoundtrips())); + request.addParameter(RestSearchAction.TYPED_KEYS_PARAM, "true"); + request.setEntity(createEntity(searchRequest.source(), XContentType.JSON, ToXContent.EMPTY_PARAMS)); + client().performRequestAsync(request, new ResponseListener() { + @Override + public void onSuccess(Response response) { + try { + responseRef.set(response); + } finally { + latch.countDown(); + } + } - Map minimizeRoundtripsResponseMap = responseToMap(minimizeRoundtripsSearchResponse); - if (clustersMRT.hasClusterObjects() && clustersMRTFalse.hasClusterObjects()) { - Map fanOutResponseMap = responseToMap(fanOutSearchResponse); - compareResponseMaps( - minimizeRoundtripsResponseMap, - fanOutResponseMap, - "Comparing sync_search minimizeRoundTrip vs. fanOut" - ); - assertThat( - minimizeRoundtripsSearchResponse.getSkippedShards(), - lessThanOrEqualTo(fanOutSearchResponse.getSkippedShards()) - ); + @Override + public void onFailure(Exception exception) { + try { + exceptionRef.set(exception); + } finally { + latch.countDown(); } - return minimizeRoundtripsResponseMap; - } finally { - if (fanOutSearchResponse != null) fanOutSearchResponse.decRef(); - if (minimizeRoundtripsSearchResponse != null) minimizeRoundtripsSearchResponse.decRef(); } - } + }); } /** * @return responseMap from one of the async searches */ - private static Map duelSearchAsync(SearchRequest searchRequest, Consumer responseChecker) - throws Exception { + private static Map duelSearchAsync( + SearchRequest searchRequest, + CheckedConsumer responseChecker + ) throws Exception { searchRequest.setCcsMinimizeRoundtrips(true); - AsyncSearchResponse minimizeRoundtripsResponse = submitAsyncSearch( - searchRequest, - TimeValue.timeValueSeconds(1), - restHighLevelClient.getParserConfig() - ); + ObjectPath minimizeRoundtripsResponse = submitAsyncSearch(searchRequest, TimeValue.timeValueSeconds(1)); try { - final String responseId = minimizeRoundtripsResponse.getId(); + final String responseId = minimizeRoundtripsResponse.evaluate("id");// minimizeRoundtripsResponse.getId(); assertBusy(() -> { - AsyncSearchResponse resp = getAsyncSearch(responseId, restHighLevelClient.getParserConfig()); - assertThat(resp.isRunning(), equalTo(false)); + ObjectPath resp = getAsyncSearch(responseId); + assertThat(resp.evaluate("is_running"), equalTo(false)); }); - minimizeRoundtripsResponse = getAsyncSearch(responseId, restHighLevelClient.getParserConfig()); + minimizeRoundtripsResponse = getAsyncSearch(responseId); } finally { - deleteAsyncSearch(minimizeRoundtripsResponse.getId()); + deleteAsyncSearch(minimizeRoundtripsResponse.evaluate("id")); } searchRequest.setCcsMinimizeRoundtrips(false); - AsyncSearchResponse fanOutResponse = submitAsyncSearch( - searchRequest, - TimeValue.timeValueSeconds(1), - restHighLevelClient.getParserConfig() - ); + ObjectPath fanOutResponse = 
submitAsyncSearch(searchRequest, TimeValue.timeValueSeconds(1)); try { - final String responseId = fanOutResponse.getId(); + final String responseId = fanOutResponse.evaluate("id"); assertBusy(() -> { - AsyncSearchResponse resp = getAsyncSearch(responseId, restHighLevelClient.getParserConfig()); - assertThat(resp.isRunning(), equalTo(false)); + ObjectPath resp = getAsyncSearch(responseId); + assertThat(resp.evaluate("is_running"), equalTo(false)); }); - fanOutResponse = getAsyncSearch(responseId, restHighLevelClient.getParserConfig()); + fanOutResponse = getAsyncSearch(responseId); } finally { - deleteAsyncSearch(fanOutResponse.getId()); + deleteAsyncSearch(fanOutResponse.evaluate("id")); } - SearchResponse minimizeRoundtripsSearchResponse = null; - SearchResponse fanOutSearchResponse = null; - try { - fanOutSearchResponse = fanOutResponse.getSearchResponse(); - minimizeRoundtripsSearchResponse = minimizeRoundtripsResponse.getSearchResponse(); - - responseChecker.accept(minimizeRoundtripsSearchResponse); - - // if only the remote cluster was searched, then only one reduce phase is expected - int expectedReducePhasesMinRoundTrip = 1; - if (searchRequest.indices().length > 1) { - expectedReducePhasesMinRoundTrip = searchRequest.indices().length + 1; - } - assertEquals(expectedReducePhasesMinRoundTrip, minimizeRoundtripsSearchResponse.getNumReducePhases()); - responseChecker.accept(fanOutSearchResponse); - assertEquals(1, fanOutSearchResponse.getNumReducePhases()); + // extract the response + minimizeRoundtripsResponse = new ObjectPath(minimizeRoundtripsResponse.evaluate("response")); + fanOutResponse = new ObjectPath(fanOutResponse.evaluate("response")); - // compare Clusters objects - SearchResponse.Clusters clustersMRT = minimizeRoundtripsSearchResponse.getClusters(); - SearchResponse.Clusters clustersMRTFalse = fanOutSearchResponse.getClusters(); + responseChecker.accept(minimizeRoundtripsResponse); - assertEquals(clustersMRT.getTotal(), clustersMRTFalse.getTotal()); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL) - ); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED) - ); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING) - ); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL) + // if only the remote cluster was searched, then only one reduce phase is expected + int expectedReducePhasesMinRoundTrip = 1; + if (searchRequest.indices().length > 1) { + expectedReducePhasesMinRoundTrip = searchRequest.indices().length + 1; + } + if (expectedReducePhasesMinRoundTrip == 1) { + assertThat( + minimizeRoundtripsResponse.evaluate("num_reduce_phases"), + anyOf(equalTo(expectedReducePhasesMinRoundTrip), nullValue()) ); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.FAILED) + } else { + assertThat(minimizeRoundtripsResponse.evaluate("num_reduce_phases"), equalTo(expectedReducePhasesMinRoundTrip)); + } + + responseChecker.accept(fanOutResponse); + assertThat(fanOutResponse.evaluate("num_reduce_phases"), 
anyOf(equalTo(1), nullValue())); // default value is 1? + + assertThat(minimizeRoundtripsResponse.evaluate("_cluster.total"), equalTo(fanOutResponse.evaluate("_cluster.total"))); + assertThat(minimizeRoundtripsResponse.evaluate("_cluster.successful"), equalTo(fanOutResponse.evaluate("_cluster.successful"))); + assertThat(minimizeRoundtripsResponse.evaluate("_cluster.skipped"), equalTo(fanOutResponse.evaluate("_cluster.skipped"))); + assertThat(minimizeRoundtripsResponse.evaluate("_cluster.running"), equalTo(fanOutResponse.evaluate("_cluster.running"))); + assertThat(minimizeRoundtripsResponse.evaluate("_cluster.partial"), equalTo(fanOutResponse.evaluate("_cluster.partial"))); + assertThat(minimizeRoundtripsResponse.evaluate("_cluster.failed"), equalTo(fanOutResponse.evaluate("_cluster.failed"))); + Map minimizeRoundtripsResponseMap = responseToMap(minimizeRoundtripsResponse); + if (minimizeRoundtripsResponse.evaluate("_clusters") != null && fanOutResponse.evaluate("_clusters") != null) { + Map fanOutResponseMap = responseToMap(fanOutResponse); + compareResponseMaps(minimizeRoundtripsResponseMap, fanOutResponseMap, "Comparing async_search minimizeRoundTrip vs. fanOut"); + assertThat( + minimizeRoundtripsResponse.evaluate("_shards.skipped"), + lessThanOrEqualTo((Integer) fanOutResponse.evaluate("_shards.skipped")) ); - - Map minimizeRoundtripsResponseMap = responseToMap(minimizeRoundtripsSearchResponse); - if (clustersMRT.hasClusterObjects() && clustersMRTFalse.hasClusterObjects()) { - Map fanOutResponseMap = responseToMap(fanOutSearchResponse); - compareResponseMaps( - minimizeRoundtripsResponseMap, - fanOutResponseMap, - "Comparing async_search minimizeRoundTrip vs. fanOut" - ); - assertThat(minimizeRoundtripsSearchResponse.getSkippedShards(), lessThanOrEqualTo(fanOutSearchResponse.getSkippedShards())); - } - return minimizeRoundtripsResponseMap; - } finally { - if (minimizeRoundtripsSearchResponse != null) minimizeRoundtripsSearchResponse.decRef(); - if (fanOutSearchResponse != null) fanOutSearchResponse.decRef(); } + return minimizeRoundtripsResponseMap; } private static void compareResponseMaps(Map responseMap1, Map responseMap2, String info) { @@ -1222,11 +1202,7 @@ private static void compareResponseMaps(Map responseMap1, Map from) { - assertThat(response.getHits().getHits().length, greaterThan(0)); + int totalHits = response.evaluate("hits.total.value"); + assertThat(totalHits, greaterThan(0)); + assertThat(response.evaluate("_shards.failed"), Matchers.equalTo(0)); + assertNull(response.evaluate("hits.aggregations")); + assertNull(response.evaluate("hits.suggest")); + if (totalHits > from) { + assertThat(response.evaluateArraySize("hits.hits"), greaterThan(0)); } else { - assertThat(response.getHits().getHits().length, equalTo(0)); + assertThat(response.evaluateArraySize("hits.hits"), equalTo(0)); } } - private static void assertAggs(SearchResponse response) { - if (response.getClusters().getTotal() == 1) { + private static void assertAggs(ObjectPath response) throws IOException { + int totalHits = response.evaluate("_clusters.total"); + if (totalHits == 1) { assertSingleRemoteClusterSearchResponse(response); } else { assertMultiClusterSearchResponse(response); } - assertThat(response.getHits().getTotalHits().value, greaterThan(0L)); - assertEquals(0, response.getHits().getHits().length); - assertNull(response.getSuggest()); - assertNotNull(response.getAggregations()); - List aggregations = response.getAggregations().asList(); - for (Aggregation aggregation : aggregations) { - if 
(aggregation instanceof MultiBucketsAggregation multiBucketsAggregation) { + assertThat(response.evaluate("hits.total.value"), greaterThan(0)); + assertThat(response.evaluateArraySize("hits.hits"), equalTo(0)); + assertNull(response.evaluate("suggest")); + assertNotNull(response.evaluate("aggregations")); + Set aggregations = response.evaluateMapKeys("aggregations"); + for (String aggregation : aggregations) { + if (aggregation.startsWith("date_histogram") || aggregation.startsWith("sterms")) { assertThat( - "agg " + multiBucketsAggregation.getName() + " has 0 buckets", - multiBucketsAggregation.getBuckets().size(), + aggregation + " has 0 buckets", + response.evaluateArraySize("aggregations." + aggregation + ".buckets"), greaterThan(0) ); } @@ -1347,8 +1326,8 @@ private static void assertAggs(SearchResponse response) { } @SuppressWarnings("unchecked") - private static Map responseToMap(SearchResponse response) throws IOException { - BytesReference bytesReference = XContentHelper.toXContent(response, XContentType.JSON, false); + private static Map responseToMap(ObjectPath response) throws IOException { + BytesReference bytesReference = BytesReference.bytes(response.toXContentBuilder(XContentType.JSON.xContent())); Map responseMap = XContentHelper.convertToMap(bytesReference, false, XContentType.JSON).v2(); assertNotNull(responseMap.put("took", -1)); responseMap.remove("num_reduce_phases"); diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml new file mode 100644 index 0000000000000..6d6ee1f6bed41 --- /dev/null +++ b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml @@ -0,0 +1,214 @@ +setup: + - skip: + version: ' - 8.11.99' + reason: 'ingest simulate added in 8.12' + +--- +"Test ingest simulate with reroute": + + - skip: + features: headers + + - do: + headers: + Content-Type: application/json + ingest.put_pipeline: + id: "my-pipeline-1" + body: > + { + "processors": [ + { + "set": { + "field": "my-pipeline-1-ran", + "value": true + } + }, + { + "reroute": { + "destination": "index-2-a" + } + } + ] + } + - match: { acknowledged: true } + + - do: + headers: + Content-Type: application/json + ingest.put_pipeline: + id: "my-final-pipeline-1" + body: > + { + "processors": [ + { + "set": { + "field": "my-final-pipeline-1-ran", + "value": true + } + } + ] + } + - match: { acknowledged: true } + + - do: + indices.put_template: + name: my-template-1 + body: + index_patterns: index-1-* + settings: + default_pipeline: "my-pipeline-1" + final_pipeline: "my-final-pipeline-1" + + - do: + headers: + Content-Type: application/json + ingest.put_pipeline: + id: "my-pipeline-2" + body: > + { + "processors": [ + { + "set": { + "field": "my-pipeline-2-ran", + "value": true + } + } + ] + } + - match: { acknowledged: true } + + - do: + headers: + Content-Type: application/json + ingest.put_pipeline: + id: "my-final-pipeline-2" + body: > + { + "processors": [ + { + "set": { + "field": "my-final-pipeline-2-ran", + "value": true + } + }, + { + "uppercase": { + "field": "foo" + } + } + ] + } + - match: { acknowledged: true } + + - do: + indices.put_template: + name: my-template-2 + body: + index_patterns: index-2-* + settings: + default_pipeline: "my-pipeline-2" + final_pipeline: "my-final-pipeline-2" + + - do: + headers: + Content-Type: 
application/json + simulate.ingest: + body: > + { + "docs": [ + { + "_index": "index-1-a", + "_id": "id", + "_source": { + "foo": "bar" + } + }, + { + "_index": "index-1-a", + "_id": "id", + "_source": { + "foo": "rab" + } + } + ], + "pipeline_substitutions": { + "my-pipeline": { + "processors": [ + ] + } + } + } + - length: { docs: 2 } + - match: { docs.0.doc._index: "index-2-a" } + - match: { docs.0.doc._source.foo: "BAR" } + - match: { docs.0.doc._source.my-pipeline-1-ran: true } + - match: { docs.0.doc._source.my-final-pipeline-1-ran: null } + - match: { docs.0.doc._source.my-pipeline-2-ran: true } + - match: { docs.0.doc._source.my-final-pipeline-2-ran: true } + - match: { docs.0.doc.executed_pipelines: ["my-pipeline-1", "my-pipeline-2", "my-final-pipeline-2"] } + - match: { docs.0.doc._index: "index-2-a" } + - match: { docs.1.doc._source.foo: "RAB" } + - match: { docs.0.doc._source.my-pipeline-1-ran: true } + - match: { docs.0.doc._source.my-final-pipeline-1-ran: null } + - match: { docs.0.doc._source.my-pipeline-2-ran: true } + - match: { docs.0.doc._source.my-final-pipeline-2-ran: true } + - match: { docs.1.doc.executed_pipelines: ["my-pipeline-1", "my-pipeline-2", "my-final-pipeline-2"] } + +--- +"Test ingest simulate with errors": + + - skip: + features: headers + + - do: + headers: + Content-Type: application/json + ingest.put_pipeline: + id: "my-pipeline" + body: > + { + "processors": [ + { + "uppercase": { + "field": "field1" + } + } + ] + } + - match: { acknowledged: true } + + - do: + indices.create: + index: index + body: + settings: + default_pipeline: "my-pipeline" + + - do: + headers: + Content-Type: application/json + simulate.ingest: + body: > + { + "docs": [ + { + "_index": "index", + "_source": { + "field1": true + } + }, + { + "_index": "index", + "_source": { + "field1": "bar" + } + } + ] + } + - length: { docs: 2 } + - match: { docs.0.doc._index: "index" } + - match: { docs.0.doc.error.type: "illegal_argument_exception" } + - match: { docs.0.doc.executed_pipelines: null } + - match: { docs.1.doc._index: "index" } + - match: { docs.1.doc._source.field1: "BAR" } + - match: { docs.1.doc.executed_pipelines: ["my-pipeline"] } diff --git a/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/30_desired_balance.yml b/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/30_desired_balance.yml index afe66594a490b..92905243fdb12 100644 --- a/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/30_desired_balance.yml +++ b/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/30_desired_balance.yml @@ -200,4 +200,4 @@ setup: - gte: { 'stats.unassigned_shards' : 0 } - gte: { 'stats.total_allocations' : 0 } - gte: { 'stats.undesired_allocations' : 0 } - - gte: { 'stats.undesired_allocations_fraction' : 0.0 } + - gte: { 'stats.undesired_allocations_ratio' : 0.0 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/simulate.ingest.json b/rest-api-spec/src/main/resources/rest-api-spec/api/simulate.ingest.json new file mode 100644 index 0000000000000..91e7153d466da --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/simulate.ingest.json @@ -0,0 +1,48 @@ +{ + "simulate.ingest":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/simulate-ingest-api.html", + "description":"Simulates running ingest with example documents." 
+ }, + "stability":"experimental", + "visibility":"public", + "headers":{ + "accept": [ "application/json"], + "content_type": ["application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_ingest/_simulate", + "methods":[ + "GET", + "POST" + ] + }, + { + "path":"/_ingest/{index}/_simulate", + "methods":[ + "GET", + "POST" + ], + "parts":{ + "index":{ + "type":"string", + "description":"Default index for docs which don't provide one" + } + } + } + ] + }, + "params":{ + "pipeline":{ + "type":"string", + "description":"The pipeline id to preprocess incoming documents with if no pipeline is given for a particular document" + } + }, + "body":{ + "description":"The simulate definition", + "required":true + } + } +} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml index 4647c85ba9caf..a4204034bfd80 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml @@ -235,4 +235,4 @@ setup: - gte: { 'stats.unassigned_shards' : 0 } - gte: { 'stats.total_allocations' : 0 } - gte: { 'stats.undesired_allocations' : 0 } - - gte: { 'stats.undesired_allocations_fraction' : 0.0 } + - gte: { 'stats.undesired_allocations_ratio' : 0.0 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/60_dense_vector_dynamic_mapping.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/60_dense_vector_dynamic_mapping.yml index 62d752b1efe88..f2a10b26ae553 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/60_dense_vector_dynamic_mapping.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/60_dense_vector_dynamic_mapping.yml @@ -4,6 +4,9 @@ setup: reason: 'Dynamic mapping of floats to dense_vector was added in 8.11' --- "Fields indexed as strings won't be transformed into dense_vector": + - skip: + version: ' - 8.11.0' + reason: 'Bug fix was added in 8.11.1' - do: index: index: strings-are-not-floats diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml new file mode 100644 index 0000000000000..89011750479a0 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml @@ -0,0 +1,340 @@ +setup: + - skip: + version: ' - 8.11.99' + reason: 'ingest simulate added in 8.12' + +--- +"Test no pipelines": + + - skip: + features: headers + + - do: + headers: + Content-Type: application/json + simulate.ingest: + body: > + { + "docs": [ + { + "_index": "index-1", + "_id": "id", + "_source": { + "foo": "bar" + } + }, + { + "_index": "index-2", + "_id": "id", + "_source": { + "foo": "rab" + } + } + ] + } + - length: { docs: 2 } + - match: { docs.0.doc._index: "index-1" } + - match: { docs.0.doc._source.foo: "bar" } + - match: { docs.0.doc.executed_pipelines: [] } + - match: { docs.1.doc._index: "index-2" } + - match: { docs.1.doc._source.foo: "rab" } + - match: { docs.1.doc.executed_pipelines: [] } + +--- +"Test existing index with pipelines": + + - skip: + features: headers + + - do: + headers: + Content-Type: application/json + ingest.put_pipeline: + id: "my-pipeline" + body: > + { + "processors": [ + ] + } + - match: 
{ acknowledged: true } + + - do: + headers: + Content-Type: application/json + ingest.put_pipeline: + id: "my-final-pipeline" + body: > + { + "processors": [ + ] + } + - match: { acknowledged: true } + + - do: + indices.create: + index: index + body: + settings: + default_pipeline: "my-pipeline" + final_pipeline: "my-final-pipeline" + + - do: + headers: + Content-Type: application/json + simulate.ingest: + body: > + { + "docs": [ + { + "_index": "index", + "_id": "id", + "_source": { + "foo": "bar" + } + }, + { + "_index": "index", + "_id": "id", + "_source": { + "foo": "rab" + } + } + ], + "pipeline_substitutions": { + "my-pipeline": { + "processors": [ + ] + } + } + } + - length: { docs: 2 } + - match: { docs.0.doc._source.foo: "bar" } + - match: { docs.0.doc.executed_pipelines: ["my-pipeline", "my-final-pipeline"] } + - match: { docs.1.doc._source.foo: "rab" } + - match: { docs.1.doc.executed_pipelines: ["my-pipeline", "my-final-pipeline"] } + +--- +"Test index templates with pipelines": + + - skip: + features: headers + + - do: + headers: + Content-Type: application/json + ingest.put_pipeline: + id: "my-pipeline" + body: > + { + "processors": [ + ] + } + - match: { acknowledged: true } + + - do: + headers: + Content-Type: application/json + ingest.put_pipeline: + id: "my-final-pipeline" + body: > + { + "processors": [ + ] + } + - match: { acknowledged: true } + + - do: + indices.put_index_template: + name: my-template + body: + index_patterns: index-* + template: + settings: + default_pipeline: "my-pipeline" + final_pipeline: "my-final-pipeline" + + - do: + headers: + Content-Type: application/json + simulate.ingest: + body: > + { + "docs": [ + { + "_index": "index-1", + "_id": "id", + "_source": { + "foo": "bar" + } + }, + { + "_index": "index-1", + "_id": "id", + "_source": { + "foo": "rab" + } + } + ], + "pipeline_substitutions": { + "my-pipeline": { + "processors": [ + ] + } + } + } + - length: { docs: 2 } + - match: { docs.0.doc._index: "index-1" } + - match: { docs.0.doc._source.foo: "bar" } + - match: { docs.0.doc.executed_pipelines: ["my-pipeline", "my-final-pipeline"] } + - match: { docs.1.doc._index: "index-1" } + - match: { docs.1.doc._source.foo: "rab" } + - match: { docs.1.doc.executed_pipelines: ["my-pipeline", "my-final-pipeline"] } + +--- +"Test bad pipeline substitution": + + - skip: + features: headers + + - do: + headers: + Content-Type: application/json + ingest.put_pipeline: + id: "my-pipeline" + body: > + { + "processors": [ + ] + } + - match: { acknowledged: true } + + - do: + indices.put_index_template: + name: my-template + body: + index_patterns: index-* + template: + settings: + default_pipeline: "my-pipeline" + + - do: + catch: "request" + headers: + Content-Type: application/json + simulate.ingest: + body: > + { + "docs": [ + { + "_index": "index-1", + "_id": "id", + "_source": { + "foo": "bar" + } + }, + { + "_index": "index-1", + "_id": "id", + "_source": { + "foo": "rab" + } + } + ], + "pipeline_substitutions": { + "my-pipeline": { + "processors": [ + { + "non-existent-processor": { + } + } + ] + } + } + } + - match: { status: 500 } + +--- +"Test index in path": + + - skip: + features: headers + + - do: + headers: + Content-Type: application/json + simulate.ingest: + index: "test-index" + body: > + { + "docs": [ + { + "_id": "id", + "_source": { + "foo": "bar" + } + }, + { + "_id": "id", + "_source": { + "foo": "rab" + } + } + ] + } + - length: { docs: 2 } + - match: { docs.0.doc._index: "test-index" } + - match: { docs.0.doc._source.foo: "bar" } + - 
match: { docs.0.doc.executed_pipelines: [] } + - match: { docs.1.doc._index: "test-index" } + - match: { docs.1.doc._source.foo: "rab" } + - match: { docs.1.doc.executed_pipelines: [] } + +--- +"Test pipeline in query param": + + - skip: + features: headers + + - do: + headers: + Content-Type: application/json + ingest.put_pipeline: + id: "my-pipeline" + body: > + { + "processors": [ + ] + } + - match: { acknowledged: true } + + - do: + headers: + Content-Type: application/json + simulate.ingest: + pipeline: "my-pipeline" + body: > + { + "docs": [ + { + "_index": "index-1", + "_id": "id", + "_source": { + "foo": "bar" + } + }, + { + "_index": "index-2", + "_id": "id", + "_source": { + "foo": "rab" + } + } + ] + } + - length: { docs: 2 } + - match: { docs.0.doc._index: "index-1" } + - match: { docs.0.doc._source.foo: "bar" } + - match: { docs.0.doc.executed_pipelines: ["my-pipeline"] } + - match: { docs.1.doc._index: "index-2" } + - match: { docs.1.doc._source.foo: "rab" } + - match: { docs.1.doc.executed_pipelines: ["my-pipeline"] } diff --git a/server/build.gradle b/server/build.gradle index 0e154d2287b56..01879e232634b 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -141,9 +141,11 @@ sourceSets.main.compiledBy(generateModulesList, generatePluginsList) if (BuildParams.isSnapshotBuild() == false) { tasks.named("test").configure { systemProperty 'es.index_mode_feature_flag_registered', 'true' + systemProperty 'es.failure_store_feature_flag_enabled', 'true' } tasks.named("internalClusterTest").configure { systemProperty 'es.index_mode_feature_flag_registered', 'true' + systemProperty 'es.failure_store_feature_flag_enabled', 'true' } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java index ec01e34976058..3dd90b096c631 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.search; +import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.search.MultiSearchAction; @@ -50,6 +51,7 @@ import static org.hamcrest.Matchers.notNullValue; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE) +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/102257") public class SearchCancellationIT extends AbstractSearchCancellationTestCase { @Override diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java index ca522064e3d04..f91a848ed2362 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java @@ -39,6 +39,7 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.transport.RemoteTransportException; import java.io.IOException; import java.nio.file.Files; @@ -768,7 +769,18 @@ public void testQueuedOperationsAndBrokenRepoOnMasterFailOver() throws Exception ensureStableCluster(3); 
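
The simulate.ingest YAML tests above cover the three ways of invoking the endpoint: documents and pipeline substitutions in the body, a default index in the path, and a pipeline as a query parameter. As a rough illustration of the same call from application code, here is a minimal sketch using the low-level Java REST client; the host, index name and pipeline id are assumptions taken from the tests, not part of this change.

import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class SimulateIngestClientSketch {
    public static void main(String[] args) throws Exception {
        // Assumes a node is reachable on localhost:9200; adjust as needed.
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Default index in the path, pipeline as a query parameter, mirroring the YAML tests above.
            Request request = new Request("POST", "/_ingest/test-index/_simulate");
            request.addParameter("pipeline", "my-pipeline");
            request.setJsonEntity("""
                {
                  "docs": [
                    { "_id": "id", "_source": { "foo": "bar" } },
                    { "_id": "id", "_source": { "foo": "rab" } }
                  ]
                }""");
            Response response = client.performRequest(request);
            // Each entry of the returned "docs" array reports the resolved _index, the post-pipeline
            // _source, and executed_pipelines, as asserted in the tests above. Nothing is indexed.
            System.out.println(new String(response.getEntity().getContent().readAllBytes()));
        }
    }
}
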
awaitNoMoreRunningOperations(); - expectThrows(RepositoryException.class, deleteFuture::actionGet); + var innerException = expectThrows(ExecutionException.class, RuntimeException.class, deleteFuture::get); + + // There may be many layers of RTE to unwrap here, see https://github.com/elastic/elasticsearch/issues/102351. + // ExceptionsHelper#unwrapCause gives up at 10 layers of wrapping so we must unwrap more tenaciously by hand here: + while (true) { + if (innerException instanceof RemoteTransportException remoteTransportException) { + innerException = asInstanceOf(RuntimeException.class, remoteTransportException.getCause()); + } else { + assertThat(innerException, instanceOf(RepositoryException.class)); + break; + } + } } public void testQueuedSnapshotOperationsAndBrokenRepoOnMasterFailOver() throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java b/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java index a68d56e05cb48..0277c569fbe5d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java @@ -7,20 +7,22 @@ */ package org.elasticsearch.versioning; +import org.apache.logging.log4j.Level; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.cluster.coordination.LinearizabilityChecker; import org.elasticsearch.cluster.coordination.LinearizabilityChecker.LinearizabilityCheckAborted; import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.logging.ChunkedLoggingStream; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.discovery.AbstractDisruptionTestCase; @@ -30,7 +32,7 @@ import java.io.FileInputStream; import java.io.IOException; -import java.util.ArrayList; +import java.io.OutputStream; import java.util.Arrays; import java.util.Base64; import java.util.List; @@ -435,16 +437,28 @@ public void assertLinearizable() { } catch (LinearizabilityCheckAborted e) { logger.warn("linearizability check check was aborted", e); } finally { - // implicitly test that we can serialize all histories. - String serializedHistory = base64Serialize(history); - if (linearizable == false) { - // we dump base64 encoded data, since the nature of this test is that it does not reproduce even with same seed. - logger.error( - "Linearizability check failed. Spec: {}, initial version: {}, serialized history: {}", - spec, - initialVersion, - serializedHistory - ); + try { + if (linearizable) { + // ensure that we can serialize all histories. 
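
The loop added to ConcurrentSnapshotsIT above unwraps RemoteTransportException layers by hand because ExceptionsHelper#unwrapCause gives up after ten levels of wrapping. The same idea expressed as a standalone, JDK-only helper, purely for illustration (the class and method names below are invented, not part of the change):

import java.util.function.Predicate;

final class CauseChains {
    private CauseChains() {}

    /**
     * Walks the cause chain of {@code root}, descending only through throwables accepted by the
     * {@code isWrapper} predicate, and returns the first cause of the target type, or null if none is found.
     */
    static <T extends Throwable> T firstCauseOfType(Throwable root, Class<T> target, Predicate<Throwable> isWrapper) {
        for (Throwable current = root; current != null; current = current.getCause()) {
            if (target.isInstance(current)) {
                return target.cast(current);
            }
            if (isWrapper.test(current) == false) {
                return null; // stop at the first layer that is not a known wrapper
            }
        }
        return null;
    }
}

In the test above, the wrapper check is an instanceof test against RemoteTransportException and the target type is RepositoryException.
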
+ writeHistory(new OutputStreamStreamOutput(OutputStream.nullOutputStream()), history); + } else { + logger.error("Linearizability check failed. Spec: {}, initial version: {}", spec, initialVersion); + // we dump base64 encoded data, since the nature of this test is that it does not reproduce even with same seed. + try ( + var chunkedLoggingStream = ChunkedLoggingStream.create( + logger, + Level.ERROR, + "unlinearizable history", + ReferenceDocs.LOGGING // any old docs link will do + ); + var output = new OutputStreamStreamOutput(chunkedLoggingStream) + ) { + writeHistory(output, history); + } + } + } catch (IOException e) { + logger.error("failure writing out history", e); + fail(e); } } assertTrue("Must be linearizable", linearizable); @@ -623,31 +637,15 @@ private static Function missingResponseGenerator() { return input -> new FailureHistoryOutput(); } - private String base64Serialize(LinearizabilityChecker.History history) { - BytesStreamOutput output = new BytesStreamOutput(); - try { - List events = history.copyEvents(); - output.writeInt(events.size()); - for (LinearizabilityChecker.Event event : events) { - writeEvent(event, output); - } - output.close(); - return Base64.getEncoder().encodeToString(BytesReference.toBytes(output.bytes())); - } catch (IOException e) { - throw new RuntimeException(e); - } + private static void writeHistory(StreamOutput output, LinearizabilityChecker.History history) throws IOException { + output.writeCollection(history.copyEvents(), ConcurrentSeqNoVersioningIT::writeEvent); } private static LinearizabilityChecker.History readHistory(StreamInput input) throws IOException { - int size = input.readInt(); - List events = new ArrayList<>(size); - for (int i = 0; i < size; ++i) { - events.add(readEvent(input)); - } - return new LinearizabilityChecker.History(events); + return new LinearizabilityChecker.History(input.readCollectionAsList(ConcurrentSeqNoVersioningIT::readEvent)); } - private static void writeEvent(LinearizabilityChecker.Event event, BytesStreamOutput output) throws IOException { + private static void writeEvent(StreamOutput output, LinearizabilityChecker.Event event) throws IOException { output.writeEnum(event.type()); output.writeNamedWriteable((NamedWriteable) event.value()); output.writeInt(event.id()); diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index 0b899d3bafc3e..7f9328e2c08ab 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -407,6 +407,7 @@ with org.elasticsearch.features.FeatureInfrastructureFeatures, org.elasticsearch.health.HealthFeatures, + org.elasticsearch.cluster.service.TransportFeatures, org.elasticsearch.cluster.metadata.MetadataFeatures, org.elasticsearch.rest.RestFeatures, org.elasticsearch.indices.IndicesFeatures; diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 5bdc74b8f2545..baae500b70d55 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -169,6 +169,9 @@ static TransportVersion def(int id) { public static final TransportVersion INFERENCE_MULTIPLE_INPUTS = def(8_538_00_0); public static final TransportVersion ADDITIONAL_DESIRED_BALANCE_RECONCILIATION_STATS = def(8_539_00_0); public static final TransportVersion ML_STATE_CHANGE_TIMESTAMPS = def(8_540_00_0); + public static final TransportVersion DATA_STREAM_FAILURE_STORE_ADDED = 
def(8_541_00_0); + public static final TransportVersion ML_INFERENCE_OPENAI_ADDED = def(8_542_00_0); + public static final TransportVersion SHUTDOWN_MIGRATION_STATUS_INCLUDE_COUNTS = def(8_543_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index a855b6b8ee7e3..220090a132ec2 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -202,8 +202,10 @@ import org.elasticsearch.action.admin.indices.validate.query.TransportValidateQueryAction; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction; import org.elasticsearch.action.bulk.BulkAction; +import org.elasticsearch.action.bulk.SimulateBulkAction; import org.elasticsearch.action.bulk.TransportBulkAction; import org.elasticsearch.action.bulk.TransportShardBulkAction; +import org.elasticsearch.action.bulk.TransportSimulateBulkAction; import org.elasticsearch.action.delete.DeleteAction; import org.elasticsearch.action.delete.TransportDeleteAction; import org.elasticsearch.action.explain.ExplainAction; @@ -439,6 +441,7 @@ import org.elasticsearch.rest.action.ingest.RestDeletePipelineAction; import org.elasticsearch.rest.action.ingest.RestGetPipelineAction; import org.elasticsearch.rest.action.ingest.RestPutPipelineAction; +import org.elasticsearch.rest.action.ingest.RestSimulateIngestAction; import org.elasticsearch.rest.action.ingest.RestSimulatePipelineAction; import org.elasticsearch.rest.action.search.RestClearScrollAction; import org.elasticsearch.rest.action.search.RestCountAction; @@ -758,6 +761,7 @@ public void reg actions.register(MultiGetAction.INSTANCE, TransportMultiGetAction.class); actions.register(TransportShardMultiGetAction.TYPE, TransportShardMultiGetAction.class); actions.register(BulkAction.INSTANCE, TransportBulkAction.class); + actions.register(SimulateBulkAction.INSTANCE, TransportSimulateBulkAction.class); actions.register(TransportShardBulkAction.TYPE, TransportShardBulkAction.class); actions.register(SearchAction.INSTANCE, TransportSearchAction.class); actions.register(SearchScrollAction.INSTANCE, TransportSearchScrollAction.class); @@ -944,6 +948,7 @@ public void initRestHandlers(Supplier nodesInCluster, Predicate< registerHandler.accept(new RestGetComposableIndexTemplateAction()); registerHandler.accept(new RestDeleteComposableIndexTemplateAction()); registerHandler.accept(new RestSimulateIndexTemplateAction()); + registerHandler.accept(new RestSimulateIngestAction()); registerHandler.accept(new RestSimulateTemplateAction()); registerHandler.accept(new RestPutMappingAction()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java index af8637cf1febc..2b52a18fb4185 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java @@ -19,6 +19,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexVersion; 
import org.elasticsearch.index.IndexVersions; import org.elasticsearch.indices.SystemIndices; @@ -61,6 +62,7 @@ public class TransportGetFeatureUpgradeStatusAction extends TransportMasterNodeA PersistentTasksService persistentTasksService; @Inject + @UpdateForV9 // Once we begin working on 9.x, we need to update our migration classes public TransportGetFeatureUpgradeStatusAction( TransportService transportService, ThreadPool threadPool, @@ -82,8 +84,6 @@ public TransportGetFeatureUpgradeStatusAction( EsExecutors.DIRECT_EXECUTOR_SERVICE ); - assert Version.CURRENT.major == 8 : "Once we begin working on 9.x, we need to update our migration classes"; - this.systemIndices = systemIndices; this.persistentTasksService = persistentTasksService; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java index ee6797ca58fb9..9d10065c9c3e9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; @@ -22,6 +23,7 @@ import java.io.IOException; import java.util.Map; +@UpdateForV9 // make this class a regular ActionRequest rather than a MasterNodeReadRequest public class GetAliasesRequest extends MasterNodeReadRequest implements AliasesRequest { public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.strictExpandHidden(); @@ -40,9 +42,10 @@ public GetAliasesRequest() {} /** * NB prior to 8.12 get-aliases was a TransportMasterNodeReadAction so for BwC we must remain able to read these requests until we no - * longer need to support {@link org.elasticsearch.TransportVersions#CLUSTER_FEATURES_ADDED} and earlier. Once we remove this we can - * also make this class a regular ActionRequest instead of a MasterNodeReadRequest. + * longer need to support calling this action remotely. Once we remove this we can also make this class a regular ActionRequest instead + * of a MasterNodeReadRequest. 
*/ + @UpdateForV9 // remove this constructor public GetAliasesRequest(StreamInput in) throws IOException { super(in); indices = in.readStringArray(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java index c0e26b16585c4..edb05b0fcef75 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java @@ -12,6 +12,7 @@ import org.elasticsearch.cluster.metadata.AliasMetadata; import org.elasticsearch.cluster.metadata.DataStreamAlias; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.UpdateForV9; import java.io.IOException; import java.util.List; @@ -38,8 +39,9 @@ public Map> getDataStreamAliases() { /** * NB prior to 8.12 get-aliases was a TransportMasterNodeReadAction so for BwC we must remain able to write these responses until we no - * longer need to support {@link org.elasticsearch.TransportVersions#CLUSTER_FEATURES_ADDED} and earlier. + * longer need to support calling this action remotely. */ + @UpdateForV9 // replace this implementation with TransportAction.localOnly() @Override public void writeTo(StreamOutput out) throws IOException { out.writeMap(aliases, StreamOutput::writeCollection); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java index e43d1a825c233..9b9fb49c1bbe0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.indices.SystemIndices.SystemIndexAccessLevel; import org.elasticsearch.tasks.CancellableTask; @@ -41,9 +42,9 @@ /** * NB prior to 8.12 this was a TransportMasterNodeReadAction so for BwC it must be registered with the TransportService (i.e. a - * HandledTransportAction) until we no longer need to support {@link org.elasticsearch.TransportVersions#CLUSTER_FEATURES_ADDED} and - * earlier. + * HandledTransportAction) until we no longer need to support calling this action remotely. 
*/ +@UpdateForV9 // remove the HandledTransportAction superclass, this action need not be registered with the TransportService public class TransportGetAliasesAction extends TransportLocalClusterStateAction { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(TransportGetAliasesAction.class); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java index 1cec71d2abe53..87334afa3ed8a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java @@ -50,6 +50,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -111,7 +112,7 @@ public TransportAction( this.taskQueue = clusterService.createTaskQueue("auto-create", Priority.URGENT, batchExecutionContext -> { final var listener = new AllocationActionMultiListener(threadPool.getThreadContext()); final var taskContexts = batchExecutionContext.taskContexts(); - final var successfulRequests = Maps.newMapWithExpectedSize(taskContexts.size()); + final var successfulRequests = Maps.>newMapWithExpectedSize(taskContexts.size()); var state = batchExecutionContext.initialState(); for (final var taskContext : taskContexts) { final var task = taskContext.getTask(); @@ -169,6 +170,13 @@ public void onFailure(Exception e) { private ClusterStateAckListener getAckListener( String indexName, AllocationActionMultiListener allocationActionMultiListener + ) { + return getAckListener(List.of(indexName), allocationActionMultiListener); + } + + private ClusterStateAckListener getAckListener( + List indexNames, + AllocationActionMultiListener allocationActionMultiListener ) { return new ClusterStateAckListener() { @Override @@ -180,22 +188,22 @@ public boolean mustAck(DiscoveryNode discoveryNode) { public void onAllNodesAcked() { ActiveShardsObserver.waitForActiveShards( clusterService, - new String[] { indexName }, + indexNames.toArray(String[]::new), ActiveShardCount.DEFAULT, request.timeout(), allocationActionMultiListener.delay(listener) - .map(shardsAcked -> new CreateIndexResponse(true, shardsAcked, indexName)) + .map(shardsAcked -> new CreateIndexResponse(true, shardsAcked, indexNames.get(0))) ); } @Override public void onAckFailure(Exception e) { - allocationActionMultiListener.delay(listener).onResponse(new CreateIndexResponse(false, false, indexName)); + allocationActionMultiListener.delay(listener).onResponse(new CreateIndexResponse(false, false, indexNames.get(0))); } @Override public void onAckTimeout() { - allocationActionMultiListener.delay(listener).onResponse(new CreateIndexResponse(false, false, indexName)); + allocationActionMultiListener.delay(listener).onResponse(new CreateIndexResponse(false, false, indexNames.get(0))); } @Override @@ -212,7 +220,7 @@ public TimeValue ackTimeout() { */ ClusterState execute( ClusterState currentState, - Map successfulRequests, + Map> successfulRequests, ClusterStateTaskExecutor.TaskContext taskContext, AllocationActionMultiListener allocationActionMultiListener ) throws Exception { @@ -255,9 +263,13 @@ ClusterState execute( rerouteCompletionIsNotRequired() ); - final var indexName = 
clusterState.metadata().dataStreams().get(request.index()).getIndices().get(0).getName(); - taskContext.success(getAckListener(indexName, allocationActionMultiListener)); - successfulRequests.put(request, indexName); + final var dataStream = clusterState.metadata().dataStreams().get(request.index()); + final var backingIndexName = dataStream.getIndices().get(0).getName(); + final var indexNames = dataStream.getFailureIndices().isEmpty() + ? List.of(backingIndexName) + : List.of(backingIndexName, dataStream.getFailureIndices().get(0).getName()); + taskContext.success(getAckListener(indexNames, allocationActionMultiListener)); + successfulRequests.put(request, indexNames); return clusterState; } else { final var indexName = IndexNameExpressionResolver.resolveDateMathExpression(request.index()); @@ -272,7 +284,7 @@ ClusterState execute( if (shouldAutoCreate == false) { // The index already exists. taskContext.success(getAckListener(indexName, allocationActionMultiListener)); - successfulRequests.put(request, indexName); + successfulRequests.put(request, List.of(indexName)); return currentState; } } @@ -318,7 +330,7 @@ ClusterState execute( rerouteCompletionIsNotRequired() ); taskContext.success(getAckListener(indexName, allocationActionMultiListener)); - successfulRequests.put(request, indexName); + successfulRequests.put(request, List.of(indexName)); return clusterState; } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java index 76259d899c90a..c2b6c666d829a 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.ingest.SimulateIndexResponse; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -505,7 +506,9 @@ public void writeThin(StreamOutput out) throws IOException { } private void writeResponseType(StreamOutput out) throws IOException { - if (response instanceof IndexResponse) { + if (response instanceof SimulateIndexResponse) { + out.writeByte((byte) 4); + } else if (response instanceof IndexResponse) { out.writeByte((byte) 0); } else if (response instanceof DeleteResponse) { out.writeByte((byte) 1); @@ -523,6 +526,7 @@ private static DocWriteResponse readResponse(ShardId shardId, StreamInput in) th case 1 -> new DeleteResponse(shardId, in); case 2 -> null; case 3 -> new UpdateResponse(shardId, in); + case 4 -> new SimulateIndexResponse(in); default -> throw new IllegalArgumentException("Unexpected type [" + type + "]"); }; } @@ -534,6 +538,7 @@ private static DocWriteResponse readResponse(StreamInput in) throws IOException case 1 -> new DeleteResponse(in); case 2 -> null; case 3 -> new UpdateResponse(in); + case 4 -> new SimulateIndexResponse(in); default -> throw new IllegalArgumentException("Unexpected type [" + type + "]"); }; } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/SimulateBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/SimulateBulkAction.java new file mode 100644 index 0000000000000..a799c60fe7b38 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/bulk/SimulateBulkAction.java @@ -0,0 +1,21 @@ +/* + 
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.bulk; + +import org.elasticsearch.action.ActionType; + +public class SimulateBulkAction extends ActionType { + + public static final SimulateBulkAction INSTANCE = new SimulateBulkAction(); + public static final String NAME = "indices:data/write/simulate/bulk"; + + private SimulateBulkAction() { + super(NAME, BulkResponse::new); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/bulk/SimulateBulkRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/SimulateBulkRequest.java new file mode 100644 index 0000000000000..c167c88954b38 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/bulk/SimulateBulkRequest.java @@ -0,0 +1,81 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.bulk; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; + +import java.io.IOException; +import java.util.Map; + +/** + * This extends BulkRequest with support for providing substitute pipeline definitions. In a user request, the pipeline substitutions + * will look something like this: + * + * "pipeline_substitutions": { + * "my-pipeline-1": { + * "processors": [ + * { + * "set": { + * "field": "my-new-boolean-field", + * "value": true + * } + * } + * ] + * }, + * "my-pipeline-2": { + * "processors": [ + * { + * "set": { + * "field": "my-new-boolean-field", + * "value": true + * }, + * "rename": { + * "field": "old_field", + * "target_field": "new field" + * } + * } + * ] + * } + * } + * + * The pipelineSubstitutions Map held by this class is intended to be the result of XContentHelper.convertToMap(). The top-level keys + * are the pipelineIds ("my-pipeline-1" and "my-pipeline-2" in the example above). The values are the Maps of "processors" to the List of + * processor definitions. + */ +public class SimulateBulkRequest extends BulkRequest { + private final Map> pipelineSubstitutions; + + /** + * @param pipelineSubstitutions The pipeline definitions that are to be used in place of any pre-existing pipeline definitions with + * the same pipelineId. The key of the map is the pipelineId, and the value the pipeline definition as + * parsed by XContentHelper.convertToMap(). 
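
The SimulateBulkRequest javadoc above pins down the shape of the substitutions map: pipeline id to parsed pipeline definition, in the form produced by XContentHelper.convertToMap(). A minimal sketch of building such a request, for example from a test or a caller of the transport action; the JSON snippet, class name and generic cast are illustrative assumptions, not part of this diff.

import org.elasticsearch.action.bulk.SimulateBulkRequest;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.xcontent.XContentType;

import java.util.Map;

class SimulateBulkRequestSketch {

    @SuppressWarnings("unchecked")
    static SimulateBulkRequest withSubstitutions() {
        // One substitute definition keyed by pipeline id, in the same form the REST layer would produce.
        String json = """
            {
              "my-pipeline-1": {
                "processors": [
                  { "set": { "field": "my-new-boolean-field", "value": true } }
                ]
              }
            }""";
        Map<String, Object> parsed = XContentHelper.convertToMap(new BytesArray(json), false, XContentType.JSON).v2();
        // Each value is itself a Map of "processors" to the list of processor configurations.
        return new SimulateBulkRequest((Map<String, Map<String, Object>>) (Map<?, ?>) parsed);
    }
}
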
+ */ + public SimulateBulkRequest(@Nullable Map> pipelineSubstitutions) { + super(); + this.pipelineSubstitutions = pipelineSubstitutions; + } + + @SuppressWarnings("unchecked") + public SimulateBulkRequest(StreamInput in) throws IOException { + super(in); + this.pipelineSubstitutions = (Map>) in.readGenericValue(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeGenericValue(pipelineSubstitutions); + } + + public Map> getPipelineSubstitutions() { + return pipelineSubstitutions; + } +} diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index f11baec87de9b..b89b5e2de7924 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -17,6 +17,7 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteRequest.OpType; import org.elasticsearch.action.DocWriteResponse; @@ -45,6 +46,7 @@ import org.elasticsearch.cluster.routing.IndexRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Assertions; @@ -92,6 +94,7 @@ public class TransportBulkAction extends HandledTransportAction bulkAction; private final ThreadPool threadPool; private final ClusterService clusterService; private final IngestService ingestService; @@ -141,8 +144,39 @@ public TransportBulkAction( SystemIndices systemIndices, LongSupplier relativeTimeProvider ) { - super(BulkAction.NAME, transportService, actionFilters, BulkRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); + this( + BulkAction.INSTANCE, + BulkRequest::new, + threadPool, + transportService, + clusterService, + ingestService, + client, + actionFilters, + indexNameExpressionResolver, + indexingPressure, + systemIndices, + relativeTimeProvider + ); + } + + TransportBulkAction( + ActionType bulkAction, + Writeable.Reader requestReader, + ThreadPool threadPool, + TransportService transportService, + ClusterService clusterService, + IngestService ingestService, + NodeClient client, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + IndexingPressure indexingPressure, + SystemIndices systemIndices, + LongSupplier relativeTimeProvider + ) { + super(bulkAction.name(), transportService, actionFilters, requestReader, EsExecutors.DIRECT_EXECUTOR_SERVICE); Objects.requireNonNull(relativeTimeProvider); + this.bulkAction = bulkAction; this.threadPool = threadPool; this.clusterService = clusterService; this.ingestService = ingestService; @@ -267,7 +301,6 @@ protected void doRun() { protected void doInternalExecute(Task task, BulkRequest bulkRequest, String executorName, ActionListener listener) { final long startTime = relativeTime(); - final AtomicArray responses = new AtomicArray<>(bulkRequest.requests.size()); boolean hasIndexRequestsWithPipelines = false; final Metadata metadata = clusterService.state().getMetadata(); @@ -301,7 +334,7 @@ protected void 
doInternalExecute(Task task, BulkRequest bulkRequest, String exec if (clusterService.localNode().isIngestNode()) { processBulkIndexIngestRequest(task, bulkRequest, executorName, l); } else { - ingestForwarder.forwardIngestRequest(BulkAction.INSTANCE, bulkRequest, l); + ingestForwarder.forwardIngestRequest(bulkAction, bulkRequest, l); } }); return; @@ -333,6 +366,30 @@ protected void doInternalExecute(Task task, BulkRequest bulkRequest, String exec } // Step 3: create all the indices that are missing, if there are any missing. start the bulk after all the creates come back. + createMissingIndicesAndIndexData( + task, + bulkRequest, + executorName, + listener, + autoCreateIndices, + indicesThatCannotBeCreated, + startTime + ); + } + + /* + * This method is responsible for creating any missing indices and indexing the data in the BulkRequest + */ + protected void createMissingIndicesAndIndexData( + Task task, + BulkRequest bulkRequest, + String executorName, + ActionListener listener, + Set autoCreateIndices, + Map indicesThatCannotBeCreated, + long startTime + ) { + final AtomicArray responses = new AtomicArray<>(bulkRequest.requests.size()); if (autoCreateIndices.isEmpty()) { executeBulk(task, bulkRequest, startTime, listener, executorName, responses, indicesThatCannotBeCreated); } else { @@ -383,6 +440,14 @@ protected void doRun() { } } + /* + * This returns the IngestService to be used for the given request. The default implementation ignores the request and always returns + * the same ingestService, but child classes might use information in the request in creating an IngestService specific to that request. + */ + protected IngestService getIngestService(BulkRequest request) { + return ingestService; + } + static void prohibitAppendWritesInBackingIndices(DocWriteRequest writeRequest, Metadata metadata) { DocWriteRequest.OpType opType = writeRequest.opType(); if ((opType == OpType.CREATE || opType == OpType.INDEX) == false) { @@ -488,7 +553,7 @@ private static boolean setResponseFailureIfIndexMatches( return false; } - private long buildTookInMillis(long startTimeNanos) { + protected long buildTookInMillis(long startTimeNanos) { return TimeUnit.NANOSECONDS.toMillis(relativeTime() - startTimeNanos); } @@ -806,7 +871,7 @@ private void processBulkIndexIngestRequest( ) { final long ingestStartTimeInNanos = System.nanoTime(); final BulkRequestModifier bulkRequestModifier = new BulkRequestModifier(original); - ingestService.executeBulkRequest( + getIngestService(original).executeBulkRequest( original.numberOfActions(), () -> bulkRequestModifier, bulkRequestModifier::markItemAsDropped, diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java new file mode 100644 index 0000000000000..7e2fef88c7680 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java @@ -0,0 +1,108 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.bulk; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.ingest.SimulateIndexResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.IndexingPressure; +import org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.ingest.IngestService; +import org.elasticsearch.ingest.SimulateIngestService; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.Map; +import java.util.Set; + +public class TransportSimulateBulkAction extends TransportBulkAction { + @Inject + public TransportSimulateBulkAction( + ThreadPool threadPool, + TransportService transportService, + ClusterService clusterService, + IngestService ingestService, + NodeClient client, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + IndexingPressure indexingPressure, + SystemIndices systemIndices + ) { + super( + SimulateBulkAction.INSTANCE, + SimulateBulkRequest::new, + threadPool, + transportService, + clusterService, + ingestService, + client, + actionFilters, + indexNameExpressionResolver, + indexingPressure, + systemIndices, + System::nanoTime + ); + } + + /* + * This overrides indexData in TransportBulkAction in order to _not_ actually create any indices or index any data. Instead, each + * request gets a corresponding CREATE response, using information from the request. + */ + @Override + protected void createMissingIndicesAndIndexData( + Task task, + BulkRequest bulkRequest, + String executorName, + ActionListener listener, + Set autoCreateIndices, + Map indicesThatCannotBeCreated, + long startTime + ) { + final AtomicArray responses = new AtomicArray<>(bulkRequest.requests.size()); + for (int i = 0; i < bulkRequest.requests.size(); i++) { + DocWriteRequest request = bulkRequest.requests.get(i); + assert request instanceof IndexRequest; // This action is only ever called with IndexRequests + responses.set( + i, + BulkItemResponse.success( + 0, + DocWriteRequest.OpType.CREATE, + new SimulateIndexResponse( + request.id(), + request.index(), + request.version(), + ((IndexRequest) request).source(), + ((IndexRequest) request).getContentType(), + ((IndexRequest) request).getExecutedPipelines() + ) + ) + ); + } + listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTime))); + } + + /* + * This overrides TransportSimulateBulkAction's getIngestService to allow us to provide an IngestService that handles pipeline + * substitutions defined in the request. 
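
The comment above describes an ingest service that honors the pipeline substitutions carried by the request. The SimulateIngestService implementation itself is not part of this hunk; the following purely hypothetical sketch only illustrates the resolution order the substitutions map implies: a substituted definition wins over the stored one with the same id.

import java.util.Map;

/**
 * Hypothetical illustration only; this is not the actual SimulateIngestService. Pipeline resolution
 * during simulation consults the request's substitutions before falling back to the stored definition.
 */
final class SubstitutionAwarePipelines {
    private final Map<String, Map<String, Object>> substitutions;
    private final Map<String, Map<String, Object>> storedPipelines;

    SubstitutionAwarePipelines(Map<String, Map<String, Object>> substitutions, Map<String, Map<String, Object>> storedPipelines) {
        this.substitutions = substitutions;
        this.storedPipelines = storedPipelines;
    }

    Map<String, Object> resolve(String pipelineId) {
        Map<String, Object> substituted = substitutions.get(pipelineId);
        return substituted != null ? substituted : storedPipelines.get(pipelineId);
    }
}
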
+ */ + @Override + protected IngestService getIngestService(BulkRequest request) { + IngestService rawIngestService = super.getIngestService(request); + return new SimulateIngestService(rawIngestService, request); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java index 9c1fb63a6b8d0..7530fc18acb59 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java @@ -307,6 +307,24 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params, @Nulla builder.endArray(); } builder.field(DataStream.GENERATION_FIELD.getPreferredName(), dataStream.getGeneration()); + if (DataStream.isFailureStoreEnabled()) { + builder.field(DataStream.FAILURE_INDICES_FIELD.getPreferredName()); + builder.startArray(); + for (Index failureStore : dataStream.getFailureIndices()) { + builder.startObject(); + failureStore.toXContentFragment(builder); + IndexProperties indexProperties = indexSettingsValues.get(failureStore); + if (indexProperties != null) { + builder.field(PREFER_ILM.getPreferredName(), indexProperties.preferIlm()); + if (indexProperties.ilmPolicyName() != null) { + builder.field(ILM_POLICY_FIELD.getPreferredName(), indexProperties.ilmPolicyName()); + } + builder.field(MANAGED_BY.getPreferredName(), indexProperties.managedBy.displayValue); + } + builder.endObject(); + } + builder.endArray(); + } if (dataStream.getMetadata() != null) { builder.field(DataStream.METADATA_FIELD.getPreferredName(), dataStream.getMetadata()); } @@ -327,6 +345,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params, @Nulla builder.field(SYSTEM_FIELD.getPreferredName(), dataStream.isSystem()); builder.field(ALLOW_CUSTOM_ROUTING.getPreferredName(), dataStream.isAllowCustomRouting()); builder.field(REPLICATED.getPreferredName(), dataStream.isReplicated()); + if (DataStream.isFailureStoreEnabled()) { + builder.field(DataStream.FAILURE_STORE_FIELD.getPreferredName(), dataStream.isFailureStore()); + } if (timeSeries != null) { builder.startObject(TIME_SERIES.getPreferredName()); builder.startArray(TEMPORAL_RANGES.getPreferredName()); diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java b/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java index 9dccdfc64620e..a9c0c8ef42380 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java @@ -37,7 +37,7 @@ public class IndexResponse extends DocWriteResponse { * information about the pipelines executed. An empty list means that there were no pipelines executed. */ @Nullable - private final List executedPipelines; + protected final List executedPipelines; public IndexResponse(ShardId shardId, StreamInput in) throws IOException { super(shardId, in); diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulateIndexResponse.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulateIndexResponse.java new file mode 100644 index 0000000000000..3363f3caa164b --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulateIndexResponse.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.ingest; + +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.support.replication.ReplicationResponse; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.List; + +/** + * This is an IndexResponse that is specifically for simulate requests. Unlike typical IndexResponses, we need to include the original + * source in a SimulateIndexResponse, and don't need most other fields. This has to extend IndexResponse though so that it can be used by + * BulkItemResponse in IngestService. + */ +public class SimulateIndexResponse extends IndexResponse { + private final BytesReference source; + private final XContentType sourceXContentType; + + @SuppressWarnings("this-escape") + public SimulateIndexResponse(StreamInput in) throws IOException { + super(in); + this.source = in.readBytesReference(); + this.sourceXContentType = XContentType.valueOf(in.readString()); + setShardInfo(new ReplicationResponse.ShardInfo(0, 0)); + } + + @SuppressWarnings("this-escape") + public SimulateIndexResponse( + String id, + String index, + long version, + BytesReference source, + XContentType sourceXContentType, + List pipelines + ) { + // We don't actually care about most of the IndexResponse fields: + super(new ShardId(index, "", 0), id == null ? 
"" : id, 0, 0, version, true, pipelines); + this.source = source; + this.sourceXContentType = sourceXContentType; + setShardInfo(new ReplicationResponse.ShardInfo(0, 0)); + } + + @Override + public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("_id", getId()); + builder.field("_index", getShardId().getIndexName()); + builder.field("_version", getVersion()); + builder.field("_source", XContentHelper.convertToMap(source, false, sourceXContentType).v2()); + assert executedPipelines != null : "executedPipelines is null when it shouldn't be - we always list pipelines in simulate mode"; + builder.array("executed_pipelines", executedPipelines.toArray()); + return builder; + } + + @Override + public RestStatus status() { + return RestStatus.CREATED; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBytesReference(source); + out.writeString(sourceXContentType.name()); + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("SimulateIndexResponse["); + builder.append("index=").append(getIndex()); + try { + builder.append(",source=").append(XContentHelper.convertToJson(source, false, sourceXContentType)); + } catch (IOException e) { + throw new RuntimeException(e); + } + builder.append(",pipelines=[").append(String.join(", ", executedPipelines)); + return builder.append("]]").toString(); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java index e010e840d3f2d..128281ead4046 100644 --- a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java @@ -95,7 +95,7 @@ public void run() { connection, querySearchRequest, context.getTask(), - new SearchActionListener(shardTarget, shardIndex) { + new SearchActionListener<>(shardTarget, shardIndex) { @Override protected void innerOnResponse(QuerySearchResult response) { diff --git a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java index d0a4ca14ee4f3..afffde13cf641 100644 --- a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java @@ -9,10 +9,8 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.search.ScoreDoc; -import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AtomicArray; -import org.elasticsearch.search.RescoreDocIds; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.dfs.AggregatedDfs; @@ -20,7 +18,6 @@ import org.elasticsearch.search.fetch.ShardFetchSearchRequest; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchContextId; -import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.transport.Transport; @@ -99,40 +96,32 @@ public void onFailure(Exception e) { private void innerRun() throws Exception { final int numShards = context.getNumShards(); - final boolean isScrollSearch = context.getRequest().scroll() != null; - 
final List phaseResults = queryResults.asList(); final SearchPhaseController.ReducedQueryPhase reducedQueryPhase = resultConsumer.reduce(); // Usually when there is a single shard, we force the search type QUERY_THEN_FETCH. But when there's kNN, we might // still use DFS_QUERY_THEN_FETCH, which does not perform the "query and fetch" optimization during the query phase. final boolean queryAndFetchOptimization = queryResults.length() == 1 && context.getRequest().hasKnnSearch() == false && reducedQueryPhase.rankCoordinatorContext() == null; - final Runnable finishPhase = () -> moveToNextPhase( - queryResults, - reducedQueryPhase, - queryAndFetchOptimization ? queryResults : fetchResults.getAtomicArray() - ); if (queryAndFetchOptimization) { - assert phaseResults.isEmpty() || phaseResults.get(0).fetchResult() != null - : "phaseResults empty [" + phaseResults.isEmpty() + "], single result: " + phaseResults.get(0).fetchResult(); + assert assertConsistentWithQueryAndFetchOptimization(); // query AND fetch optimization - finishPhase.run(); + moveToNextPhase(reducedQueryPhase, queryResults); } else { ScoreDoc[] scoreDocs = reducedQueryPhase.sortedTopDocs().scoreDocs(); - final List[] docIdsToLoad = SearchPhaseController.fillDocIdsToLoad(numShards, scoreDocs); // no docs to fetch -- sidestep everything and return if (scoreDocs.length == 0) { // we have to release contexts here to free up resources - phaseResults.stream().map(SearchPhaseResult::queryResult).forEach(this::releaseIrrelevantSearchContext); - finishPhase.run(); + queryResults.asList().stream().map(SearchPhaseResult::queryResult).forEach(this::releaseIrrelevantSearchContext); + moveToNextPhase(reducedQueryPhase, fetchResults.getAtomicArray()); } else { - final ScoreDoc[] lastEmittedDocPerShard = isScrollSearch + final ScoreDoc[] lastEmittedDocPerShard = context.getRequest().scroll() != null ? SearchPhaseController.getLastEmittedDocPerShard(reducedQueryPhase, numShards) : null; + final List[] docIdsToLoad = SearchPhaseController.fillDocIdsToLoad(numShards, scoreDocs); final CountedCollector counter = new CountedCollector<>( fetchResults, docIdsToLoad.length, // we count down every shard in the result no matter if we got any results or not - finishPhase, + () -> moveToNextPhase(reducedQueryPhase, fetchResults.getAtomicArray()), context ); for (int i = 0; i < docIdsToLoad.length; i++) { @@ -149,66 +138,43 @@ private void innerRun() throws Exception { // in any case we count down this result since we don't talk to this shard anymore counter.countDown(); } else { - SearchShardTarget shardTarget = queryResult.getSearchShardTarget(); - Transport.Connection connection = context.getConnection(shardTarget.getClusterAlias(), shardTarget.getNodeId()); - ShardFetchSearchRequest fetchSearchRequest = createFetchRequest( - queryResult.queryResult().getContextId(), - i, - entry, - lastEmittedDocPerShard, - context.getOriginalIndices(queryResult.getShardIndex()), - queryResult.getShardSearchRequest(), - queryResult.getRescoreDocIds() - ); - executeFetch( - queryResult.getShardIndex(), - shardTarget, - counter, - fetchSearchRequest, - queryResult.queryResult(), - connection - ); + executeFetch(queryResult, counter, entry, (lastEmittedDocPerShard != null) ? 
lastEmittedDocPerShard[i] : null); } } } } } - protected ShardFetchSearchRequest createFetchRequest( - ShardSearchContextId contextId, - int index, - List entry, - ScoreDoc[] lastEmittedDocPerShard, - OriginalIndices originalIndices, - ShardSearchRequest shardSearchRequest, - RescoreDocIds rescoreDocIds - ) { - final ScoreDoc lastEmittedDoc = (lastEmittedDocPerShard != null) ? lastEmittedDocPerShard[index] : null; - return new ShardFetchSearchRequest( - originalIndices, - contextId, - shardSearchRequest, - entry, - lastEmittedDoc, - rescoreDocIds, - aggregatedDfs - ); + private boolean assertConsistentWithQueryAndFetchOptimization() { + var phaseResults = queryResults.asList(); + assert phaseResults.isEmpty() || phaseResults.get(0).fetchResult() != null + : "phaseResults empty [" + phaseResults.isEmpty() + "], single result: " + phaseResults.get(0).fetchResult(); + return true; } private void executeFetch( - final int shardIndex, - final SearchShardTarget shardTarget, + SearchPhaseResult queryResult, final CountedCollector counter, - final ShardFetchSearchRequest fetchSearchRequest, - final QuerySearchResult querySearchResult, - final Transport.Connection connection + final List entry, + ScoreDoc lastEmittedDocForShard ) { + final SearchShardTarget shardTarget = queryResult.getSearchShardTarget(); + final int shardIndex = queryResult.getShardIndex(); + final ShardSearchContextId contextId = queryResult.queryResult().getContextId(); context.getSearchTransport() .sendExecuteFetch( - connection, - fetchSearchRequest, + context.getConnection(shardTarget.getClusterAlias(), shardTarget.getNodeId()), + new ShardFetchSearchRequest( + context.getOriginalIndices(queryResult.getShardIndex()), + contextId, + queryResult.getShardSearchRequest(), + entry, + lastEmittedDocForShard, + queryResult.getRescoreDocIds(), + aggregatedDfs + ), context.getTask(), - new SearchActionListener(shardTarget, shardIndex) { + new SearchActionListener<>(shardTarget, shardIndex) { @Override public void innerOnResponse(FetchSearchResult result) { try { @@ -222,14 +188,14 @@ public void innerOnResponse(FetchSearchResult result) { @Override public void onFailure(Exception e) { try { - logger.debug(() -> "[" + fetchSearchRequest.contextId() + "] Failed to execute fetch phase", e); + logger.debug(() -> "[" + contextId + "] Failed to execute fetch phase", e); progressListener.notifyFetchFailure(shardIndex, shardTarget, e); counter.onFailure(shardIndex, shardTarget, e); } finally { // the search context might not be cleared on the node where the fetch was executed for example // because the action was rejected by the thread pool. in this case we need to send a dedicated // request to clear the search context. 
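The restructured innerRun and executeFetch above lean on the CountedCollector contract: each of the shard slots is counted down exactly once, whether the shard produced fetch hits, had nothing to fetch, or failed, and only the final countdown runs the completion callback that moves to the next phase. A minimal JDK-only sketch of that countdown pattern (hypothetical class, not the Elasticsearch CountedCollector):

    import java.util.concurrent.atomic.AtomicInteger;
    import java.util.concurrent.atomic.AtomicReferenceArray;

    // Illustrative stand-in for the per-shard countdown driving the fetch phase.
    final class CountdownCollector<R> {
        private final AtomicReferenceArray<R> results;
        private final AtomicInteger remaining;
        private final Runnable onCompletion;

        CountdownCollector(int slots, Runnable onCompletion) {
            this.results = new AtomicReferenceArray<>(slots);
            this.remaining = new AtomicInteger(slots);
            this.onCompletion = onCompletion;
        }

        // Record a result for a shard slot and count it down.
        void onResult(int slot, R result) {
            results.set(slot, result);
            countDown();
        }

        // Failures and skipped shards count down too, so the phase always completes.
        void countDown() {
            if (remaining.decrementAndGet() == 0) {
                onCompletion.run();
            }
        }

        R get(int slot) {
            return results.get(slot);
        }
    }

Every branch in the fetch loop ends in exactly one countdown per shard, which is why the phase cannot stall on skipped or failed shards.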
- releaseIrrelevantSearchContext(querySearchResult); + releaseIrrelevantSearchContext(queryResult.queryResult()); } } } @@ -260,16 +226,14 @@ private void releaseIrrelevantSearchContext(QuerySearchResult queryResult) { } private void moveToNextPhase( - AtomicArray queryPhaseResults, SearchPhaseController.ReducedQueryPhase reducedQueryPhase, AtomicArray fetchResultsArr ) { final InternalSearchResponse internalResponse = SearchPhaseController.merge( context.getRequest().scroll() != null, reducedQueryPhase, - fetchResultsArr.asList(), - fetchResultsArr::get + fetchResultsArr ); - context.executeNextPhase(this, nextPhaseFactory.apply(internalResponse, queryPhaseResults)); + context.executeNextPhase(this, nextPhaseFactory.apply(internalResponse, queryResults)); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index 5af5c4c2ec602..0662e94b519d9 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.util.Maps; +import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.lucene.grouping.TopFieldGroups; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.SearchHit; @@ -60,7 +61,6 @@ import java.util.concurrent.Executor; import java.util.function.BiFunction; import java.util.function.Consumer; -import java.util.function.IntFunction; import java.util.function.Supplier; public final class SearchPhaseController { @@ -351,52 +351,58 @@ public static List[] fillDocIdsToLoad(int numShards, ScoreDoc[] shardDo public static InternalSearchResponse merge( boolean ignoreFrom, ReducedQueryPhase reducedQueryPhase, - Collection fetchResults, - IntFunction resultsLookup + AtomicArray fetchResultsArray ) { if (reducedQueryPhase.isEmptyResult) { return InternalSearchResponse.EMPTY_WITH_TOTAL_HITS; } ScoreDoc[] sortedDocs = reducedQueryPhase.sortedTopDocs.scoreDocs; - SearchHits hits = getHits(reducedQueryPhase, ignoreFrom, fetchResults, resultsLookup); - if (reducedQueryPhase.suggest != null) { - if (fetchResults.isEmpty() == false) { - int currentOffset = hits.getHits().length; - for (CompletionSuggestion suggestion : reducedQueryPhase.suggest.filter(CompletionSuggestion.class)) { - final List suggestionOptions = suggestion.getOptions(); - for (int scoreDocIndex = currentOffset; scoreDocIndex < currentOffset + suggestionOptions.size(); scoreDocIndex++) { - ScoreDoc shardDoc = sortedDocs[scoreDocIndex]; - SearchPhaseResult searchResultProvider = resultsLookup.apply(shardDoc.shardIndex); - if (searchResultProvider == null) { - // this can happen if we are hitting a shard failure during the fetch phase - // in this case we referenced the shard result via the ScoreDoc but never got a - // result from fetch. - // TODO it would be nice to assert this in the future - continue; - } - FetchSearchResult fetchResult = searchResultProvider.fetchResult(); - final int index = fetchResult.counterGetAndIncrement(); - assert index < fetchResult.hits().getHits().length - : "not enough hits fetched. 
index [" + index + "] length: " + fetchResult.hits().getHits().length; - SearchHit hit = fetchResult.hits().getHits()[index]; - CompletionSuggestion.Entry.Option suggestOption = suggestionOptions.get(scoreDocIndex - currentOffset); - hit.score(shardDoc.score); - hit.shard(fetchResult.getSearchShardTarget()); - suggestOption.setHit(hit); - } - currentOffset += suggestionOptions.size(); + var fetchResults = fetchResultsArray.asList(); + SearchHits hits = getHits(reducedQueryPhase, ignoreFrom, fetchResultsArray); + if (reducedQueryPhase.suggest != null && fetchResults.isEmpty() == false) { + mergeSuggest(reducedQueryPhase, fetchResultsArray, hits, sortedDocs); + } + return reducedQueryPhase.buildResponse(hits, fetchResults); + } + + private static void mergeSuggest( + ReducedQueryPhase reducedQueryPhase, + AtomicArray fetchResultsArray, + SearchHits hits, + ScoreDoc[] sortedDocs + ) { + int currentOffset = hits.getHits().length; + for (CompletionSuggestion suggestion : reducedQueryPhase.suggest.filter(CompletionSuggestion.class)) { + final List suggestionOptions = suggestion.getOptions(); + for (int scoreDocIndex = currentOffset; scoreDocIndex < currentOffset + suggestionOptions.size(); scoreDocIndex++) { + ScoreDoc shardDoc = sortedDocs[scoreDocIndex]; + SearchPhaseResult searchResultProvider = fetchResultsArray.get(shardDoc.shardIndex); + if (searchResultProvider == null) { + // this can happen if we are hitting a shard failure during the fetch phase + // in this case we referenced the shard result via the ScoreDoc but never got a + // result from fetch. + // TODO it would be nice to assert this in the future + continue; } - assert currentOffset == sortedDocs.length : "expected no more score doc slices"; + FetchSearchResult fetchResult = searchResultProvider.fetchResult(); + final int index = fetchResult.counterGetAndIncrement(); + assert index < fetchResult.hits().getHits().length + : "not enough hits fetched. 
index [" + index + "] length: " + fetchResult.hits().getHits().length; + SearchHit hit = fetchResult.hits().getHits()[index]; + CompletionSuggestion.Entry.Option suggestOption = suggestionOptions.get(scoreDocIndex - currentOffset); + hit.score(shardDoc.score); + hit.shard(fetchResult.getSearchShardTarget()); + suggestOption.setHit(hit); } + currentOffset += suggestionOptions.size(); } - return reducedQueryPhase.buildResponse(hits, fetchResults); + assert currentOffset == sortedDocs.length : "expected no more score doc slices"; } private static SearchHits getHits( ReducedQueryPhase reducedQueryPhase, boolean ignoreFrom, - Collection fetchResults, - IntFunction resultsLookup + AtomicArray fetchResultsArray ) { SortedTopDocs sortedTopDocs = reducedQueryPhase.sortedTopDocs; int sortScoreIndex = -1; @@ -408,6 +414,7 @@ private static SearchHits getHits( } } } + var fetchResults = fetchResultsArray.asList(); // clean the fetch counter for (SearchPhaseResult entry : fetchResults) { entry.fetchResult().initCounter(); @@ -422,7 +429,7 @@ private static SearchHits getHits( if (fetchResults.isEmpty() == false) { for (int i = 0; i < numSearchHits; i++) { ScoreDoc shardDoc = sortedTopDocs.scoreDocs[i]; - SearchPhaseResult fetchResultProvider = resultsLookup.apply(shardDoc.shardIndex); + SearchPhaseResult fetchResultProvider = fetchResultsArray.get(shardDoc.shardIndex); if (fetchResultProvider == null) { // this can happen if we are hitting a shard failure during the fetch phase // in this case we referenced the shard result via the ScoreDoc but never got a @@ -737,7 +744,7 @@ public record ReducedQueryPhase( /** * Creates a new search response from the given merged hits. - * @see #merge(boolean, ReducedQueryPhase, Collection, IntFunction) + * @see #merge(boolean, ReducedQueryPhase, AtomicArray) */ public InternalSearchResponse buildResponse(SearchHits hits, Collection fetchResults) { return new InternalSearchResponse( @@ -753,10 +760,8 @@ public InternalSearchResponse buildResponse(SearchHits hits, Collection fetchResults) { if (profileBuilder == null) { - assert fetchResults.stream() - .map(SearchPhaseResult::fetchResult) - .filter(r -> r != null) - .allMatch(r -> r.profileResult() == null) : "found fetch profile without search profile"; + assert fetchResults.stream().map(SearchPhaseResult::fetchResult).allMatch(r -> r == null || r.profileResult() == null) + : "found fetch profile without search profile"; return null; } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java index df16c107a2619..fc1ccfb00d6ce 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java @@ -240,12 +240,7 @@ protected final void sendResponse( final AtomicArray fetchResults ) { try { - final InternalSearchResponse internalResponse = SearchPhaseController.merge( - true, - queryPhase, - fetchResults.asList(), - fetchResults::get - ); + final InternalSearchResponse internalResponse = SearchPhaseController.merge(true, queryPhase, fetchResults); // the scroll ID never changes we always return the same ID. This ID contains all the shards and their context ids // such that we can talk to them again in the next roundtrip. 
String scrollId = null; diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java index bf6517e97a842..9600561ac7ea3 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java @@ -99,7 +99,7 @@ public void run() { connection, shardFetchRequest, task, - new SearchActionListener(querySearchResult.getSearchShardTarget(), index) { + new SearchActionListener<>(querySearchResult.getSearchShardTarget(), index) { @Override protected void innerOnResponse(FetchSearchResult response) { fetchResults.setOnce(response.getShardIndex(), response); diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java index b109f67b7fa41..5f682804a5b88 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -77,6 +77,7 @@ import org.elasticsearch.snapshots.SnapshotsInfoService; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskResultsService; +import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.upgrades.FeatureMigrationResults; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -126,7 +127,8 @@ public ClusterModule( SnapshotsInfoService snapshotsInfoService, ThreadPool threadPool, SystemIndices systemIndices, - WriteLoadForecaster writeLoadForecaster + WriteLoadForecaster writeLoadForecaster, + TelemetryProvider telemetryProvider ) { this.clusterPlugins = clusterPlugins; this.deciderList = createAllocationDeciders(settings, clusterService.getClusterSettings(), clusterPlugins); @@ -138,7 +140,8 @@ public ClusterModule( clusterPlugins, clusterService, this::reconcile, - writeLoadForecaster + writeLoadForecaster, + telemetryProvider ); this.clusterService = clusterService; this.indexNameExpressionResolver = new IndexNameExpressionResolver(threadPool.getThreadContext(), systemIndices); @@ -381,7 +384,8 @@ private static ShardsAllocator createShardsAllocator( List clusterPlugins, ClusterService clusterService, DesiredBalanceReconcilerAction reconciler, - WriteLoadForecaster writeLoadForecaster + WriteLoadForecaster writeLoadForecaster, + TelemetryProvider telemetryProvider ) { Map> allocators = new HashMap<>(); allocators.put(BALANCED_ALLOCATOR, () -> new BalancedShardsAllocator(clusterSettings, writeLoadForecaster)); @@ -392,7 +396,8 @@ private static ShardsAllocator createShardsAllocator( new BalancedShardsAllocator(clusterSettings, writeLoadForecaster), threadPool, clusterService, - reconciler + reconciler, + telemetryProvider ) ); diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index 884441aa41798..e861ff3ecf27e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -818,12 +818,6 @@ public DiscoveryNodes nodes() { return nodes; } - // Deprecate to keep downstream projects compiling - @Deprecated(forRemoval = true) - public Builder putTransportVersion(String nodeId, TransportVersion transportVersion) { - return putCompatibilityVersions(nodeId, 
transportVersion, Map.of()); - } - public Builder putCompatibilityVersions( String nodeId, TransportVersion transportVersion, @@ -840,12 +834,6 @@ public Builder putCompatibilityVersions(String nodeId, CompatibilityVersions ver return this; } - // Deprecate to keep downstream projects compiling - @Deprecated(forRemoval = true) - public Builder compatibilityVersions(Map versions) { - return nodeIdsToCompatibilityVersions(versions); - } - public Builder nodeIdsToCompatibilityVersions(Map versions) { versions.forEach((key, value) -> Objects.requireNonNull(value, key)); // remove all versions not present in the new map diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java index 4a97d79380013..a0dd7bc3e9eef 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java @@ -364,28 +364,42 @@ public static class DataStreamTemplate implements Writeable, ToXContentObject { private static final ParseField HIDDEN = new ParseField("hidden"); private static final ParseField ALLOW_CUSTOM_ROUTING = new ParseField("allow_custom_routing"); + private static final ParseField FAILURE_STORE = new ParseField("failure_store"); public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "data_stream_template", false, - args -> new DataStreamTemplate(args[0] != null && (boolean) args[0], args[1] != null && (boolean) args[1]) + args -> new DataStreamTemplate( + args[0] != null && (boolean) args[0], + args[1] != null && (boolean) args[1], + DataStream.isFailureStoreEnabled() && args[2] != null && (boolean) args[2] + ) ); static { PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), HIDDEN); PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), ALLOW_CUSTOM_ROUTING); + if (DataStream.isFailureStoreEnabled()) { + PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), FAILURE_STORE); + } } private final boolean hidden; private final boolean allowCustomRouting; + private final boolean failureStore; public DataStreamTemplate() { - this(false, false); + this(false, false, false); } public DataStreamTemplate(boolean hidden, boolean allowCustomRouting) { + this(hidden, allowCustomRouting, false); + } + + public DataStreamTemplate(boolean hidden, boolean allowCustomRouting, boolean failureStore) { this.hidden = hidden; this.allowCustomRouting = allowCustomRouting; + this.failureStore = failureStore; } DataStreamTemplate(StreamInput in) throws IOException { @@ -403,6 +417,11 @@ public DataStreamTemplate(boolean hidden, boolean allowCustomRouting) { boolean value = in.readBoolean(); assert value == false : "expected false, because this used to be an optional enum that never got set"; } + if (in.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION)) { + failureStore = in.readBoolean(); + } else { + failureStore = false; + } } /** @@ -431,6 +450,10 @@ public boolean isAllowCustomRouting() { return allowCustomRouting; } + public boolean hasFailureStore() { + return failureStore; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(hidden); @@ -441,6 +464,9 @@ public void writeTo(StreamOutput out) throws IOException { // See comment in constructor. 
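The DataStreamTemplate wire code above has to gate the new failure_store boolean symmetrically on both sides of the connection: the reader defaults the field to false when the sender is older than ADDED_FAILURE_STORE_TRANSPORT_VERSION, and the writer (continued just below) only emits the extra byte when the receiver is new enough. A small analogue of that pattern using plain JDK streams and an integer protocol version; this illustrates the versioning idea only, not the StreamInput/StreamOutput API:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    final class VersionedWireExample {
        static final int FAILURE_STORE_ADDED_VERSION = 2;

        // Writer: only emit the new field when the receiving side is known to understand it.
        static byte[] write(int peerVersion, boolean hidden, boolean failureStore) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            DataOutputStream out = new DataOutputStream(bytes);
            out.writeBoolean(hidden);
            if (peerVersion >= FAILURE_STORE_ADDED_VERSION) {
                out.writeBoolean(failureStore);
            }
            return bytes.toByteArray();
        }

        // Reader: default the field when talking to an older peer that never sent it.
        static boolean readFailureStore(int peerVersion, byte[] payload) throws IOException {
            DataInputStream in = new DataInputStream(new ByteArrayInputStream(payload));
            in.readBoolean(); // skip the earlier field written first
            return peerVersion >= FAILURE_STORE_ADDED_VERSION ? in.readBoolean() : false;
        }
    }

Both checks must key off the same version constant; otherwise one side writes a byte the other never reads and the stream desynchronizes.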
out.writeBoolean(false); } + if (out.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION)) { + out.writeBoolean(failureStore); + } } @Override @@ -448,6 +474,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); builder.field("hidden", hidden); builder.field(ALLOW_CUSTOM_ROUTING.getPreferredName(), allowCustomRouting); + if (DataStream.isFailureStoreEnabled()) { + builder.field(FAILURE_STORE.getPreferredName(), failureStore); + } builder.endObject(); return builder; } @@ -457,12 +486,12 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; DataStreamTemplate that = (DataStreamTemplate) o; - return hidden == that.hidden && allowCustomRouting == that.allowCustomRouting; + return hidden == that.hidden && allowCustomRouting == that.allowCustomRouting && failureStore == that.failureStore; } @Override public int hashCode() { - return Objects.hash(hidden, allowCustomRouting); + return Objects.hash(hidden, allowCustomRouting, failureStore); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java index c5cf0b29f6273..34d8515d2dfdd 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java @@ -11,6 +11,7 @@ import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.PointValues; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration; @@ -26,6 +27,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.time.DateFormatters; +import org.elasticsearch.common.util.FeatureFlag; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; @@ -66,7 +68,15 @@ public final class DataStream implements SimpleDiffable, ToXContentObject, IndexAbstraction { + public static final FeatureFlag FAILURE_STORE_FEATURE_FLAG = new FeatureFlag("failure_store"); + public static final TransportVersion ADDED_FAILURE_STORE_TRANSPORT_VERSION = TransportVersions.DATA_STREAM_FAILURE_STORE_ADDED; + + public static boolean isFailureStoreEnabled() { + return FAILURE_STORE_FEATURE_FLAG.isEnabled(); + } + public static final String BACKING_INDEX_PREFIX = ".ds-"; + public static final String FAILURE_STORE_PREFIX = ".fs-"; public static final DateFormatter DATE_FORMATTER = DateFormatter.forPattern("uuuu.MM.dd"); public static final String TIMESTAMP_FIELD_NAME = "@timestamp"; // Timeseries indices' leaf readers should be sorted by desc order of their timestamp field, as it allows search time optimizations @@ -100,6 +110,8 @@ public final class DataStream implements SimpleDiffable, ToXContentO private final IndexMode indexMode; @Nullable private final DataStreamLifecycle lifecycle; + private final boolean failureStore; + private final List failureIndices; public DataStream( String name, @@ -111,7 +123,9 @@ public DataStream( boolean system, boolean allowCustomRouting, IndexMode indexMode, - DataStreamLifecycle lifecycle + DataStreamLifecycle lifecycle, + boolean failureStore, + List failureIndices ) { this( name, @@ 
-124,7 +138,9 @@ public DataStream( System::currentTimeMillis, allowCustomRouting, indexMode, - lifecycle + lifecycle, + failureStore, + failureIndices ); } @@ -140,7 +156,9 @@ public DataStream( LongSupplier timeProvider, boolean allowCustomRouting, IndexMode indexMode, - DataStreamLifecycle lifecycle + DataStreamLifecycle lifecycle, + boolean failureStore, + List failureIndices ) { this.name = name; this.indices = List.copyOf(indices); @@ -155,6 +173,8 @@ public DataStream( this.allowCustomRouting = allowCustomRouting; this.indexMode = indexMode; this.lifecycle = lifecycle; + this.failureStore = failureStore; + this.failureIndices = failureIndices; assert assertConsistent(this.indices); } @@ -170,7 +190,7 @@ public DataStream( boolean allowCustomRouting, IndexMode indexMode ) { - this(name, indices, generation, metadata, hidden, replicated, system, allowCustomRouting, indexMode, null); + this(name, indices, generation, metadata, hidden, replicated, system, allowCustomRouting, indexMode, null, false, List.of()); } private static boolean assertConsistent(List indices) { @@ -207,6 +227,10 @@ public long getGeneration() { return generation; } + public List getFailureIndices() { + return failureIndices; + } + @Override public Index getWriteIndex() { return indices.get(indices.size() - 1); @@ -327,6 +351,16 @@ public boolean isAllowCustomRouting() { return allowCustomRouting; } + /** + * Determines if this data stream should persist ingest pipeline and mapping failures from bulk requests to a locally + * configured failure store. + * + * @return Whether this data stream should store ingestion failures. + */ + public boolean isFailureStore() { + return failureStore; + } + @Nullable public IndexMode getIndexMode() { return indexMode; @@ -369,7 +403,20 @@ public DataStream unsafeRollover(Index writeIndex, long generation, boolean time List backingIndices = new ArrayList<>(indices); backingIndices.add(writeIndex); - return new DataStream(name, backingIndices, generation, metadata, hidden, false, system, allowCustomRouting, indexMode, lifecycle); + return new DataStream( + name, + backingIndices, + generation, + metadata, + hidden, + false, + system, + allowCustomRouting, + indexMode, + lifecycle, + failureStore, + failureIndices + ); } /** @@ -444,7 +491,9 @@ public DataStream removeBackingIndex(Index index) { system, allowCustomRouting, indexMode, - lifecycle + lifecycle, + failureStore, + failureIndices ); } @@ -487,7 +536,9 @@ public DataStream replaceBackingIndex(Index existingBackingIndex, Index newBacki system, allowCustomRouting, indexMode, - lifecycle + lifecycle, + failureStore, + failureIndices ); } @@ -545,7 +596,9 @@ public DataStream addBackingIndex(Metadata clusterMetadata, Index index) { system, allowCustomRouting, indexMode, - lifecycle + lifecycle, + failureStore, + failureIndices ); } @@ -561,7 +614,9 @@ public DataStream promoteDataStream() { timeProvider, allowCustomRouting, indexMode, - lifecycle + lifecycle, + failureStore, + failureIndices ); } @@ -595,7 +650,9 @@ public DataStream snapshot(Collection indicesInSnapshot) { system, allowCustomRouting, indexMode, - lifecycle + lifecycle, + failureStore, + failureIndices ); } @@ -778,9 +835,28 @@ public static String getDefaultBackingIndexName(String dataStreamName, long gene ); } + /** + * Generates the name of the index that conforms to the default naming convention for backing indices + * on data streams given the specified data stream name, generation, and time. 
+ * + * @param dataStreamName name of the data stream + * @param generation generation of the data stream + * @param epochMillis creation time for the backing index + * @return backing index name + */ + public static String getDefaultFailureStoreName(String dataStreamName, long generation, long epochMillis) { + return String.format( + Locale.ROOT, + FAILURE_STORE_PREFIX + "%s-%s-%06d", + dataStreamName, + DATE_FORMATTER.formatMillis(epochMillis), + generation + ); + } + public DataStream(StreamInput in) throws IOException { this( - in.readString(), + readName(in), readIndices(in), in.readVLong(), in.readMap(), @@ -789,12 +865,19 @@ public DataStream(StreamInput in) throws IOException { in.readBoolean(), in.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0) ? in.readBoolean() : false, in.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0) ? in.readOptionalEnum(IndexMode.class) : null, - in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020) ? in.readOptionalWriteable(DataStreamLifecycle::new) : null + in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020) ? in.readOptionalWriteable(DataStreamLifecycle::new) : null, + in.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION) ? in.readBoolean() : false, + in.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION) ? readIndices(in) : List.of() ); } + static String readName(StreamInput in) throws IOException { + String name = in.readString(); + in.readString(); // TODO: clear out the timestamp field, which is a constant https://github.com/elastic/elasticsearch/issues/101991 + return name; + } + static List readIndices(StreamInput in) throws IOException { - in.readString(); // timestamp field, which is always @timestamp return in.readCollectionAsImmutableList(Index::new); } @@ -805,7 +888,7 @@ public static Diff readDiffFrom(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name); - out.writeString(TIMESTAMP_FIELD_NAME); + out.writeString(TIMESTAMP_FIELD_NAME); // TODO: clear this out in the future https://github.com/elastic/elasticsearch/issues/101991 out.writeCollection(indices); out.writeVLong(generation); out.writeGenericMap(metadata); @@ -821,6 +904,10 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { out.writeOptionalWriteable(lifecycle); } + if (out.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION)) { + out.writeBoolean(failureStore); + out.writeCollection(failureIndices); + } } public static final ParseField NAME_FIELD = new ParseField("name"); @@ -834,6 +921,8 @@ public void writeTo(StreamOutput out) throws IOException { public static final ParseField ALLOW_CUSTOM_ROUTING = new ParseField("allow_custom_routing"); public static final ParseField INDEX_MODE = new ParseField("index_mode"); public static final ParseField LIFECYCLE = new ParseField("lifecycle"); + public static final ParseField FAILURE_STORE_FIELD = new ParseField("failure_store"); + public static final ParseField FAILURE_INDICES_FIELD = new ParseField("failure_indices"); @SuppressWarnings("unchecked") private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( @@ -848,7 +937,9 @@ public void writeTo(StreamOutput out) throws IOException { args[6] != null && (boolean) args[6], args[7] != null && (boolean) args[7], args[8] != null ? 
IndexMode.fromString((String) args[8]) : null, - (DataStreamLifecycle) args[9] + (DataStreamLifecycle) args[9], + DataStream.isFailureStoreEnabled() && args[10] != null && (boolean) args[10], + DataStream.isFailureStoreEnabled() && args[11] != null ? (List) args[11] : List.of() ) ); @@ -871,6 +962,14 @@ public void writeTo(StreamOutput out) throws IOException { PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), ALLOW_CUSTOM_ROUTING); PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), INDEX_MODE); PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> DataStreamLifecycle.fromXContent(p), LIFECYCLE); + if (DataStream.isFailureStoreEnabled()) { + PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), FAILURE_STORE_FIELD); + PARSER.declareObjectArray( + ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> Index.fromXContent(p), + FAILURE_INDICES_FIELD + ); + } } public static DataStream fromXContent(XContentParser parser) throws IOException { @@ -895,6 +994,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params, @Nulla .endObject(); builder.xContentList(INDICES_FIELD.getPreferredName(), indices); builder.field(GENERATION_FIELD.getPreferredName(), generation); + if (DataStream.isFailureStoreEnabled() && failureIndices.isEmpty() == false) { + builder.xContentList(FAILURE_INDICES_FIELD.getPreferredName(), failureIndices); + } if (metadata != null) { builder.field(METADATA_FIELD.getPreferredName(), metadata); } @@ -902,6 +1004,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params, @Nulla builder.field(REPLICATED_FIELD.getPreferredName(), replicated); builder.field(SYSTEM_FIELD.getPreferredName(), system); builder.field(ALLOW_CUSTOM_ROUTING.getPreferredName(), allowCustomRouting); + if (DataStream.isFailureStoreEnabled()) { + builder.field(FAILURE_STORE_FIELD.getPreferredName(), failureStore); + } if (indexMode != null) { builder.field(INDEX_MODE.getPreferredName(), indexMode); } @@ -927,12 +1032,27 @@ public boolean equals(Object o) { && replicated == that.replicated && allowCustomRouting == that.allowCustomRouting && indexMode == that.indexMode - && Objects.equals(lifecycle, that.lifecycle); + && Objects.equals(lifecycle, that.lifecycle) + && failureStore == that.failureStore + && failureIndices.equals(that.failureIndices); } @Override public int hashCode() { - return Objects.hash(name, indices, generation, metadata, hidden, system, replicated, allowCustomRouting, indexMode, lifecycle); + return Objects.hash( + name, + indices, + generation, + metadata, + hidden, + system, + replicated, + allowCustomRouting, + indexMode, + lifecycle, + failureStore, + failureIndices + ); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java index ca885632a08c4..d500a8b8e6876 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java @@ -70,13 +70,18 @@ public MetadataCreateDataStreamService( public void createDataStream(CreateDataStreamClusterStateUpdateRequest request, ActionListener finalListener) { AtomicReference firstBackingIndexRef = new AtomicReference<>(); + AtomicReference firstFailureStoreRef = new AtomicReference<>(); ActionListener listener = 
finalListener.delegateFailureAndWrap((l, response) -> { if (response.isAcknowledged()) { String firstBackingIndexName = firstBackingIndexRef.get(); assert firstBackingIndexName != null; + String firstFailureStoreName = firstFailureStoreRef.get(); + var waitForIndices = firstFailureStoreName == null + ? new String[] { firstBackingIndexName } + : new String[] { firstBackingIndexName, firstFailureStoreName }; ActiveShardsObserver.waitForActiveShards( clusterService, - new String[] { firstBackingIndexName }, + waitForIndices, ActiveShardCount.DEFAULT, request.masterNodeTimeout(), l.map(shardsAcked -> AcknowledgedResponse.TRUE) @@ -98,7 +103,11 @@ public ClusterState execute(ClusterState currentState) throws Exception { request, delegate.reroute() ); - firstBackingIndexRef.set(clusterState.metadata().dataStreams().get(request.name).getIndices().get(0).getName()); + DataStream createdDataStream = clusterState.metadata().dataStreams().get(request.name); + firstBackingIndexRef.set(createdDataStream.getIndices().get(0).getName()); + if (createdDataStream.getFailureIndices().isEmpty() == false) { + firstFailureStoreRef.set(createdDataStream.getFailureIndices().get(0).getName()); + } return clusterState; } } @@ -113,9 +122,9 @@ private void submitUnbatchedTask(@SuppressWarnings("SameParameterValue") String public ClusterState createDataStream( CreateDataStreamClusterStateUpdateRequest request, ClusterState current, - ActionListener listener + ActionListener rerouteListener ) throws Exception { - return createDataStream(metadataCreateIndexService, current, isDslOnlyMode, request, listener); + return createDataStream(metadataCreateIndexService, current, isDslOnlyMode, request, rerouteListener); } public static final class CreateDataStreamClusterStateUpdateRequest extends ClusterStateUpdateRequest< @@ -167,6 +176,10 @@ public boolean performReroute() { public SystemDataStreamDescriptor getSystemDataStreamDescriptor() { return descriptor; } + + long getStartTime() { + return startTime; + } } static ClusterState createDataStream( @@ -174,9 +187,9 @@ static ClusterState createDataStream( ClusterState currentState, boolean isDslOnlyMode, CreateDataStreamClusterStateUpdateRequest request, - ActionListener listener + ActionListener rerouteListener ) throws Exception { - return createDataStream(metadataCreateIndexService, currentState, isDslOnlyMode, request, List.of(), null, listener); + return createDataStream(metadataCreateIndexService, currentState, isDslOnlyMode, request, List.of(), null, rerouteListener); } /** @@ -196,7 +209,7 @@ static ClusterState createDataStream( CreateDataStreamClusterStateUpdateRequest request, List backingIndices, IndexMetadata writeIndex, - ActionListener listener + ActionListener rerouteListener ) throws Exception { String dataStreamName = request.name; SystemDataStreamDescriptor systemDataStreamDescriptor = request.getSystemDataStreamDescriptor(); @@ -225,6 +238,11 @@ static ClusterState createDataStream( "data_stream [" + dataStreamName + "] must not start with '" + DataStream.BACKING_INDEX_PREFIX + "'" ); } + if (dataStreamName.startsWith(DataStream.FAILURE_STORE_PREFIX)) { + throw new IllegalArgumentException( + "data_stream [" + dataStreamName + "] must not start with '" + DataStream.FAILURE_STORE_PREFIX + "'" + ); + } final var metadata = currentState.metadata(); final boolean isSystem = systemDataStreamDescriptor != null; @@ -232,43 +250,47 @@ static ClusterState createDataStream( ? 
systemDataStreamDescriptor.getComposableIndexTemplate() : lookupTemplateForDataStream(dataStreamName, currentState.metadata()); - if (writeIndex == null) { - String firstBackingIndexName = DataStream.getDefaultBackingIndexName(dataStreamName, 1, request.startTime); - CreateIndexClusterStateUpdateRequest createIndexRequest = new CreateIndexClusterStateUpdateRequest( - "initialize_data_stream", - firstBackingIndexName, - firstBackingIndexName - ).dataStreamName(dataStreamName) - .systemDataStreamDescriptor(systemDataStreamDescriptor) - .nameResolvedInstant(request.startTime) - .performReroute(request.performReroute()) - .setMatchingTemplate(template); - + // If we need to create a failure store, do so first. Do not reroute during the creation since we will do + // that as part of creating the backing index if required. + IndexMetadata failureStoreIndex = null; + if (template.getDataStreamTemplate().hasFailureStore()) { if (isSystem) { - createIndexRequest.settings(SystemIndexDescriptor.DEFAULT_SETTINGS); - } else { - createIndexRequest.settings(MetadataRolloverService.HIDDEN_INDEX_SETTINGS); + throw new IllegalArgumentException("Failure stores are not supported on system data streams"); } + String failureStoreIndexName = DataStream.getDefaultFailureStoreName(dataStreamName, 1, request.getStartTime()); + currentState = createFailureStoreIndex( + metadataCreateIndexService, + currentState, + request, + dataStreamName, + template, + failureStoreIndexName + ); + failureStoreIndex = currentState.metadata().index(failureStoreIndexName); + } - try { - currentState = metadataCreateIndexService.applyCreateIndexRequest(currentState, createIndexRequest, false, listener); - } catch (ResourceAlreadyExistsException e) { - // Rethrow as ElasticsearchStatusException, so that bulk transport action doesn't ignore it during - // auto index/data stream creation. - // (otherwise bulk execution fails later, because data stream will also not have been created) - throw new ElasticsearchStatusException( - "data stream could not be created because backing index [{}] already exists", - RestStatus.BAD_REQUEST, - e, - firstBackingIndexName - ); - } + if (writeIndex == null) { + String firstBackingIndexName = DataStream.getDefaultBackingIndexName(dataStreamName, 1, request.getStartTime()); + currentState = createBackingIndex( + metadataCreateIndexService, + currentState, + request, + rerouteListener, + dataStreamName, + systemDataStreamDescriptor, + isSystem, + template, + firstBackingIndexName + ); writeIndex = currentState.metadata().index(firstBackingIndexName); } else { - listener.onResponse(null); + rerouteListener.onResponse(null); } assert writeIndex != null; assert writeIndex.mapping() != null : "no mapping found for backing index [" + writeIndex.getIndex().getName() + "]"; + assert template.getDataStreamTemplate().hasFailureStore() == false || failureStoreIndex != null; + assert failureStoreIndex == null || failureStoreIndex.mapping() != null + : "no mapping found for failure store [" + failureStoreIndex.getIndex().getName() + "]"; List dsBackingIndices = backingIndices.stream() .map(IndexMetadata::getIndex) @@ -279,6 +301,7 @@ static ClusterState createDataStream( final DataStreamLifecycle lifecycle = isSystem ? MetadataIndexTemplateService.resolveLifecycle(template, systemDataStreamDescriptor.getComponentTemplates()) : MetadataIndexTemplateService.resolveLifecycle(template, metadata.componentTemplates()); + List failureIndices = failureStoreIndex == null ? 
List.of() : List.of(failureStoreIndex.getIndex()); DataStream newDataStream = new DataStream( dataStreamName, dsBackingIndices, @@ -289,7 +312,9 @@ static ClusterState createDataStream( isSystem, template.getDataStreamTemplate().isAllowCustomRouting(), indexMode, - lifecycle == null && isDslOnlyMode ? DataStreamLifecycle.DEFAULT : lifecycle + lifecycle == null && isDslOnlyMode ? DataStreamLifecycle.DEFAULT : lifecycle, + template.getDataStreamTemplate().hasFailureStore(), + failureIndices ); Metadata.Builder builder = Metadata.builder(currentState.metadata()).put(newDataStream); @@ -313,6 +338,92 @@ static ClusterState createDataStream( return ClusterState.builder(currentState).metadata(builder).build(); } + private static ClusterState createBackingIndex( + MetadataCreateIndexService metadataCreateIndexService, + ClusterState currentState, + CreateDataStreamClusterStateUpdateRequest request, + ActionListener rerouteListener, + String dataStreamName, + SystemDataStreamDescriptor systemDataStreamDescriptor, + boolean isSystem, + ComposableIndexTemplate template, + String firstBackingIndexName + ) throws Exception { + CreateIndexClusterStateUpdateRequest createIndexRequest = new CreateIndexClusterStateUpdateRequest( + "initialize_data_stream", + firstBackingIndexName, + firstBackingIndexName + ).dataStreamName(dataStreamName) + .systemDataStreamDescriptor(systemDataStreamDescriptor) + .nameResolvedInstant(request.getStartTime()) + .performReroute(request.performReroute()) + .setMatchingTemplate(template); + + if (isSystem) { + createIndexRequest.settings(SystemIndexDescriptor.DEFAULT_SETTINGS); + } else { + createIndexRequest.settings(MetadataRolloverService.HIDDEN_INDEX_SETTINGS); + } + + try { + currentState = metadataCreateIndexService.applyCreateIndexRequest(currentState, createIndexRequest, false, rerouteListener); + } catch (ResourceAlreadyExistsException e) { + // Rethrow as ElasticsearchStatusException, so that bulk transport action doesn't ignore it during + // auto index/data stream creation. + // (otherwise bulk execution fails later, because data stream will also not have been created) + throw new ElasticsearchStatusException( + "data stream could not be created because backing index [{}] already exists", + RestStatus.BAD_REQUEST, + e, + firstBackingIndexName + ); + } + return currentState; + } + + private static ClusterState createFailureStoreIndex( + MetadataCreateIndexService metadataCreateIndexService, + ClusterState currentState, + CreateDataStreamClusterStateUpdateRequest request, + String dataStreamName, + ComposableIndexTemplate template, + String failureStoreIndexName + ) throws Exception { + if (DataStream.isFailureStoreEnabled() == false) { + return currentState; + } + + CreateIndexClusterStateUpdateRequest createIndexRequest = new CreateIndexClusterStateUpdateRequest( + "initialize_data_stream", + failureStoreIndexName, + failureStoreIndexName + ).dataStreamName(dataStreamName) + .nameResolvedInstant(request.getStartTime()) + .performReroute(false) + .setMatchingTemplate(template) + .settings(MetadataRolloverService.HIDDEN_INDEX_SETTINGS); + + try { + currentState = metadataCreateIndexService.applyCreateIndexRequest( + currentState, + createIndexRequest, + false, + AllocationActionListener.rerouteCompletionIsNotRequired() + ); + } catch (ResourceAlreadyExistsException e) { + // Rethrow as ElasticsearchStatusException, so that bulk transport action doesn't ignore it during + // auto index/data stream creation. 
+ // (otherwise bulk execution fails later, because data stream will also not have been created) + throw new ElasticsearchStatusException( + "data stream could not be created because failure store index [{}] already exists", + RestStatus.BAD_REQUEST, + e, + failureStoreIndexName + ); + } + return currentState; + } + public static ComposableIndexTemplate lookupTemplateForDataStream(String dataStreamName, Metadata metadata) { final String v2Template = MetadataIndexTemplateService.findV2Template(metadata, dataStreamName, false); if (v2Template == null) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java index 8423a5ad37334..2ebcad22185fd 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java @@ -166,7 +166,9 @@ static ClusterState updateDataLifecycle( dataStream.isSystem(), dataStream.isAllowCustomRouting(), dataStream.getIndexMode(), - lifecycle + lifecycle, + dataStream.isFailureStore(), + dataStream.getFailureIndices() ) ); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java index 0c78d497d1194..1e2e15a6300c7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java @@ -92,6 +92,8 @@ public class MetadataIndexTemplateService { private static final CompressedXContent DEFAULT_TIMESTAMP_MAPPING_WITH_ROUTING; + private static final CompressedXContent DATA_STREAM_FAILURE_STORE_MAPPING; + static { final Map> defaultTimestampField = Map.of( DEFAULT_TIMESTAMP_FIELD, @@ -120,6 +122,110 @@ public class MetadataIndexTemplateService { .map(defaultTimestampField) .endObject() ); + /* + * The data stream failure store mapping. 
The JSON content is as follows: + * { + * "_doc": { + * "dynamic": false, + * "_routing": { + * "required": false + * }, + * "properties": { + * "@timestamp": { + * "type": "date", + * "ignore_malformed": false + * }, + * "document": { + * "properties": { + * "id": { + * "type": "keyword" + * }, + * "routing": { + * "type": "keyword" + * }, + * "index": { + * "type": "keyword" + * } + * } + * }, + * "error": { + * "properties": { + * "message": { + * "type": "wildcard" + * }, + * "stack_trace": { + * "type": "text" + * }, + * "type": { + * "type": "keyword" + * }, + * "pipeline": { + * "type": "keyword" + * }, + * "pipeline_trace": { + * "type": "keyword" + * }, + * "processor": { + * "type": "keyword" + * } + * } + * } + * } + * } + * } + */ + DATA_STREAM_FAILURE_STORE_MAPPING = new CompressedXContent( + (builder, params) -> builder.startObject(MapperService.SINGLE_MAPPING_NAME) + .field("dynamic", false) + .startObject(RoutingFieldMapper.NAME) + .field("required", false) + .endObject() + .startObject("properties") + .startObject(DEFAULT_TIMESTAMP_FIELD) + .field("type", DateFieldMapper.CONTENT_TYPE) + .field("ignore_malformed", false) + .endObject() + .startObject("document") + .startObject("properties") + // document.source is unmapped so that it can be persisted in source only without worrying that the document might cause + // a mapping error + .startObject("id") + .field("type", "keyword") + .endObject() + .startObject("routing") + .field("type", "keyword") + .endObject() + .startObject("index") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .startObject("error") + .startObject("properties") + .startObject("message") + .field("type", "wildcard") + .endObject() + .startObject("stack_trace") + .field("type", "text") + .endObject() + .startObject("type") + .field("type", "keyword") + .endObject() + .startObject("pipeline") + .field("type", "keyword") + .endObject() + .startObject("pipeline_trace") + .field("type", "keyword") + .endObject() + .startObject("processor") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + ); + } catch (IOException e) { throw new AssertionError(e); } @@ -1338,6 +1444,10 @@ public static List collectMappings( final String indexName ) { Objects.requireNonNull(template, "Composable index template must be provided"); + // Check if this is a failure store index, and if it is, discard any template mappings. Failure store mappings are predefined. 
+ if (template.getDataStreamTemplate() != null && indexName.startsWith(DataStream.FAILURE_STORE_PREFIX)) { + return List.of(DATA_STREAM_FAILURE_STORE_MAPPING, ComposableIndexTemplate.DataStreamTemplate.DATA_STREAM_MAPPING_SNIPPET); + } List mappings = template.composedOf() .stream() .map(componentTemplates::get) @@ -1348,7 +1458,7 @@ public static List collectMappings( .collect(Collectors.toCollection(LinkedList::new)); // Add the actual index template's mappings, since it takes the highest precedence Optional.ofNullable(template.template()).map(Template::mappings).ifPresent(mappings::add); - if (template.getDataStreamTemplate() != null && indexName.startsWith(DataStream.BACKING_INDEX_PREFIX)) { + if (template.getDataStreamTemplate() != null && isDataStreamIndex(indexName)) { // add a default mapping for the `@timestamp` field, at the lowest precedence, to make bootstrapping data streams more // straightforward as all backing indices are required to have a timestamp field if (template.getDataStreamTemplate().isAllowCustomRouting()) { @@ -1359,7 +1469,7 @@ public static List collectMappings( } // Only include _timestamp mapping snippet if creating backing index. - if (indexName.startsWith(DataStream.BACKING_INDEX_PREFIX)) { + if (isDataStreamIndex(indexName)) { // Only if template has data stream definition this should be added and // adding this template last, since _timestamp field should have highest precedence: if (template.getDataStreamTemplate() != null) { @@ -1369,6 +1479,10 @@ public static List collectMappings( return Collections.unmodifiableList(mappings); } + private static boolean isDataStreamIndex(String indexName) { + return indexName.startsWith(DataStream.BACKING_INDEX_PREFIX) || indexName.startsWith(DataStream.FAILURE_STORE_PREFIX); + } + /** * Resolve index settings for the given list of v1 templates, templates are apply in reverse * order since they should be provided in order of priority/order diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ShutdownShardMigrationStatus.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ShutdownShardMigrationStatus.java index ecc26d15d001f..15fab193dad57 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ShutdownShardMigrationStatus.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ShutdownShardMigrationStatus.java @@ -36,27 +36,72 @@ public class ShutdownShardMigrationStatus implements Writeable, ChunkedToXConten public static final String NODE_ALLOCATION_DECISION_KEY = "node_allocation_decision"; private final SingleNodeShutdownMetadata.Status status; + private final long startedShards; + private final long relocatingShards; + private final long initializingShards; private final long shardsRemaining; @Nullable private final String explanation; @Nullable private final ShardAllocationDecision allocationDecision; - public ShutdownShardMigrationStatus(SingleNodeShutdownMetadata.Status status, long shardsRemaining) { - this(status, shardsRemaining, null, null); + public ShutdownShardMigrationStatus( + SingleNodeShutdownMetadata.Status status, + long shardsRemaining, + @Nullable String explanation, + @Nullable ShardAllocationDecision allocationDecision + ) { + this(status, -1, -1, -1, shardsRemaining, explanation, null); } - public ShutdownShardMigrationStatus(SingleNodeShutdownMetadata.Status status, long shardsRemaining, @Nullable String explanation) { - this(status, shardsRemaining, explanation, null); + public ShutdownShardMigrationStatus( + 
SingleNodeShutdownMetadata.Status status, + long startedShards, + long relocatingShards, + long initializingShards + ) { + this( + status, + startedShards, + relocatingShards, + initializingShards, + startedShards + relocatingShards + initializingShards, + null, + null + ); } public ShutdownShardMigrationStatus( SingleNodeShutdownMetadata.Status status, + long startedShards, + long relocatingShards, + long initializingShards, + @Nullable String explanation + ) { + this( + status, + startedShards, + relocatingShards, + initializingShards, + startedShards + relocatingShards + initializingShards, + explanation, + null + ); + } + + private ShutdownShardMigrationStatus( + SingleNodeShutdownMetadata.Status status, + long startedShards, + long relocatingShards, + long initializingShards, long shardsRemaining, @Nullable String explanation, @Nullable ShardAllocationDecision allocationDecision ) { this.status = Objects.requireNonNull(status, "status must not be null"); + this.startedShards = startedShards; + this.relocatingShards = relocatingShards; + this.initializingShards = initializingShards; this.shardsRemaining = shardsRemaining; this.explanation = explanation; this.allocationDecision = allocationDecision; @@ -64,7 +109,17 @@ public ShutdownShardMigrationStatus( public ShutdownShardMigrationStatus(StreamInput in) throws IOException { this.status = in.readEnum(SingleNodeShutdownMetadata.Status.class); - this.shardsRemaining = in.readLong(); + if (in.getTransportVersion().onOrAfter(TransportVersions.SHUTDOWN_MIGRATION_STATUS_INCLUDE_COUNTS)) { + this.startedShards = in.readZLong(); + this.relocatingShards = in.readZLong(); + this.initializingShards = in.readZLong(); + this.shardsRemaining = in.readZLong(); + } else { + this.startedShards = -1; + this.relocatingShards = -1; + this.initializingShards = -1; + this.shardsRemaining = in.readLong(); + } this.explanation = in.readOptionalString(); if (in.getTransportVersion().onOrAfter(ALLOCATION_DECISION_ADDED_VERSION)) { this.allocationDecision = in.readOptionalWriteable(ShardAllocationDecision::new); @@ -99,6 +154,11 @@ public Iterator toXContentChunked(ToXContent.Params params private XContentBuilder buildHeader(XContentBuilder builder) throws IOException { builder.field("status", status); + if (startedShards != -1) { + builder.field("started_shards", startedShards); + builder.field("relocating_shards", relocatingShards); + builder.field("initializing_shards", initializingShards); + } builder.field("shard_migrations_remaining", shardsRemaining); if (Objects.nonNull(explanation)) { builder.field("explanation", explanation); @@ -109,7 +169,14 @@ private XContentBuilder buildHeader(XContentBuilder builder) throws IOException @Override public void writeTo(StreamOutput out) throws IOException { out.writeEnum(status); - out.writeLong(shardsRemaining); + if (out.getTransportVersion().onOrAfter(TransportVersions.SHUTDOWN_MIGRATION_STATUS_INCLUDE_COUNTS)) { + out.writeZLong(startedShards); + out.writeZLong(relocatingShards); + out.writeZLong(initializingShards); + out.writeZLong(shardsRemaining); + } else { + out.writeLong(shardsRemaining); + } out.writeOptionalString(explanation); if (out.getTransportVersion().onOrAfter(ALLOCATION_DECISION_ADDED_VERSION)) { out.writeOptionalWriteable(allocationDecision); @@ -119,9 +186,12 @@ public void writeTo(StreamOutput out) throws IOException { @Override public boolean equals(Object o) { if (this == o) return true; - if ((o instanceof ShutdownShardMigrationStatus) == false) return false; + if (o == null || getClass() 
!= o.getClass()) return false; ShutdownShardMigrationStatus that = (ShutdownShardMigrationStatus) o; - return shardsRemaining == that.shardsRemaining + return startedShards == that.startedShards + && relocatingShards == that.relocatingShards + && initializingShards == that.initializingShards + && shardsRemaining == that.shardsRemaining && status == that.status && Objects.equals(explanation, that.explanation) && Objects.equals(allocationDecision, that.allocationDecision); @@ -129,7 +199,7 @@ public boolean equals(Object o) { @Override public int hashCode() { - return Objects.hash(status, shardsRemaining, explanation, allocationDecision); + return Objects.hash(status, startedShards, relocatingShards, initializingShards, shardsRemaining, explanation, allocationDecision); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ContinuousComputation.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ContinuousComputation.java index eecd0a7410513..7cdffc3a5bf24 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ContinuousComputation.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ContinuousComputation.java @@ -10,12 +10,12 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; -import org.elasticsearch.threadpool.ThreadPool; import java.util.Objects; -import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicReference; /** @@ -27,15 +27,15 @@ public abstract class ContinuousComputation { private static final Logger logger = LogManager.getLogger(ContinuousComputation.class); - private final ExecutorService executorService; + private final Executor executor; private final AtomicReference enqueuedInput = new AtomicReference<>(); private final Processor processor = new Processor(); /** - * @param threadPool Each computation runs on a {@code GENERIC} thread from this thread pool. At most one task executes at once. 
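ContinuousComputation now takes a plain Executor instead of reaching for the generic thread pool itself, but the coalescing behaviour stays the same: a single AtomicReference holds the most recent input, a task is submitted only when that slot was previously empty, and after processing the task resubmits itself if a newer input arrived in the meantime (the try/finally added below makes that resubmission survive a failing computation). A compact JDK-only sketch of the loop, simplified from the class in this patch:

    import java.util.concurrent.Executor;
    import java.util.concurrent.atomic.AtomicReference;
    import java.util.function.Consumer;

    // Runs at most one computation at a time and always ends up processing the most recent input.
    final class CoalescingComputation<T> {
        private final Executor executor;
        private final Consumer<T> processInput;
        private final AtomicReference<T> enqueuedInput = new AtomicReference<>();

        CoalescingComputation(Executor executor, Consumer<T> processInput) {
            this.executor = executor;
            this.processInput = processInput;
        }

        void onNewInput(T input) {
            // Only submit a task when the slot was empty; a running task picks newer values up itself.
            if (enqueuedInput.getAndSet(input) == null) {
                executor.execute(this::process);
            }
        }

        private void process() {
            T input = enqueuedInput.get();
            try {
                processInput.accept(input);
            } finally {
                // If a newer input replaced ours while we were working, go around again.
                if (enqueuedInput.compareAndSet(input, null) == false) {
                    executor.execute(this::process);
                }
            }
        }
    }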
+ * @param executor the {@link Executor} with which to execute the computation */ - public ContinuousComputation(ThreadPool threadPool) { - this.executorService = threadPool.generic(); + public ContinuousComputation(Executor executor) { + this.executor = executor; } /** @@ -44,7 +44,7 @@ public ContinuousComputation(ThreadPool threadPool) { public void onNewInput(T input) { assert input != null; if (enqueuedInput.getAndSet(Objects.requireNonNull(input)) == null) { - executorService.execute(processor); + executor.execute(processor); } } @@ -74,6 +74,7 @@ private class Processor extends AbstractRunnable { @Override public void onFailure(Exception e) { + logger.error(Strings.format("unexpected error processing [%s]", ContinuousComputation.this), e); assert false : e; } @@ -85,14 +86,16 @@ public void onRejection(Exception e) { } @Override - protected void doRun() throws Exception { + protected void doRun() { final T input = enqueuedInput.get(); assert input != null; - processInput(input); - - if (enqueuedInput.compareAndSet(input, null) == false) { - executorService.execute(this); + try { + processInput(input); + } finally { + if (enqueuedInput.compareAndSet(input, null) == false) { + executor.execute(this); + } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java index 8df50196c5d4b..33a578196866e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java @@ -29,12 +29,15 @@ import org.elasticsearch.gateway.PriorityComparator; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.telemetry.metric.DoubleGauge; +import org.elasticsearch.telemetry.metric.DoubleWithAttributes; +import org.elasticsearch.telemetry.metric.LongGaugeMetric; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.threadpool.ThreadPool; import java.util.Comparator; import java.util.Iterator; import java.util.Set; -import java.util.concurrent.atomic.AtomicLong; import java.util.function.BiFunction; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -74,23 +77,53 @@ public class DesiredBalanceReconciler { /** * Number of unassigned shards during last reconciliation */ - protected final AtomicLong unassignedShards = new AtomicLong(); + protected final LongGaugeMetric unassignedShards; /** * Total number of assigned shards during last reconciliation */ - protected final AtomicLong totalAllocations = new AtomicLong(); + protected final LongGaugeMetric totalAllocations; /** * Number of assigned shards during last reconciliation that are not allocated on desired node and need to be moved */ - protected final AtomicLong undesiredAllocations = new AtomicLong(); + protected final LongGaugeMetric undesiredAllocations; + private final DoubleGauge undesiredAllocationsRatio; - public DesiredBalanceReconciler(ClusterSettings clusterSettings, ThreadPool threadPool) { + public DesiredBalanceReconciler(ClusterSettings clusterSettings, ThreadPool threadPool, MeterRegistry meterRegistry) { this.undesiredAllocationLogInterval = new FrequencyCappedAction(threadPool); clusterSettings.initializeAndWatch(UNDESIRED_ALLOCATIONS_LOG_INTERVAL_SETTING, 
this.undesiredAllocationLogInterval::setMinInterval); clusterSettings.initializeAndWatch( UNDESIRED_ALLOCATIONS_LOG_THRESHOLD_SETTING, value -> this.undesiredAllocationsLogThreshold = value ); + + unassignedShards = LongGaugeMetric.create( + meterRegistry, + "es.allocator.desired_balance.unassigned_shards", + "Current unassigned shards count", + "count" + ); + totalAllocations = LongGaugeMetric.create( + meterRegistry, + "es.allocator.desired_balance.total_allocations", + "Current total shards count in cluster", + "count" + ); + undesiredAllocations = LongGaugeMetric.create( + meterRegistry, + "es.allocator.desired_balance.undesired_allocations", + "Current number of shards allocated on undesired nodes", + "count" + ); + undesiredAllocationsRatio = meterRegistry.registerDoubleGauge( + "es.allocator.desired_balance.undesired_allocations.ratio", + "Current undesired_allocations / allocations ratio", + "count", + () -> { + var total = totalAllocations.get(); + var undesired = undesiredAllocations.get(); + return new DoubleWithAttributes(total != 0 ? (double) undesired / total : 0.0); + } + ); } public void reconcile(DesiredBalance desiredBalance, RoutingAllocation allocation) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java index 2319bcbef3383..bd99003d3fe0c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.threadpool.ThreadPool; import java.util.ArrayList; @@ -77,14 +78,16 @@ public DesiredBalanceShardsAllocator( ShardsAllocator delegateAllocator, ThreadPool threadPool, ClusterService clusterService, - DesiredBalanceReconcilerAction reconciler + DesiredBalanceReconcilerAction reconciler, + TelemetryProvider telemetryProvider ) { this( delegateAllocator, threadPool, clusterService, new DesiredBalanceComputer(clusterSettings, threadPool, delegateAllocator), - reconciler + reconciler, + telemetryProvider ); } @@ -93,14 +96,19 @@ public DesiredBalanceShardsAllocator( ThreadPool threadPool, ClusterService clusterService, DesiredBalanceComputer desiredBalanceComputer, - DesiredBalanceReconcilerAction reconciler + DesiredBalanceReconcilerAction reconciler, + TelemetryProvider telemetryProvider ) { this.delegateAllocator = delegateAllocator; this.threadPool = threadPool; this.reconciler = reconciler; this.desiredBalanceComputer = desiredBalanceComputer; - this.desiredBalanceReconciler = new DesiredBalanceReconciler(clusterService.getClusterSettings(), threadPool); - this.desiredBalanceComputation = new ContinuousComputation<>(threadPool) { + this.desiredBalanceReconciler = new DesiredBalanceReconciler( + clusterService.getClusterSettings(), + threadPool, + telemetryProvider.getMeterRegistry() + ); + this.desiredBalanceComputation = new ContinuousComputation<>(threadPool.generic()) { @Override protected void processInput(DesiredBalanceInput desiredBalanceInput) { @@ -141,7 +149,7 @@ private DesiredBalance getInitialDesiredBalance() { @Override public String toString() 
{ - return "DesiredBalanceShardsAllocator#updateDesiredBalanceAndReroute"; + return "DesiredBalanceShardsAllocator#allocate"; } }; this.queue = new PendingListenersQueue(); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStats.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStats.java index 6a08b896136d2..8a95b947735f1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStats.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStats.java @@ -97,12 +97,12 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("unassigned_shards", unassignedShards); builder.field("total_allocations", totalAllocations); builder.field("undesired_allocations", undesiredAllocations); - builder.field("undesired_allocations_fraction", undesiredAllocationsFraction()); + builder.field("undesired_allocations_ratio", undesiredAllocationsRatio()); builder.endObject(); return builder; } - public double undesiredAllocationsFraction() { + public double undesiredAllocationsRatio() { if (unassignedShards == -1 || totalAllocations == -1 || undesiredAllocations == -1) { return -1.0; } else if (totalAllocations == 0) { diff --git a/server/src/main/java/org/elasticsearch/cluster/service/TransportFeatures.java b/server/src/main/java/org/elasticsearch/cluster/service/TransportFeatures.java new file mode 100644 index 0000000000000..bfecc577f7a47 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/service/TransportFeatures.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.cluster.service; + +import org.elasticsearch.Version; +import org.elasticsearch.features.FeatureSpecification; +import org.elasticsearch.features.NodeFeature; + +import java.util.Map; + +public class TransportFeatures implements FeatureSpecification { + @Override + public Map getHistoricalFeatures() { + // transport version was introduced in 8.8.0, but we need to wait until all nodes are >8.8.0 + // to properly detect when we need to fix transport versions + return Map.of(TransportVersionsFixupListener.FIX_TRANSPORT_VERSION, Version.V_8_8_1); + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java b/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java index a54130aec95b6..e77d44e5ad71e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java @@ -9,7 +9,6 @@ package org.elasticsearch.cluster.service; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; @@ -26,6 +25,9 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.UpdateForV9; +import org.elasticsearch.features.FeatureService; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; import org.elasticsearch.threadpool.Scheduler; @@ -47,10 +49,13 @@ * due to the master node not understanding cluster state with transport versions added in 8.8.0. * Any nodes with the inferred placeholder cluster state is then refreshed with their actual transport version */ +@UpdateForV9 // this can be removed in v9 public class TransportVersionsFixupListener implements ClusterStateListener { private static final Logger logger = LogManager.getLogger(TransportVersionsFixupListener.class); + static final NodeFeature FIX_TRANSPORT_VERSION = new NodeFeature("transport.fix_transport_version"); + private static final TimeValue RETRY_TIME = TimeValue.timeValueSeconds(30); private final MasterServiceTaskQueue taskQueue; @@ -58,13 +63,20 @@ public class TransportVersionsFixupListener implements ClusterStateListener { private final Scheduler scheduler; private final Executor executor; private final Set pendingNodes = Collections.synchronizedSet(new HashSet<>()); + private final FeatureService featureService; - public TransportVersionsFixupListener(ClusterService service, ClusterAdminClient client, ThreadPool threadPool) { + public TransportVersionsFixupListener( + ClusterService service, + ClusterAdminClient client, + FeatureService featureService, + ThreadPool threadPool + ) { // there tends to be a lot of state operations on an upgrade - this one is not time-critical, // so use LOW priority. It just needs to be run at some point after upgrade. 
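A note on the version gate this listener now uses (the check itself appears in the clusterChanged hunk below): TransportFeatures maps the historical feature FIX_TRANSPORT_VERSION to Version.V_8_8_1, so featureService.clusterHasFeature(...) only reports true once every node is on 8.8.1 or later, which effectively matches the previous getMinNodeVersion().after(Version.V_8_8_0) comparison. A minimal sketch of the gate, using only names that appear in this diff:

    // Sketch only: the fixup should run once the whole cluster understands transport versions.
    boolean everyNodeHasFix = featureService.clusterHasFeature(event.state(), FIX_TRANSPORT_VERSION);
    if (everyNodeHasFix && event.state().getMinTransportVersion().equals(INFERRED_TRANSPORT_VERSION)) {
        // refresh the inferred placeholder transport versions with the nodes' real ones
    }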
this( service.createTaskQueue("fixup-transport-versions", Priority.LOW, new TransportVersionUpdater()), client, + featureService, threadPool, threadPool.executor(ThreadPool.Names.CLUSTER_COORDINATION) ); @@ -73,11 +85,13 @@ public TransportVersionsFixupListener(ClusterService service, ClusterAdminClient TransportVersionsFixupListener( MasterServiceTaskQueue taskQueue, ClusterAdminClient client, + FeatureService featureService, Scheduler scheduler, Executor executor ) { this.taskQueue = taskQueue; this.client = client; + this.featureService = featureService; this.scheduler = scheduler; this.executor = executor; } @@ -139,7 +153,7 @@ public void clusterChanged(ClusterChangedEvent event) { // if the min node version > 8.8.0, and the cluster state has some transport versions == 8.8.0, // then refresh all inferred transport versions to their real versions // now that everything should understand cluster state with transport versions - if (event.state().nodes().getMinNodeVersion().after(Version.V_8_8_0) + if (featureService.clusterHasFeature(event.state(), FIX_TRANSPORT_VERSION) && event.state().getMinTransportVersion().equals(INFERRED_TRANSPORT_VERSION)) { // find all the relevant nodes diff --git a/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java b/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java index c7946a6e17bc6..55b03ec1192c8 100644 --- a/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java +++ b/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java @@ -36,6 +36,8 @@ import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; +import static org.elasticsearch.health.HealthStatus.GREEN; + /** * This class periodically logs the results of the Health API to the standard Elasticsearch server log file. */ @@ -202,7 +204,6 @@ static Map convertToLoggedFields(List ind // overall status final HealthStatus status = HealthStatus.merge(indicatorResults.stream().map(HealthIndicatorResult::status)); result.put(String.format(Locale.ROOT, "%s.overall.status", HEALTH_FIELD_PREFIX), status.xContentValue()); - result.put(MESSAGE_FIELD, String.format(Locale.ROOT, "health=%s", status.xContentValue())); // top-level status for each indicator indicatorResults.forEach((indicatorResult) -> { @@ -212,6 +213,18 @@ static Map convertToLoggedFields(List ind ); }); + // message field. Show the non-green indicators if they exist. 
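The effect of the lines that follow, illustrated with hypothetical indicator names (assumes java.util.List and java.util.Locale are imported): when every indicator is green the message stays a bare status, otherwise the non-green indicator names are appended in sorted order.

    // Illustration only; "disk" and "shards_availability" are example indicator names.
    // all indicators green      -> "health=green"
    // some indicators degraded  -> "health=yellow [disk,shards_availability]"
    String green = String.format(Locale.ROOT, "health=%s", "green");
    String degraded = String.format(Locale.ROOT, "health=%s [%s]", "yellow",
        String.join(",", List.of("disk", "shards_availability")));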
+ List nonGreen = indicatorResults.stream() + .filter(p -> p.status() != GREEN) + .map(HealthIndicatorResult::name) + .sorted() + .toList(); + if (nonGreen.isEmpty()) { + result.put(MESSAGE_FIELD, String.format(Locale.ROOT, "health=%s", status.xContentValue())); + } else { + result.put(MESSAGE_FIELD, String.format(Locale.ROOT, "health=%s [%s]", status.xContentValue(), String.join(",", nonGreen))); + } + return result; } diff --git a/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java b/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java index 7dd605c4c8a73..e19ee050c93a7 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java @@ -40,6 +40,7 @@ import org.apache.lucene.analysis.pt.PortugueseAnalyzer; import org.apache.lucene.analysis.ro.RomanianAnalyzer; import org.apache.lucene.analysis.ru.RussianAnalyzer; +import org.apache.lucene.analysis.sr.SerbianAnalyzer; import org.apache.lucene.analysis.sv.SwedishAnalyzer; import org.apache.lucene.analysis.th.ThaiAnalyzer; import org.apache.lucene.analysis.tr.TurkishAnalyzer; @@ -129,6 +130,7 @@ public static CharArraySet parseStemExclusion(Settings settings, CharArraySet de entry("_portuguese_", PortugueseAnalyzer.getDefaultStopSet()), entry("_romanian_", RomanianAnalyzer.getDefaultStopSet()), entry("_russian_", RussianAnalyzer.getDefaultStopSet()), + entry("_serbian_", SerbianAnalyzer.getDefaultStopSet()), entry("_sorani_", SoraniAnalyzer.getDefaultStopSet()), entry("_spanish_", SpanishAnalyzer.getDefaultStopSet()), entry("_swedish_", SwedishAnalyzer.getDefaultStopSet()), diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index ed7fab325408e..43437529cd301 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -48,6 +48,7 @@ import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.DocumentParser; @@ -100,7 +101,8 @@ public abstract class Engine implements Closeable { - public static final String SYNC_COMMIT_ID = "sync_id"; // TODO: Remove sync_id in 9.0 + @UpdateForV9 // TODO: Remove sync_id in 9.0 + public static final String SYNC_COMMIT_ID = "sync_id"; public static final String HISTORY_UUID_KEY = "history_uuid"; public static final String FORCE_MERGE_UUID_KEY = "force_merge_uuid"; public static final String MIN_RETAINED_SEQNO = "min_retained_seq_no"; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index deaac37508511..cbf2dd872da2f 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -707,7 +707,7 @@ public boolean isMultiField(String field) { */ public synchronized List reloadSearchAnalyzers(AnalysisRegistry registry, @Nullable String resource, boolean preview) throws IOException { - logger.info("reloading search analyzers"); + logger.debug("reloading search analyzers for index [{}]", indexSettings.getIndex().getName()); // TODO this should bust the cache somehow. 
Tracked in https://github.com/elastic/elasticsearch/issues/66722 return indexAnalyzers.reload(registry, indexSettings, resource, preview); } diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 8b6f6afb72042..fedd84ee7392b 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -2006,7 +2006,7 @@ private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier) t assert currentEngineReference.get() == null : "engine is running"; verifyNotClosed(); // we must create a new engine under mutex (see IndexShard#snapshotStoreMetadata). - final Engine newEngine = engineFactory.newReadWriteEngine(config); + final Engine newEngine = createEngine(config); onNewEngine(newEngine); currentEngineReference.set(newEngine); // We set active because we are now writing operations to the engine; this way, @@ -2021,6 +2021,22 @@ private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier) t checkAndCallWaitForEngineOrClosedShardListeners(); } + // awful hack to work around problem in CloseFollowerIndexIT + static boolean suppressCreateEngineErrors; + + private Engine createEngine(EngineConfig config) { + if (suppressCreateEngineErrors) { + try { + return engineFactory.newReadWriteEngine(config); + } catch (Error e) { + ExceptionsHelper.maybeDieOnAnotherThread(e); + throw new RuntimeException("rethrowing suppressed error", e); + } + } else { + return engineFactory.newReadWriteEngine(config); + } + } + private boolean assertSequenceNumbersInCommit() throws IOException { final SegmentInfos segmentCommitInfos = SegmentInfos.readLatestCommit(store.directory()); final Map userData = segmentCommitInfos.getUserData(); diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java index 3adaab078ad4a..3a2a810dc61b5 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java @@ -206,6 +206,22 @@ public IngestService( this.taskQueue = clusterService.createTaskQueue("ingest-pipelines", Priority.NORMAL, PIPELINE_TASK_EXECUTOR); } + /** + * This copy constructor returns a copy of the given ingestService, using all of the same internal state. 
The returned copy is not + * registered to listen to any cluster state changes + * @param ingestService + */ + IngestService(IngestService ingestService) { + this.clusterService = ingestService.clusterService; + this.scriptService = ingestService.scriptService; + this.documentParsingObserverSupplier = ingestService.documentParsingObserverSupplier; + this.processorFactories = ingestService.processorFactories; + this.threadPool = ingestService.threadPool; + this.taskQueue = ingestService.taskQueue; + this.pipelines = ingestService.pipelines; + this.state = ingestService.state; + } + private static Map processorFactories(List ingestPlugins, Processor.Parameters parameters) { Map processorFactories = new TreeMap<>(); for (IngestPlugin ingestPlugin : ingestPlugins) { diff --git a/server/src/main/java/org/elasticsearch/ingest/SimulateIngestService.java b/server/src/main/java/org/elasticsearch/ingest/SimulateIngestService.java new file mode 100644 index 0000000000000..2f9da248b2afb --- /dev/null +++ b/server/src/main/java/org/elasticsearch/ingest/SimulateIngestService.java @@ -0,0 +1,78 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.ingest; + +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.SimulateBulkRequest; + +import java.util.HashMap; +import java.util.Map; + +/** + * This is an implementation of IngestService that allows us to substitute pipeline definitions so that users can simulate ingest using + * pipelines that they define on the fly. + */ +public class SimulateIngestService extends IngestService { + private final Map pipelineSubstitutions; + + public SimulateIngestService(IngestService ingestService, BulkRequest request) { + super(ingestService); + if (request instanceof SimulateBulkRequest simulateBulkRequest) { + try { + pipelineSubstitutions = getPipelineSubstitutions(simulateBulkRequest.getPipelineSubstitutions(), ingestService); + } catch (Exception e) { + throw new RuntimeException(e); + } + } else { + throw new IllegalArgumentException("Expecting a SimulateBulkRequest but got " + request.getClass()); + } + } + + /** + * This transforms the pipeline substitutions from a SimulateBulkRequest into a new map, where the key is the pipelineId and the + * value is the Pipeline instance. The Pipeline is created using the Processor.Factories and the ScriptService of the given + * ingestService. 
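For orientation, a substitutions map of the shape this method consumes might look like the sketch below; the pipeline id and the set-processor configuration are made up, but the outer structure (pipeline id mapped to a put-pipeline style definition with a "processors" list) is what Pipeline.create is given.

    // Hypothetical input: one substituted pipeline named "my-pipeline".
    Map<String, Map<String, Object>> rawPipelineSubstitutions = Map.of(
        "my-pipeline",
        Map.of("processors", List.of(Map.of("set", Map.of("field", "foo", "value", "bar"))))
    );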
+ * @param rawPipelineSubstitutions The pipeline substitutions map received from a SimulateBulkRequest + * @param ingestService The ingestService beoing used + * @return A transformed version of rawPipelineSubstitutions, where the values are Pipeline objects + * @throws Exception + */ + private Map getPipelineSubstitutions( + Map> rawPipelineSubstitutions, + IngestService ingestService + ) throws Exception { + Map parsedPipelineSubstitutions = new HashMap<>(); + if (rawPipelineSubstitutions != null) { + for (Map.Entry> entry : rawPipelineSubstitutions.entrySet()) { + String pipelineId = entry.getKey(); + Pipeline pipeline = Pipeline.create( + pipelineId, + entry.getValue(), + ingestService.getProcessorFactories(), + ingestService.getScriptService() + ); + parsedPipelineSubstitutions.put(pipelineId, pipeline); + } + } + return parsedPipelineSubstitutions; + } + + /** + * This method returns the Pipeline for the given pipelineId. If a substitute definition of the pipeline has been defined for the + * current simulate, then that pipeline is returned. Otherwise, the pipeline stored in the cluster state is returned. + */ + @Override + public Pipeline getPipeline(String pipelineId) { + Pipeline pipeline = pipelineSubstitutions.get(pipelineId); + if (pipeline == null) { + pipeline = super.getPipeline(pipelineId); + } + return pipeline; + } +} diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index 8f557b4e9db5c..0dba888b91436 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -60,7 +60,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.TransportVersionsFixupListener; import org.elasticsearch.cluster.version.CompatibilityVersions; -import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.inject.Injector; import org.elasticsearch.common.inject.Key; @@ -80,6 +79,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.Tuple; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; @@ -113,7 +113,6 @@ import org.elasticsearch.indices.SystemIndexMappingUpdateService; import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.indices.analysis.AnalysisModule; -import org.elasticsearch.indices.breaker.BreakerSettings; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; @@ -254,7 +253,17 @@ static NodeConstruction prepareConstruction( SearchModule searchModule = constructor.createSearchModule(settingsModule.getSettings(), threadPool); constructor.createClientAndRegistries(settingsModule.getSettings(), threadPool, searchModule); - constructor.construct(threadPool, settingsModule, searchModule, serviceProvider, forbidPrivateIndexSettings); + ScriptService scriptService = constructor.createScriptService(settingsModule, threadPool, serviceProvider); + + constructor.construct( + threadPool, + settingsModule, + searchModule, + scriptService, + constructor.createAnalysisRegistry(), + serviceProvider, + 
forbidPrivateIndexSettings + ); return constructor; } catch (IOException e) { @@ -545,54 +554,59 @@ private void createClientAndRegistries(Settings settings, ThreadPool threadPool, }); } + private ScriptService createScriptService(SettingsModule settingsModule, ThreadPool threadPool, NodeServiceProvider serviceProvider) { + Settings settings = settingsModule.getSettings(); + ScriptModule scriptModule = new ScriptModule(settings, pluginsService.filterPlugins(ScriptPlugin.class).toList()); + + ScriptService scriptService = serviceProvider.newScriptService( + pluginsService, + settings, + scriptModule.engines, + scriptModule.contexts, + threadPool::absoluteTimeInMillis + ); + ScriptModule.registerClusterSettingsListeners(scriptService, settingsModule.getClusterSettings()); + modules.add(b -> { + b.bind(ScriptService.class).toInstance(scriptService); + b.bind(UpdateHelper.class).toInstance(new UpdateHelper(scriptService)); + }); + + return scriptService; + } + + private AnalysisRegistry createAnalysisRegistry() throws IOException { + AnalysisRegistry registry = new AnalysisModule( + environment, + pluginsService.filterPlugins(AnalysisPlugin.class).toList(), + pluginsService.getStablePluginRegistry() + ).getAnalysisRegistry(); + modules.bindToInstance(AnalysisRegistry.class, registry); + return registry; + } + private void construct( ThreadPool threadPool, SettingsModule settingsModule, SearchModule searchModule, + ScriptService scriptService, + AnalysisRegistry analysisRegistry, NodeServiceProvider serviceProvider, boolean forbidPrivateIndexSettings ) throws IOException { Settings settings = settingsModule.getSettings(); - final ResourceWatcherService resourceWatcherService = new ResourceWatcherService(settings, threadPool); - resourcesToClose.add(resourceWatcherService); - - final Set taskHeaders = Stream.concat( - pluginsService.filterPlugins(ActionPlugin.class).flatMap(p -> p.getTaskHeaders().stream()), - Task.HEADERS_TO_COPY.stream() - ).collect(Collectors.toSet()); - final TelemetryProvider telemetryProvider = getSinglePlugin(TelemetryPlugin.class).map(p -> p.getTelemetryProvider(settings)) .orElse(TelemetryProvider.NOOP); final Tracer tracer = telemetryProvider.getTracer(); + Set taskHeaders = Stream.concat( + pluginsService.filterPlugins(ActionPlugin.class).flatMap(p -> p.getTaskHeaders().stream()), + Task.HEADERS_TO_COPY.stream() + ).collect(Collectors.toSet()); final TaskManager taskManager = new TaskManager(settings, threadPool, taskHeaders, tracer); - final ScriptModule scriptModule = new ScriptModule(settings, pluginsService.filterPlugins(ScriptPlugin.class).toList()); - final ScriptService scriptService = serviceProvider.newScriptService( - pluginsService, - settings, - scriptModule.engines, - scriptModule.contexts, - threadPool::absoluteTimeInMillis - ); - AnalysisModule analysisModule = new AnalysisModule( - environment, - pluginsService.filterPlugins(AnalysisPlugin.class).toList(), - pluginsService.getStablePluginRegistry() - ); - - ScriptModule.registerClusterSettingsListeners(scriptService, settingsModule.getClusterSettings()); - final NetworkService networkService = new NetworkService( - pluginsService.filterPlugins(DiscoveryPlugin.class) - .map(d -> d.getCustomNameResolver(environment.settings())) - .filter(Objects::nonNull) - .toList() - ); - - List clusterPlugins = pluginsService.filterPlugins(ClusterPlugin.class).toList(); final ClusterService clusterService = new ClusterService(settings, settingsModule.getClusterSettings(), threadPool, taskManager); 
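The helper methods introduced in this refactor all follow the same shape, sketched below with a hypothetical service type: build the service, hand it to the injector through the modules collection, and return it so the rest of construct() can keep using it directly.

    // Sketch of the create-bind-return pattern used by createScriptService, createAnalysisRegistry, etc.
    private SomeService createSomeService(Settings settings) {
        SomeService someService = new SomeService(settings);     // hypothetical service
        modules.bindToInstance(SomeService.class, someService);  // available via injection later
        return someService;                                      // and usable right away during construction
    }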
clusterService.addStateApplier(scriptService); resourcesToClose.add(clusterService); @@ -611,7 +625,7 @@ private void construct( threadPool, environment, scriptService, - analysisModule.getAnalysisRegistry(), + analysisRegistry, pluginsService.filterPlugins(IngestPlugin.class).toList(), client, IngestService.createGrokThreadWatchdog(environment, threadPool), @@ -625,14 +639,8 @@ private void construct( threadPool, client ); - final UsageService usageService = new UsageService(); - final List features = pluginsService.filterPlugins(SystemIndexPlugin.class).map(plugin -> { - SystemIndices.validateFeatureName(plugin.getFeatureName(), plugin.getClass().getCanonicalName()); - return SystemIndices.Feature.fromSystemIndexPlugin(plugin, settings); - }).toList(); - final SystemIndices systemIndices = new SystemIndices(features); - final ExecutorSelector executorSelector = systemIndices.getExecutorSelector(); + SystemIndices systemIndices = createSystemIndices(settings); final MonitorService monitorService = new MonitorService(settings, nodeEnvironment, threadPool); final FsHealthService fsHealthService = new FsHealthService( @@ -652,30 +660,22 @@ private void construct( final ClusterModule clusterModule = new ClusterModule( settings, clusterService, - clusterPlugins, + pluginsService.filterPlugins(ClusterPlugin.class).toList(), clusterInfoService, snapshotsInfoService, threadPool, systemIndices, - writeLoadForecaster + writeLoadForecaster, + telemetryProvider ); modules.add(clusterModule); IndicesModule indicesModule = new IndicesModule(pluginsService.filterPlugins(MapperPlugin.class).toList()); modules.add(indicesModule); - List pluginCircuitBreakers = pluginsService.filterPlugins(CircuitBreakerPlugin.class) - .map(plugin -> plugin.getCircuitBreaker(settings)) - .toList(); - final CircuitBreakerService circuitBreakerService = createCircuitBreakerService( + CircuitBreakerService circuitBreakerService = createCircuitBreakerService( settingsModule.getSettings(), - pluginCircuitBreakers, settingsModule.getClusterSettings() ); - pluginsService.filterPlugins(CircuitBreakerPlugin.class).forEach(plugin -> { - CircuitBreaker breaker = circuitBreakerService.getBreaker(plugin.getCircuitBreaker(settings).getName()); - plugin.setCircuitBreaker(breaker); - }); - resourcesToClose.add(circuitBreakerService); modules.add(new GatewayModule()); CompatibilityVersions compatibilityVersions = new CompatibilityVersions( @@ -721,23 +721,25 @@ private void construct( .flatMap(m -> m.entrySet().stream()) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + FeatureService featureService = new FeatureService(pluginsService.loadServiceProviders(FeatureSpecification.class)); + if (DiscoveryNode.isMasterNode(settings)) { clusterService.addListener(new SystemIndexMappingUpdateService(systemIndices, client)); - clusterService.addListener(new TransportVersionsFixupListener(clusterService, client.admin().cluster(), threadPool)); + clusterService.addListener( + new TransportVersionsFixupListener(clusterService, client.admin().cluster(), featureService, threadPool) + ); } final RerouteService rerouteService = new BatchedRerouteService(clusterService, clusterModule.getAllocationService()::reroute); rerouteServiceReference.set(rerouteService); clusterService.setRerouteService(rerouteService); - FeatureService featureService = new FeatureService(pluginsService.loadServiceProviders(FeatureSpecification.class)); - final IndicesService indicesService = new IndicesService( settings, pluginsService, nodeEnvironment, 
xContentRegistry, - analysisModule.getAnalysisRegistry(), + analysisRegistry, clusterModule.getIndexNameExpressionResolver(), indicesModule.getMapperRegistry(), namedWriteableRegistry, @@ -819,7 +821,7 @@ record PluginServiceInstances( client, clusterService, threadPool, - resourceWatcherService, + createResourceWatcherService(settings, threadPool), scriptService, xContentRegistry, environment, @@ -872,7 +874,7 @@ record PluginServiceInstances( pluginsService.filterPlugins(ActionPlugin.class).toList(), client, circuitBreakerService, - usageService, + createUsageService(), systemIndices, tracer, clusterService, @@ -881,6 +883,12 @@ record PluginServiceInstances( ); modules.add(actionModule); + final NetworkService networkService = new NetworkService( + pluginsService.filterPlugins(DiscoveryPlugin.class) + .map(d -> d.getCustomNameResolver(environment.settings())) + .filter(Objects::nonNull) + .toList() + ); final RestController restController = actionModule.getRestController(); final NetworkModule networkModule = new NetworkModule( settings, @@ -1047,7 +1055,7 @@ record PluginServiceInstances( searchModule.getFetchPhase(), responseCollectorService, circuitBreakerService, - executorSelector, + systemIndices.getExecutorSelector(), tracer ); @@ -1132,15 +1140,10 @@ record PluginServiceInstances( modules.add(b -> { b.bind(NodeService.class).toInstance(nodeService); - b.bind(ResourceWatcherService.class).toInstance(resourceWatcherService); - b.bind(CircuitBreakerService.class).toInstance(circuitBreakerService); b.bind(BigArrays.class).toInstance(bigArrays); b.bind(PageCacheRecycler.class).toInstance(pageCacheRecycler); - b.bind(ScriptService.class).toInstance(scriptService); - b.bind(AnalysisRegistry.class).toInstance(analysisModule.getAnalysisRegistry()); b.bind(IngestService.class).toInstance(ingestService); b.bind(IndexingPressure.class).toInstance(indexingLimits); - b.bind(UsageService.class).toInstance(usageService); b.bind(AggregationUsageService.class).toInstance(searchModule.getValuesSourceRegistry().getUsageService()); b.bind(MetadataUpgrader.class).toInstance(metadataUpgrader); b.bind(MetaStateService.class).toInstance(metaStateService); @@ -1156,7 +1159,6 @@ record PluginServiceInstances( b.bind(Transport.class).toInstance(transport); b.bind(TransportService.class).toInstance(transportService); b.bind(NetworkService.class).toInstance(networkService); - b.bind(UpdateHelper.class).toInstance(new UpdateHelper(scriptService)); b.bind(IndexMetadataVerifier.class).toInstance(indexMetadataVerifier); b.bind(ClusterInfoService.class).toInstance(clusterInfoService); b.bind(SnapshotsInfoService.class).toInstance(snapshotsInfoService); @@ -1175,9 +1177,7 @@ record PluginServiceInstances( b.bind(RerouteService.class).toInstance(rerouteService); b.bind(ShardLimitValidator.class).toInstance(shardLimitValidator); b.bind(FsHealthService.class).toInstance(fsHealthService); - b.bind(SystemIndices.class).toInstance(systemIndices); b.bind(PluginShutdownService.class).toInstance(pluginShutdownService); - b.bind(ExecutorSelector.class).toInstance(executorSelector); b.bind(IndexSettingProviders.class).toInstance(indexSettingProviders); b.bind(DesiredNodesSettingsValidator.class).toInstance(new DesiredNodesSettingsValidator()); b.bind(HealthNodeTaskExecutor.class).toInstance(healthNodeTaskExecutor); @@ -1199,6 +1199,33 @@ record PluginServiceInstances( postInjection(clusterModule, actionModule, clusterService, transportService, featureService); } + private UsageService createUsageService() { + UsageService 
usageService = new UsageService(); + modules.bindToInstance(UsageService.class, usageService); + return usageService; + } + + private SystemIndices createSystemIndices(Settings settings) { + List features = pluginsService.filterPlugins(SystemIndexPlugin.class).map(plugin -> { + SystemIndices.validateFeatureName(plugin.getFeatureName(), plugin.getClass().getCanonicalName()); + return SystemIndices.Feature.fromSystemIndexPlugin(plugin, settings); + }).toList(); + + SystemIndices systemIndices = new SystemIndices(features); + modules.add(b -> { + b.bind(SystemIndices.class).toInstance(systemIndices); + b.bind(ExecutorSelector.class).toInstance(systemIndices.getExecutorSelector()); + }); + return systemIndices; + } + + private ResourceWatcherService createResourceWatcherService(Settings settings, ThreadPool threadPool) { + ResourceWatcherService resourceWatcherService = new ResourceWatcherService(settings, threadPool); + resourcesToClose.add(resourceWatcherService); + modules.bindToInstance(ResourceWatcherService.class, resourceWatcherService); + return resourceWatcherService; + } + private Module loadDiagnosticServices( Settings settings, Coordinator coordinator, @@ -1320,21 +1347,31 @@ private Supplier getDocumentParsingObserverSupplier() { } /** - * Creates a new {@link CircuitBreakerService} based on the settings provided. + * Create and initialize a new {@link CircuitBreakerService} based on the settings provided. * * @see Node#BREAKER_TYPE_KEY */ - private static CircuitBreakerService createCircuitBreakerService( - Settings settings, - List breakerSettings, - ClusterSettings clusterSettings - ) { + private CircuitBreakerService createCircuitBreakerService(Settings settings, ClusterSettings clusterSettings) { + var pluginBreakers = pluginsService.filterPlugins(CircuitBreakerPlugin.class) + .map(p -> Tuple.tuple(p, p.getCircuitBreaker(settings))) + .toList(); + String type = Node.BREAKER_TYPE_KEY.get(settings); - return switch (type) { - case "hierarchy" -> new HierarchyCircuitBreakerService(settings, breakerSettings, clusterSettings); + CircuitBreakerService circuitBreakerService = switch (type) { + case "hierarchy" -> new HierarchyCircuitBreakerService( + settings, + pluginBreakers.stream().map(Tuple::v2).toList(), + clusterSettings + ); case "none" -> new NoneCircuitBreakerService(); default -> throw new IllegalArgumentException("Unknown circuit breaker type [" + type + "]"); }; + resourcesToClose.add(circuitBreakerService); + modules.bindToInstance(CircuitBreakerService.class, circuitBreakerService); + + pluginBreakers.forEach(t -> t.v1().setCircuitBreaker(circuitBreakerService.getBreaker(t.v2().getName()))); + + return circuitBreakerService; } /** diff --git a/server/src/main/java/org/elasticsearch/readiness/ReadinessService.java b/server/src/main/java/org/elasticsearch/readiness/ReadinessService.java index 774d47b583686..7f7a55762bf08 100644 --- a/server/src/main/java/org/elasticsearch/readiness/ReadinessService.java +++ b/server/src/main/java/org/elasticsearch/readiness/ReadinessService.java @@ -242,8 +242,11 @@ public void clusterChanged(ClusterChangedEvent event) { this.shuttingDown = shutdownNodeIds.contains(clusterState.nodes().getLocalNodeId()); if (shuttingDown) { - setReady(false); - logger.info("marking node as not ready because it's shutting down"); + // only disable the probe and log if the probe is running + if (ready()) { + setReady(false); + logger.info("marking node as not ready because it's shutting down"); + } } else { if 
(clusterState.nodes().getLocalNodeId().equals(clusterState.nodes().getMasterNodeId())) { setReady(fileSettingsApplied); diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index a53674882c84d..a0f259f95f14c 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -177,6 +177,9 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp protected final ThreadPool threadPool; + public static final String STATELESS_SHARD_THREAD_NAME = "stateless_shard"; + public static final String STATELESS_TRANSLOG_THREAD_NAME = "stateless_translog"; + public static final String SNAPSHOT_PREFIX = "snap-"; public static final String INDEX_FILE_PREFIX = "index-"; @@ -1975,7 +1978,14 @@ public long getRestoreThrottleTimeInNanos() { } protected void assertSnapshotOrGenericThread() { - assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.SNAPSHOT, ThreadPool.Names.SNAPSHOT_META, ThreadPool.Names.GENERIC); + // The Stateless plugin adds custom thread pools for object store operations + assert ThreadPool.assertCurrentThreadPool( + ThreadPool.Names.SNAPSHOT, + ThreadPool.Names.SNAPSHOT_META, + ThreadPool.Names.GENERIC, + STATELESS_SHARD_THREAD_NAME, + STATELESS_TRANSLOG_THREAD_NAME + ); } @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java index b6e1240a3f85a..a8f6fa325b468 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.RestApiVersion; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; @@ -50,6 +51,7 @@ @ServerlessScope(Scope.PUBLIC) public class RestGetAliasesAction extends BaseRestHandler { + @UpdateForV9 // reject the deprecated ?local parameter private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(RestGetAliasesAction.class); @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/ingest/RestSimulateIngestAction.java b/server/src/main/java/org/elasticsearch/rest/action/ingest/RestSimulateIngestAction.java new file mode 100644 index 0000000000000..e0d9dd95206cf --- /dev/null +++ b/server/src/main/java/org/elasticsearch/rest/action/ingest/RestSimulateIngestAction.java @@ -0,0 +1,179 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.rest.action.ingest; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.bulk.SimulateBulkAction; +import org.elasticsearch.action.bulk.SimulateBulkRequest; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.ingest.ConfigurationUtils; +import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.search.fetch.subphase.FetchSourceContext; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestRequest.Method.POST; + +/** + * This is the REST endpoint for the simulate ingest API. This API executes all pipelines for a document (or documents) that would be + * executed if that document were sent to the given index. The JSON that would be indexed is returned to the user, along with the list of + * pipelines that were executed. The API allows the user to optionally send in substitute definitions for pipelines so that changes can be + * tried out without actually modifying the cluster state. 
+ */ +@ServerlessScope(Scope.PUBLIC) +public class RestSimulateIngestAction extends BaseRestHandler { + + @Override + public List routes() { + return List.of( + new Route(GET, "/_ingest/_simulate"), + new Route(POST, "/_ingest/_simulate"), + new Route(GET, "/_ingest/{index}/_simulate"), + new Route(POST, "/_ingest/{index}/_simulate") + ); + } + + @Override + public String getName() { + return "ingest_simulate_ingest_action"; + } + + @Override + @SuppressWarnings("unchecked") + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + String defaultIndex = request.param("index"); + FetchSourceContext defaultFetchSourceContext = FetchSourceContext.parseFromRestRequest(request); + String defaultPipeline = request.param("pipeline"); + Tuple sourceTuple = request.contentOrSourceParam(); + Map sourceMap = XContentHelper.convertToMap(sourceTuple.v2(), false, sourceTuple.v1()).v2(); + SimulateBulkRequest bulkRequest = new SimulateBulkRequest( + (Map>) sourceMap.remove("pipeline_substitutions") + ); + BytesReference transformedData = convertToBulkRequestXContentBytes(sourceMap); + bulkRequest.add( + transformedData, + defaultIndex, + null, + defaultFetchSourceContext, + defaultPipeline, + null, + true, + true, + request.getXContentType(), + request.getRestApiVersion() + ); + return channel -> client.execute(SimulateBulkAction.INSTANCE, bulkRequest, new SimulateIngestRestToXContentListener(channel)); + } + + /* + * The simulate ingest API is intended to have inputs and outputs that are formatted similarly to the simulate pipeline API for the + * sake of consistency. But internally it uses the same code as the _bulk API, so that we have confidence that we are simulating what + * really happens on ingest. This method transforms simulate-style inputs into an input that the bulk API can accept. 
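To make the transformation concrete, a rough before/after with made-up index, id, and field values: each entry of the simulate-style "docs" array becomes an action line plus a source line in the bulk-style body this method produces.

    Simulate-style input:
        { "docs": [ { "_index": "my-index", "_id": "1", "_source": { "foo": "bar" } } ] }

    Equivalent bulk-style x-content:
        { "index": { "_index": "my-index", "_id": "1" } }
        { "foo": "bar" }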
+ * Non-private for unit testing + */ + static BytesReference convertToBulkRequestXContentBytes(Map sourceMap) throws IOException { + List> docs = ConfigurationUtils.readList(null, null, sourceMap, "docs"); + if (docs.isEmpty()) { + throw new IllegalArgumentException("must specify at least one document in [docs]"); + } + ByteBuffer[] buffers = new ByteBuffer[2 * docs.size()]; + int bufferCount = 0; + for (Map doc : docs) { + if ((doc != null) == false) { + throw new IllegalArgumentException("malformed [docs] section, should include an inner object"); + } + Map document = ConfigurationUtils.readMap(null, null, doc, "_source"); + String index = ConfigurationUtils.readOptionalStringProperty(null, null, doc, IngestDocument.Metadata.INDEX.getFieldName()); + String id = ConfigurationUtils.readOptionalStringProperty(null, null, doc, IngestDocument.Metadata.ID.getFieldName()); + XContentBuilder actionXContentBuilder = XContentFactory.contentBuilder(XContentType.JSON).lfAtEnd(); + actionXContentBuilder.startObject().field("index").startObject(); + if (index != null) { + actionXContentBuilder.field("_index", index); + } + if (id != null) { + actionXContentBuilder.field("_id", id); + } + actionXContentBuilder.endObject().endObject(); + buffers[bufferCount++] = ByteBuffer.wrap(BytesReference.bytes(actionXContentBuilder).toBytesRef().bytes); + XContentBuilder dataXContentBuilder = XContentFactory.contentBuilder(XContentType.JSON).lfAtEnd(); + dataXContentBuilder.startObject(); + for (String key : document.keySet()) { + dataXContentBuilder.field(key, document.get(key)); + } + dataXContentBuilder.endObject(); + buffers[bufferCount++] = ByteBuffer.wrap(BytesReference.bytes(dataXContentBuilder).toBytesRef().bytes); + } + return BytesReference.fromByteBuffers(buffers); + } + + /* + * The simulate ingest API is intended to have inputs and outputs that are formatted similarly to the simulate pipeline API for the + * sake of consistency. But internally it uses the same code as the _bulk API, so that we have confidence that we are simulating what + * really happens on ingest. This class is used in place of RestToXContentListener to transform simulate-style outputs into an + * simulate-style xcontent. 
+ * Non-private for unit testing + */ + static class SimulateIngestRestToXContentListener extends RestToXContentListener { + + SimulateIngestRestToXContentListener(RestChannel channel) { + super(channel); + } + + @Override + public RestResponse buildResponse(BulkResponse response, XContentBuilder builder) throws Exception { + assert response.isFragment() == false; + toXContent(response, builder, channel.request()); + RestStatus restStatus = statusFunction.apply(response); + return new RestResponse(restStatus, builder); + } + + private static void toXContent(BulkResponse response, XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.startArray("docs"); + for (BulkItemResponse item : response) { + builder.startObject(); + builder.startObject("doc"); + if (item.isFailed()) { + builder.field("_id", item.getFailure().getId()); + builder.field("_index", item.getFailure().getIndex()); + builder.startObject("error"); + ElasticsearchException.generateThrowableXContent(builder, params, item.getFailure().getCause()); + builder.endObject(); + } else { + item.getResponse().innerToXContent(builder, params); + } + builder.endObject(); + builder.endObject(); + } + builder.endArray(); + builder.endObject(); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java index a4f641fd6f071..bf73234d6fe57 100644 --- a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java @@ -123,7 +123,6 @@ final class DefaultSearchContext extends SearchContext { private Query query; private ParsedQuery postFilter; private Query aliasFilter; - private int[] docIdsToLoad; private SearchContextAggregations aggregations; private SearchHighlightContext highlight; private SuggestionSearchContext suggest; @@ -149,46 +148,54 @@ final class DefaultSearchContext extends SearchContext { this.readerContext = readerContext; this.request = request; this.fetchPhase = fetchPhase; - this.searchType = request.searchType(); - this.shardTarget = shardTarget; - this.indexService = readerContext.indexService(); - this.indexShard = readerContext.indexShard(); - - Engine.Searcher engineSearcher = readerContext.acquireSearcher("search"); - if (executor == null) { - this.searcher = new ContextIndexSearcher( - engineSearcher.getIndexReader(), - engineSearcher.getSimilarity(), - engineSearcher.getQueryCache(), - engineSearcher.getQueryCachingPolicy(), - lowLevelCancellation - ); - } else { - this.searcher = new ContextIndexSearcher( - engineSearcher.getIndexReader(), - engineSearcher.getSimilarity(), - engineSearcher.getQueryCache(), - engineSearcher.getQueryCachingPolicy(), - lowLevelCancellation, - executor, - maximumNumberOfSlices, - minimumDocsPerSlice + boolean success = false; + try { + this.searchType = request.searchType(); + this.shardTarget = shardTarget; + this.indexService = readerContext.indexService(); + this.indexShard = readerContext.indexShard(); + + Engine.Searcher engineSearcher = readerContext.acquireSearcher("search"); + if (executor == null) { + this.searcher = new ContextIndexSearcher( + engineSearcher.getIndexReader(), + engineSearcher.getSimilarity(), + engineSearcher.getQueryCache(), + engineSearcher.getQueryCachingPolicy(), + lowLevelCancellation + ); + } else { + this.searcher = new ContextIndexSearcher( + engineSearcher.getIndexReader(), + engineSearcher.getSimilarity(), 
+ engineSearcher.getQueryCache(), + engineSearcher.getQueryCachingPolicy(), + lowLevelCancellation, + executor, + maximumNumberOfSlices, + minimumDocsPerSlice + ); + } + releasables.addAll(List.of(engineSearcher, searcher)); + + this.relativeTimeSupplier = relativeTimeSupplier; + this.timeout = timeout; + searchExecutionContext = indexService.newSearchExecutionContext( + request.shardId().id(), + request.shardRequestIndex(), + searcher, + request::nowInMillis, + shardTarget.getClusterAlias(), + request.getRuntimeMappings() ); + queryBoost = request.indexBoost(); + this.lowLevelCancellation = lowLevelCancellation; + success = true; + } finally { + if (success == false) { + close(); + } } - releasables.addAll(List.of(engineSearcher, searcher)); - - this.relativeTimeSupplier = relativeTimeSupplier; - this.timeout = timeout; - searchExecutionContext = indexService.newSearchExecutionContext( - request.shardId().id(), - request.shardRequestIndex(), - searcher, - request::nowInMillis, - shardTarget.getClusterAlias(), - request.getRuntimeMappings() - ); - queryBoost = request.indexBoost(); - this.lowLevelCancellation = lowLevelCancellation; } @Override @@ -719,17 +726,6 @@ public void seqNoAndPrimaryTerm(boolean seqNoAndPrimaryTerm) { this.seqAndPrimaryTerm = seqNoAndPrimaryTerm; } - @Override - public int[] docIdsToLoad() { - return docIdsToLoad; - } - - @Override - public SearchContext docIdsToLoad(int[] docIdsToLoad) { - this.docIdsToLoad = docIdsToLoad; - return this; - } - @Override public DfsSearchResult dfsResult() { return dfsResult; diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 73350d60b256c..b64a4b749669e 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -707,8 +707,7 @@ private QueryFetchSearchResult executeFetchPhase(ReaderContext reader, SearchCon Releasable scope = tracer.withScope(SpanId.forTask(context.getTask())); SearchOperationListenerExecutor executor = new SearchOperationListenerExecutor(context, true, afterQueryTime) ) { - shortcutDocIdsToLoad(context); - fetchPhase.execute(context); + fetchPhase.execute(context, shortcutDocIdsToLoad(context)); if (reader.singleSession()) { freeReaderContext(reader.id()); } @@ -857,11 +856,10 @@ public void executeFetchPhase(ShardFetchRequest request, SearchShardTask task, A } searchContext.assignRescoreDocIds(readerContext.getRescoreDocIds(request.getRescoreDocIds())); searchContext.searcher().setAggregatedDfs(readerContext.getAggregatedDfs(request.getAggregatedDfs())); - searchContext.docIdsToLoad(request.docIds()); try ( SearchOperationListenerExecutor executor = new SearchOperationListenerExecutor(searchContext, true, System.nanoTime()) ) { - fetchPhase.execute(searchContext); + fetchPhase.execute(searchContext, request.docIds()); if (readerContext.singleSession()) { freeReaderContext(request.contextId()); } @@ -1464,7 +1462,7 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc * Shortcut ids to load, we load only "from" and up to "size". 
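As a minimal sketch of the from/size windowing described here (the suggest-completion and scroll branches handled by the real method are ignored, and the names are illustrative): only the hits that can appear on the requested page are fetched.

    // Sketch only: fetch just the doc ids that can appear on the requested page.
    static int[] windowToLoad(int[] rankedDocIds, int from, int size) {
        int start = Math.min(from, rankedDocIds.length);
        int end = Math.min(start + size, rankedDocIds.length);
        return java.util.Arrays.copyOfRange(rankedDocIds, start, end);
    }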
The phase controller * handles this as well since the result is always size * shards for Q_T_F */ - private static void shortcutDocIdsToLoad(SearchContext context) { + private static int[] shortcutDocIdsToLoad(SearchContext context) { final int[] docIdsToLoad; int docsOffset = 0; final Suggest suggest = context.queryResult().suggest(); @@ -1502,7 +1500,7 @@ private static void shortcutDocIdsToLoad(SearchContext context) { docIdsToLoad[docsOffset++] = option.getDoc().doc; } } - context.docIdsToLoad(docIdsToLoad); + return docIdsToLoad; } private static void processScroll(InternalScrollSearchRequest request, SearchContext context) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java b/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java index d950706b46b82..ecfeff45140fb 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java @@ -388,7 +388,6 @@ private static Comparator comparingKeys() { /** * @return compare by {@link Bucket#getKey()} that will be in the bucket once it is reduced */ - @SuppressWarnings("unchecked") private static Comparator> comparingDelayedKeys() { return DelayedBucket::compareKey; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/MultiBucketCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/MultiBucketCollector.java index e98762f462243..b956658f1226d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/MultiBucketCollector.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/MultiBucketCollector.java @@ -183,17 +183,16 @@ public LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx) ); } } - switch (leafCollectors.size()) { - case 0: + return switch (leafCollectors.size()) { + case 0 -> { if (terminateIfNoop) { throw new CollectionTerminatedException(); } - return LeafBucketCollector.NO_OP_COLLECTOR; - case 1: - return leafCollectors.get(0); - default: - return new MultiLeafBucketCollector(leafCollectors, cacheScores); - } + yield LeafBucketCollector.NO_OP_COLLECTOR; + } + case 1 -> leafCollectors.get(0); + default -> new MultiLeafBucketCollector(leafCollectors, cacheScores); + }; } private static class MultiLeafBucketCollector extends LeafBucketCollector { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/TopBucketBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/TopBucketBuilder.java index 9980918badfd5..61427b446cb6d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/TopBucketBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/TopBucketBuilder.java @@ -102,7 +102,7 @@ static class PriorityQueueTopBucketBuilder= ArrayUtil.MAX_ARRAY_LENGTH) { throw new IllegalArgumentException("can't reduce more than [" + ArrayUtil.MAX_ARRAY_LENGTH + "] buckets"); } - queue = new PriorityQueue>(size) { + queue = new PriorityQueue<>(size) { private final Comparator> comparator = order.delayedBucketComparator(); @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java index b33abb0f95824..7c3c6f8397979 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java @@ -40,11 +40,7 @@ * this collector. */ public class BestBucketsDeferringCollector extends DeferringBucketCollector { - static class Entry { - final AggregationExecutionContext aggCtx; - final PackedLongValues docDeltas; - final PackedLongValues buckets; - + record Entry(AggregationExecutionContext aggCtx, PackedLongValues docDeltas, PackedLongValues buckets) { Entry(AggregationExecutionContext aggCtx, PackedLongValues docDeltas, PackedLongValues buckets) { this.aggCtx = Objects.requireNonNull(aggCtx); this.docDeltas = Objects.requireNonNull(docDeltas); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java index dff95332d3f16..cee90f55597b2 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java @@ -256,22 +256,19 @@ private void finishLeaf() { /** Return true if the provided field may have multiple values per document in the leaf **/ private static boolean isMaybeMultivalued(LeafReaderContext context, SortField sortField) throws IOException { SortField.Type type = IndexSortConfig.getSortFieldType(sortField); - switch (type) { - case STRING: + return switch (type) { + case STRING -> { final SortedSetDocValues v1 = context.reader().getSortedSetDocValues(sortField.getField()); - return v1 != null && DocValues.unwrapSingleton(v1) == null; - - case DOUBLE: - case FLOAT: - case LONG: - case INT: + yield v1 != null && DocValues.unwrapSingleton(v1) == null; + } + case DOUBLE, FLOAT, LONG, INT -> { final SortedNumericDocValues v2 = context.reader().getSortedNumericDocValues(sortField.getField()); - return v2 != null && DocValues.unwrapSingleton(v2) == null; - - default: + yield v2 != null && DocValues.unwrapSingleton(v2) == null; + } + default -> // we have no clue whether the field is multi-valued or not so we assume it is. 
- return true; - } + true; + }; } /** @@ -631,13 +628,5 @@ public void collectDebugInfo(BiConsumer add) { } } - private static class Entry { - final AggregationExecutionContext aggCtx; - final DocIdSet docIdSet; - - Entry(AggregationExecutionContext aggCtx, DocIdSet docIdSet) { - this.aggCtx = aggCtx; - this.docIdSet = docIdSet; - } - } + private record Entry(AggregationExecutionContext aggCtx, DocIdSet docIdSet) {} } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java index 2c4eb02dfa6d6..a6aab3c52a2a3 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java @@ -29,7 +29,7 @@ */ final class CompositeValuesCollectorQueue extends PriorityQueue implements Releasable { private class Slot { - int value; + final int value; Slot(int initial) { this.value = initial; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java index 927104a92deb2..f2c601e412f92 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java @@ -628,10 +628,10 @@ public Object get(Object key) { @Override public Set> entrySet() { - return new AbstractSet>() { + return new AbstractSet<>() { @Override public Iterator> iterator() { - return new Iterator>() { + return new Iterator<>() { int pos = 0; @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java index 3d79509ad9377..ca9968834e611 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java @@ -43,7 +43,7 @@ class LongValuesSource extends SingleDimensionValuesSource { private final CheckedFunction docValuesFunc; private final LongUnaryOperator rounding; - private BitArray bits; + private final BitArray bits; private LongArray values; private long currentValue; private boolean missingCurrentValue; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregator.java index e0792fca6c28f..1ea6545d8f088 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregator.java @@ -140,7 +140,7 @@ public static FiltersAggregator build( Map metadata ) throws IOException { FilterByFilterAggregator.AdapterBuilder filterByFilterBuilder = - new FilterByFilterAggregator.AdapterBuilder( + new FilterByFilterAggregator.AdapterBuilder<>( name, keyed, keyedBucket, @@ -214,7 +214,7 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I (offsetInOwningOrd, docCount, subAggregationResults) -> { if 
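Both Entry holder classes above are collapsed into records. A compact-constructor form would keep the null checks from the old explicit constructor while dropping the remaining boilerplate; this is a sketch of that alternative (the actual diff keeps an explicit canonical constructor), with AggregationExecutionContext and PackedLongValues taken from the surrounding code:

    import java.util.Objects;

    // Sketch: BestBucketsDeferringCollector.Entry written with a compact constructor.
    record Entry(AggregationExecutionContext aggCtx, PackedLongValues docDeltas, PackedLongValues buckets) {
        Entry {
            Objects.requireNonNull(aggCtx);
            Objects.requireNonNull(docDeltas);
            Objects.requireNonNull(buckets);
        }
    }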
(offsetInOwningOrd < filters.size()) { return new InternalFilters.InternalBucket( - filters.get(offsetInOwningOrd).key().toString(), + filters.get(offsetInOwningOrd).key(), docCount, subAggregationResults, keyed, @@ -232,13 +232,7 @@ public InternalAggregation buildEmptyAggregation() { InternalAggregations subAggs = buildEmptySubAggregations(); List buckets = new ArrayList<>(filters.size() + (otherBucketKey == null ? 0 : 1)); for (QueryToFilterAdapter filter : filters) { - InternalFilters.InternalBucket bucket = new InternalFilters.InternalBucket( - filter.key().toString(), - 0, - subAggs, - keyed, - keyedBucket - ); + InternalFilters.InternalBucket bucket = new InternalFilters.InternalBucket(filter.key(), 0, subAggs, keyed, keyedBucket); buckets.add(bucket); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileUtils.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileUtils.java index e5fd0aa10ced2..52f63bf24be11 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileUtils.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileUtils.java @@ -203,7 +203,7 @@ public static int[] parseHash(String hashAsString) { public static String stringEncode(long hash) { final int[] res = parseHash(hash); validateZXY(res[0], res[1], res[2]); - return "" + res[0] + "/" + res[1] + "/" + res[2]; + return res[0] + "/" + res[1] + "/" + res[2]; } /** diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java index 2371506082f1b..48b361592519c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java @@ -79,7 +79,7 @@ public String getPreferredName() { return preferredName; } - private String preferredName; + private final String preferredName; IntervalTypeEnum(String preferredName) { this.preferredName = preferredName; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedVariableWidthHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedVariableWidthHistogram.java index ba33373354f3e..de7f29d785c75 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedVariableWidthHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedVariableWidthHistogram.java @@ -36,7 +36,7 @@ public List getBuckets() { return buckets; } - private static ObjectParser PARSER = new ObjectParser<>( + private static final ObjectParser PARSER = new ObjectParser<>( ParsedVariableWidthHistogram.class.getSimpleName(), true, ParsedVariableWidthHistogram::new diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java index 516c9d91a7b65..945ecd7424de3 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java @@ -82,7 +82,7 @@ private class 
BufferValuesPhase extends CollectionPhase { private DoubleArray buffer; private int bufferSize; - private int bufferLimit; + private final int bufferLimit; private MergeBucketsPhase mergeBucketsPhase; BufferValuesPhase(int bufferLimit) { @@ -97,7 +97,7 @@ public CollectionPhase collectValue(LeafBucketCollector sub, int doc, double val if (bufferSize < bufferLimit) { // Add to the buffer i.e store the doc in a new bucket buffer = bigArrays().grow(buffer, bufferSize + 1); - buffer.set((long) bufferSize, val); + buffer.set(bufferSize, val); collectBucket(sub, doc, bufferSize); bufferSize += 1; } @@ -432,7 +432,6 @@ public void close() { // Aggregation parameters private final int numBuckets; private final int shardSize; - private final int bufferLimit; private CollectionPhase collector; @@ -455,9 +454,8 @@ public void close() { this.valuesSource = (ValuesSource.Numeric) valuesSourceConfig.getValuesSource(); this.formatter = valuesSourceConfig.format(); this.shardSize = shardSize; - this.bufferLimit = initialBuffer; - collector = new BufferValuesPhase(this.bufferLimit); + collector = new BufferValuesPhase(initialBuffer); String scoringAgg = subAggsNeedScore(); String nestedAgg = descendsFromNestedAggregator(parent); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java index 7c89061ea32f2..b47259360f263 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java @@ -383,8 +383,16 @@ public static FromFilters adaptIntoFiltersOrNull( return null; } boolean wholeNumbersOnly = false == ((ValuesSource.Numeric) valuesSourceConfig.getValuesSource()).isFloatingPoint(); - FilterByFilterAggregator.AdapterBuilder> filterByFilterBuilder = new FilterByFilterAggregator.AdapterBuilder< - FromFilters>(name, false, false, null, context, parent, cardinality, metadata) { + FilterByFilterAggregator.AdapterBuilder> filterByFilterBuilder = new FilterByFilterAggregator.AdapterBuilder<>( + name, + false, + false, + null, + context, + parent, + cardinality, + metadata + ) { @Override protected FromFilters adapt(CheckedFunction delegate) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java index 5bca7718c9e2a..1344604a8d39c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java @@ -45,7 +45,7 @@ public class BestDocsDeferringCollector extends DeferringBucketCollector impleme private final List entries = new ArrayList<>(); private BucketCollector deferred; private ObjectArray perBucketSamples; - private int shardSize; + private final int shardSize; private PerSegmentCollects perSegCollector; private final BigArrays bigArrays; private final Consumer circuitBreakerConsumer; @@ -210,7 +210,7 @@ public int getDocCount() { } class PerSegmentCollects extends Scorable { - private AggregationExecutionContext aggCtx; + private final AggregationExecutionContext aggCtx; int maxDocId = Integer.MIN_VALUE; private float currentScore; private int currentDocId = -1; diff --git 
a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractInternalTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractInternalTerms.java index 542fcc84a6411..3f4ceda326140 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractInternalTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractInternalTerms.java @@ -204,7 +204,7 @@ protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurrent b) { if (lastBucket != null && cmp.compare(top.current(), lastBucket) != 0) { // the key changed so bundle up the last key's worth of buckets boolean shouldContinue = sink.apply( - new DelayedBucket(AbstractInternalTerms.this::reduceBucket, reduceContext, sameTermBuckets) + new DelayedBucket<>(AbstractInternalTerms.this::reduceBucket, reduceContext, sameTermBuckets) ); if (false == shouldContinue) { return; @@ -228,7 +228,7 @@ protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurrent b) { } if (sameTermBuckets.isEmpty() == false) { - sink.apply(new DelayedBucket(AbstractInternalTerms.this::reduceBucket, reduceContext, sameTermBuckets)); + sink.apply(new DelayedBucket<>(AbstractInternalTerms.this::reduceBucket, reduceContext, sameTermBuckets)); } } @@ -249,7 +249,7 @@ private void reduceLegacy( } for (List sameTermBuckets : bucketMap.values()) { boolean shouldContinue = sink.apply( - new DelayedBucket(AbstractInternalTerms.this::reduceBucket, reduceContext, sameTermBuckets) + new DelayedBucket<>(AbstractInternalTerms.this::reduceBucket, reduceContext, sameTermBuckets) ); if (false == shouldContinue) { return; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/IncludeExclude.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/IncludeExclude.java index 70f258e523527..524c648215345 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/IncludeExclude.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/IncludeExclude.java @@ -170,7 +170,7 @@ public int hashCode() { private Set valids; private Set invalids; - private Long spare = new Long(0); + private final Long spare = new Long(0); private SetBackedLongFilter(int numValids, int numInvalids) { if (numValids > 0) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongKeyedBucketOrds.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongKeyedBucketOrds.java index 279625654e734..6b21b11db5015 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongKeyedBucketOrds.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongKeyedBucketOrds.java @@ -130,8 +130,8 @@ public Iterator keyOrderedIterator(long owningBucketOrd) { } } Iterator toReturn = new Iterator<>() { - Iterator wrapped = keySet.iterator(); - long filterOrd = owningBucketOrd; + final Iterator wrapped = keySet.iterator(); + final long filterOrd = owningBucketOrd; long next; boolean hasNext = true; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregatorFromFilters.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregatorFromFilters.java index 0e0db3ab5054f..949685ec29487 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregatorFromFilters.java +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregatorFromFilters.java @@ -69,16 +69,7 @@ static StringTermsAggregatorFromFilters adaptIntoFiltersOrNull( return null; } FilterByFilterAggregator.AdapterBuilder filterByFilterBuilder = - new FilterByFilterAggregator.AdapterBuilder( - name, - false, - false, - null, - context, - parent, - cardinality, - metadata - ) { + new FilterByFilterAggregator.AdapterBuilder<>(name, false, false, null, context, parent, cardinality, metadata) { @Override protected StringTermsAggregatorFromFilters adapt( CheckedFunction delegate @@ -164,7 +155,7 @@ protected InternalAggregation adapt(InternalAggregation delegateResult) throws I } TermsEnum terms = valuesSupplier.get().termsEnum(); if (filters.getBuckets().size() > bucketCountThresholds.getShardSize()) { - PriorityQueue queue = new PriorityQueue(bucketCountThresholds.getShardSize()) { + PriorityQueue queue = new PriorityQueue<>(bucketCountThresholds.getShardSize()) { private final Comparator comparator = order.comparator(); @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java index 31c6a4a7e0430..bc407cd1e4761 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java @@ -468,7 +468,7 @@ Aggregator create( && ordinalsValuesSource.supportsGlobalOrdinalsMapping() && // we use the static COLLECT_SEGMENT_ORDS to allow tests to force specific optimizations - (COLLECT_SEGMENT_ORDS != null ? COLLECT_SEGMENT_ORDS.booleanValue() : ratio <= 0.5 && maxOrd <= 2048)) { + (COLLECT_SEGMENT_ORDS != null ? COLLECT_SEGMENT_ORDS : ratio <= 0.5 && maxOrd <= 2048)) { /* * We can use the low cardinality execution mode iff this aggregator: * - has no sub-aggregator AND @@ -505,7 +505,7 @@ Aggregator create( * is only possible if we're collecting from a single * bucket. 
*/ - remapGlobalOrds = REMAP_GLOBAL_ORDS.booleanValue(); + remapGlobalOrds = REMAP_GLOBAL_ORDS; } else { remapGlobalOrds = true; if (includeExclude == null diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorFactory.java index f27efafaf64cf..bbeebf858073a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorFactory.java @@ -111,7 +111,7 @@ public static ExecutionMode fromString(String value) { } } - boolean isHeuristicBased; + final boolean isHeuristicBased; ExecutionMode(boolean isHeuristicBased) { this.isHeuristicBased = isHeuristicBased; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregator.java index b44bc69ae68e6..d04a3744df4ff 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregator.java @@ -54,7 +54,7 @@ public double metric(String name, long bucketOrd) { if (state == null) { return Double.NaN; } else { - return InternalHDRPercentileRanks.percentileRank(state, Double.valueOf(name)); + return InternalHDRPercentileRanks.percentileRank(state, Double.parseDouble(name)); } } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlus.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlus.java index 08d0907c2a1bd..30225263eb8b4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlus.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlus.java @@ -244,7 +244,7 @@ private static class HyperLogLogIterator implements AbstractHyperLogLog.RunLenIt private final HyperLogLog hll; int pos; - long start; + final long start; private byte value; HyperLogLogIterator(HyperLogLog hll, long bucket) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedPercentiles.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedPercentiles.java index d1b0f03904ef9..3af30aa16f094 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedPercentiles.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedPercentiles.java @@ -111,7 +111,7 @@ protected static void declarePercentilesFields(ObjectParser 0) { - double key = Double.valueOf(parser.currentName().substring(0, i)); + double key = Double.parseDouble(parser.currentName().substring(0, i)); aggregation.addPercentileAsString(key, parser.text()); } else { aggregation.addPercentile(Double.valueOf(parser.currentName()), Double.valueOf(parser.text())); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregator.java index 5290aac3e055d..7e749b06442f6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregator.java +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregator.java @@ -158,7 +158,6 @@ public void doClose() { private class State { private final ScriptedMetricAggContexts.MapScript.LeafFactory mapScript; - private final Map mapScriptParamsForState; private final Map combineScriptParamsForState; private final Map aggState; private MapScript leafMapScript; @@ -166,7 +165,7 @@ private class State { State() { // Its possible for building the initial state to mutate the parameters as a side effect Map aggParamsForState = ScriptedMetricAggregatorFactory.deepCopyParams(aggParams); - mapScriptParamsForState = ScriptedMetricAggregatorFactory.mergeParams(aggParamsForState, mapScriptParams); + Map mapScriptParamsForState = ScriptedMetricAggregatorFactory.mergeParams(aggParamsForState, mapScriptParams); combineScriptParamsForState = ScriptedMetricAggregatorFactory.mergeParams(aggParamsForState, combineScriptParams); aggState = newInitialState(ScriptedMetricAggregatorFactory.mergeParams(aggParamsForState, initScriptParams)); mapScript = mapScriptFactory.newFactory( diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregator.java index 71082d7abc29c..8328f25a5cab0 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregator.java @@ -54,7 +54,7 @@ public double metric(String name, long bucketOrd) { if (state == null) { return Double.NaN; } else { - return InternalTDigestPercentileRanks.percentileRank(state, Double.valueOf(name)); + return InternalTDigestPercentileRanks.percentileRank(state, Double.parseDouble(name)); } } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregator.java index 55cd1efa40e0d..75f5c472c6665 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregator.java @@ -191,8 +191,7 @@ public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOE for (int i = 0; i < topDocs.scoreDocs.length; i++) { docIdsToLoad[i] = topDocs.scoreDocs[i].doc; } - subSearchContext.docIdsToLoad(docIdsToLoad); - subSearchContext.fetchPhase().execute(subSearchContext); + subSearchContext.fetchPhase().execute(subSearchContext, docIdsToLoad); FetchSearchResult fetchResult = subSearchContext.fetchResult(); if (fetchProfiles != null) { fetchProfiles.add(fetchResult.profileResult()); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgPipelineAggregationBuilder.java index c174dd5458685..c31acfcdd20f2 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgPipelineAggregationBuilder.java @@ -37,7 +37,7 @@ public class MovAvgPipelineAggregationBuilder extends AbstractPipelineAggregatio public static final String MOVING_AVG_AGG_DEPRECATION_MSG = "Moving Average aggregation usage is not supported. 
" + "Use the [moving_fn] aggregation instead."; - public static ParseField NAME_V7 = new ParseField("moving_avg").withAllDeprecated(MOVING_AVG_AGG_DEPRECATION_MSG) + public static final ParseField NAME_V7 = new ParseField("moving_avg").withAllDeprecated(MOVING_AVG_AGG_DEPRECATION_MSG) .forRestApiVersion(RestApiVersion.equalTo(RestApiVersion.V_7)); public static final ContextParser PARSER = (parser, name) -> { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregator.java index 4c2f9a825c1fa..9bd27a9931bd0 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregator.java @@ -92,9 +92,9 @@ public String toString() { } } - private String name; - private String[] bucketsPaths; - private Map metadata; + private final String name; + private final String[] bucketsPaths; + private final Map metadata; protected PipelineAggregator(String name, String[] bucketsPaths, Map metadata) { this.name = name; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffPipelineAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffPipelineAggregator.java index 500c107065520..7225d7652b3b8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffPipelineAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffPipelineAggregator.java @@ -28,9 +28,9 @@ import static org.elasticsearch.search.aggregations.pipeline.BucketHelpers.resolveBucketValue; public class SerialDiffPipelineAggregator extends PipelineAggregator { - private DocValueFormat formatter; - private GapPolicy gapPolicy; - private int lag; + private final DocValueFormat formatter; + private final GapPolicy gapPolicy; + private final int lag; SerialDiffPipelineAggregator( String name, diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/CoreValuesSourceType.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/CoreValuesSourceType.java index 35b8230a48554..24cceabf2388d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/CoreValuesSourceType.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/CoreValuesSourceType.java @@ -338,14 +338,10 @@ public Function roundingPreparer(AggregationContext @Override public QueryVisitor getSubVisitor(BooleanClause.Occur occur, Query parent) { // Only extract bounds queries that must filter the results - switch (occur) { - case MUST: - case FILTER: - return this; - - default: - return QueryVisitor.EMPTY_VISITOR; - } + return switch (occur) { + case MUST, FILTER -> this; + default -> QueryVisitor.EMPTY_VISITOR; + }; }; @Override @@ -450,5 +446,5 @@ public String typeName() { } /** List containing all members of the enumeration. 
*/ - public static List ALL_CORE = Arrays.asList(CoreValuesSourceType.values()); + public static final List ALL_CORE = Arrays.asList(CoreValuesSourceType.values()); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/FieldContext.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/FieldContext.java index 101e94b6717c4..30db7c984db7a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/FieldContext.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/FieldContext.java @@ -14,39 +14,24 @@ * Used by all field data based aggregators. This determine the context of the field data the aggregators are operating * in. It holds both the field names and the index field datas that are associated with them. */ -public class FieldContext { - - private final String field; - private final IndexFieldData indexFieldData; - private final MappedFieldType fieldType; +public record FieldContext(String field, IndexFieldData indexFieldData, MappedFieldType fieldType) { /** * Constructs a field data context for the given field and its index field data * - * @param field The name of the field - * @param indexFieldData The index field data of the field + * @param field The name of the field + * @param indexFieldData The index field data of the field */ - public FieldContext(String field, IndexFieldData indexFieldData, MappedFieldType fieldType) { - this.field = field; - this.indexFieldData = indexFieldData; - this.fieldType = fieldType; - } - - public String field() { - return field; - } + public FieldContext {} /** * @return The index field datas in this context */ + @Override public IndexFieldData indexFieldData() { return indexFieldData; } - public MappedFieldType fieldType() { - return fieldType; - } - public String getTypeName() { return fieldType.typeName(); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java index 4472083060d6e..7e0c235ee4fb3 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java @@ -96,7 +96,6 @@ protected MultiValuesSourceAggregationBuilder(StreamInput in) throws IOException /** * Read from a stream. 
*/ - @SuppressWarnings("unchecked") private void read(StreamInput in) throws IOException { fields = in.readMap(MultiValuesSourceFieldConfig::new); userValueTypeHint = in.readOptionalWriteable(ValueType::readFromStream); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/SamplingContext.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/SamplingContext.java index c1681a2070078..57ea138f63268 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/SamplingContext.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/SamplingContext.java @@ -21,7 +21,7 @@ * This provides information around the current sampling context for aggregations */ public record SamplingContext(double probability, int seed) { - public static SamplingContext NONE = new SamplingContext(1.0, 0); + public static final SamplingContext NONE = new SamplingContext(1.0, 0); public boolean isSampled() { return probability < 1.0; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java index bc83a5b5cd3b1..91bc2d12ac575 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java @@ -69,7 +69,7 @@ public ValuesSourceType getValuesSourceType() { return valuesSourceType; } - private static Set numericValueTypes = Set.of( + private static final Set numericValueTypes = Set.of( ValueType.DOUBLE, ValueType.DATE, ValueType.LONG, @@ -77,7 +77,7 @@ public ValuesSourceType getValuesSourceType() { ValueType.NUMERIC, ValueType.BOOLEAN ); - private static Set stringValueTypes = Set.of(ValueType.STRING, ValueType.IP); + private static final Set stringValueTypes = Set.of(ValueType.STRING, ValueType.IP); /** * This is a bit of a hack to mirror the old {@link ValueType} behavior, which would allow a rough compatibility between types. 
This diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceRegistry.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceRegistry.java index c33ad5266d4e2..01541b03dad1d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceRegistry.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceRegistry.java @@ -58,7 +58,7 @@ public int hashCode() { public static class Builder { private final AggregationUsageService.Builder usageServiceBuilder; - private Map, List>> aggregatorRegistry = new HashMap<>(); + private final Map, List>> aggregatorRegistry = new HashMap<>(); public Builder() { this.usageServiceBuilder = new AggregationUsageService.Builder(); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptDoubleValues.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptDoubleValues.java index 0e122162e5e87..32f84612fb887 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptDoubleValues.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptDoubleValues.java @@ -81,7 +81,7 @@ private static double toDoubleValue(Object o) { // that scripts return the same internal representation as regular fields, so boolean // values in scripts need to be converted to a number, and the value formatter will // make sure of using true/false in the key_as_string field - return ((Boolean) o).booleanValue() ? 1.0 : 0.0; + return (Boolean) o ? 1.0 : 0.0; } else { throw AggregationErrors.unsupportedScriptValue(o == null ? "null" : o.toString()); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptLongValues.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptLongValues.java index f702be71c49f3..66a8513e7c118 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptLongValues.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptLongValues.java @@ -83,7 +83,7 @@ private static long toLongValue(Object o) { // that scripts return the same internal representation as regular fields, so boolean // values in scripts need to be converted to a number, and the value formatter will // make sure of using true/false in the key_as_string field - return ((Boolean) o).booleanValue() ? 1L : 0L; + return (Boolean) o ? 1L : 0L; } else { throw AggregationErrors.unsupportedScriptValue(o == null ? 
"null" : o.toString()); } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java index 91ac7356a9670..5c98808c9c169 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java @@ -56,7 +56,7 @@ public FetchPhase(List fetchSubPhases) { this.fetchSubPhases[fetchSubPhases.size()] = new InnerHitsPhase(this); } - public void execute(SearchContext context) { + public void execute(SearchContext context, int[] docIdsToLoad) { if (LOGGER.isTraceEnabled()) { LOGGER.trace("{}", new SearchContextSourcePrinter(context)); } @@ -65,7 +65,7 @@ public void execute(SearchContext context) { throw new TaskCancelledException("cancelled"); } - if (context.docIdsToLoad() == null || context.docIdsToLoad().length == 0) { + if (docIdsToLoad == null || docIdsToLoad.length == 0) { // no individual hits to process, so we shortcut SearchHits hits = new SearchHits(new SearchHit[0], context.queryResult().getTotalHits(), context.queryResult().getMaxScore()); context.fetchResult().shardResult(hits, null); @@ -75,7 +75,7 @@ public void execute(SearchContext context) { Profiler profiler = context.getProfilers() == null ? Profiler.NOOP : Profilers.startProfilingFetchPhase(); SearchHits hits = null; try { - hits = buildSearchHits(context, profiler); + hits = buildSearchHits(context, docIdsToLoad, profiler); } finally { // Always finish profiling ProfileResult profileResult = profiler.finish(); @@ -96,7 +96,7 @@ public Source getSource(LeafReaderContext ctx, int doc) { } } - private SearchHits buildSearchHits(SearchContext context, Profiler profiler) { + private SearchHits buildSearchHits(SearchContext context, int[] docIdsToLoad, Profiler profiler) { FetchContext fetchContext = new FetchContext(context); SourceLoader sourceLoader = context.newSourceLoader(); @@ -166,7 +166,7 @@ protected SearchHit nextDoc(int doc) throws IOException { } }; - SearchHit[] hits = docsIterator.iterate(context.shardTarget(), context.searcher().getIndexReader(), context.docIdsToLoad()); + SearchHit[] hits = docsIterator.iterate(context.shardTarget(), context.searcher().getIndexReader(), docIdsToLoad); if (context.isCancelled()) { throw new TaskCancelledException("cancelled"); diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsPhase.java index 44e9a2a6e5193..feb0547a32536 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsPhase.java @@ -89,11 +89,10 @@ private void hitExecute(Map innerHi for (int j = 0; j < topDoc.topDocs.scoreDocs.length; j++) { docIdsToLoad[j] = topDoc.topDocs.scoreDocs[j].doc; } - innerHitsContext.docIdsToLoad(docIdsToLoad); innerHitsContext.setRootId(hit.getId()); innerHitsContext.setRootLookup(rootSource); - fetchPhase.execute(innerHitsContext); + fetchPhase.execute(innerHitsContext, docIdsToLoad); FetchSearchResult fetchResult = innerHitsContext.fetchResult(); SearchHit[] internalHits = fetchResult.fetchResult().hits().getHits(); for (int j = 0; j < internalHits.length; j++) { diff --git a/server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java b/server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java index 8bd91c9b9cfe7..c02a959231a61 100644 --- 
a/server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java @@ -359,16 +359,6 @@ public void seqNoAndPrimaryTerm(boolean seqNoAndPrimaryTerm) { in.seqNoAndPrimaryTerm(seqNoAndPrimaryTerm); } - @Override - public int[] docIdsToLoad() { - return in.docIdsToLoad(); - } - - @Override - public SearchContext docIdsToLoad(int[] docIdsToLoad) { - return in.docIdsToLoad(docIdsToLoad); - } - @Override public DfsSearchResult dfsResult() { return in.dfsResult(); diff --git a/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java b/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java index ef67d3d19e42f..512df4d15dcb0 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java @@ -12,6 +12,7 @@ import org.apache.lucene.search.TotalHits; import org.elasticsearch.action.search.SearchShardTask; import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.core.Assertions; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; @@ -44,6 +45,7 @@ import org.elasticsearch.search.rescore.RescoreContext; import org.elasticsearch.search.sort.SortAndFormats; import org.elasticsearch.search.suggest.SuggestionSearchContext; +import org.elasticsearch.transport.LeakTracker; import java.io.IOException; import java.util.HashMap; @@ -66,7 +68,14 @@ public abstract class SearchContext implements Releasable { public static final int DEFAULT_TRACK_TOTAL_HITS_UP_TO = 10000; protected final List releasables = new CopyOnWriteArrayList<>(); + private final AtomicBoolean closed = new AtomicBoolean(false); + + { + if (Assertions.ENABLED) { + releasables.add(LeakTracker.wrap(() -> { assert closed.get(); })); + } + } private InnerHitsContext innerHitsContext; private Query rewriteQuery; @@ -313,10 +322,6 @@ public Query rewrittenQuery() { /** controls whether the sequence number and primary term of the last modification to each hit should be returned */ public abstract void seqNoAndPrimaryTerm(boolean seqNoAndPrimaryTerm); - public abstract int[] docIdsToLoad(); - - public abstract SearchContext docIdsToLoad(int[] docIdsToLoad); - public abstract DfsSearchResult dfsResult(); /** diff --git a/server/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java b/server/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java index 8b4824e42cbf4..78a218bb3cd1b 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java @@ -41,8 +41,6 @@ public class SubSearchContext extends FilteredSearchContext { private final FetchSearchResult fetchSearchResult; private final QuerySearchResult querySearchResult; - private int[] docIdsToLoad; - private StoredFieldsContext storedFields; private ScriptFieldsContext scriptFields; private FetchSourceContext fetchSourceContext; @@ -55,8 +53,10 @@ public class SubSearchContext extends FilteredSearchContext { private boolean version; private boolean seqNoAndPrimaryTerm; + @SuppressWarnings("this-escape") public SubSearchContext(SearchContext context) { super(context); + context.addReleasable(this); this.fetchSearchResult = new FetchSearchResult(); this.querySearchResult = new QuerySearchResult(); } @@ -274,17 +274,6 @@ public void 
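SearchContext now registers an assertion-only releasable through the new LeakTracker.wrap(Releasable) overload, so a context that is never closed is reported as a leak when assertions are enabled. A self-contained sketch of the pattern; the class is hypothetical and the point where the closed flag is flipped is an assumption, since it is not part of this hunk:

    import java.util.List;
    import java.util.concurrent.CopyOnWriteArrayList;
    import java.util.concurrent.atomic.AtomicBoolean;

    import org.elasticsearch.core.Assertions;
    import org.elasticsearch.core.Releasable;
    import org.elasticsearch.core.Releasables;
    import org.elasticsearch.transport.LeakTracker;

    // Sketch mirroring the SearchContext change above; not the production class.
    class TrackedContext implements Releasable {
        private final List<Releasable> releasables = new CopyOnWriteArrayList<>();
        private final AtomicBoolean closed = new AtomicBoolean(false);

        TrackedContext() {
            if (Assertions.ENABLED) {
                // reported as a leak if this context is garbage-collected without close()
                releasables.add(LeakTracker.wrap(() -> { assert closed.get(); }));
            }
        }

        @Override
        public void close() {
            if (closed.compareAndSet(false, true)) {
                Releasables.close(releasables);
            }
        }
    }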
seqNoAndPrimaryTerm(boolean seqNoAndPrimaryTerm) { this.seqNoAndPrimaryTerm = seqNoAndPrimaryTerm; } - @Override - public int[] docIdsToLoad() { - return docIdsToLoad; - } - - @Override - public SearchContext docIdsToLoad(int[] docIdsToLoad) { - this.docIdsToLoad = docIdsToLoad; - return this; - } - @Override public CollapseContext collapse() { return null; diff --git a/server/src/main/java/org/elasticsearch/search/rank/RankSearchContext.java b/server/src/main/java/org/elasticsearch/search/rank/RankSearchContext.java index ed6fcd16fb5e2..86f7566683d21 100644 --- a/server/src/main/java/org/elasticsearch/search/rank/RankSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/rank/RankSearchContext.java @@ -59,6 +59,7 @@ public class RankSearchContext extends SearchContext { private final int windowSize; private final QuerySearchResult querySearchResult; + @SuppressWarnings("this-escape") public RankSearchContext(SearchContext parent, Query rankQuery, int windowSize) { this.parent = parent; this.rankQuery = parent.buildFilteredQuery(rankQuery); @@ -481,16 +482,6 @@ public void seqNoAndPrimaryTerm(boolean seqNoAndPrimaryTerm) { throw new UnsupportedOperationException(); } - @Override - public int[] docIdsToLoad() { - throw new UnsupportedOperationException(); - } - - @Override - public SearchContext docIdsToLoad(int[] docIdsToLoad) { - throw new UnsupportedOperationException(); - } - @Override public DfsSearchResult dfsResult() { throw new UnsupportedOperationException(); diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index a2a4c1bd444a5..61923dcff2d78 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -715,7 +715,9 @@ static DataStream updateDataStream(DataStream dataStream, Metadata.Builder metad dataStream.isSystem(), dataStream.isAllowCustomRouting(), dataStream.getIndexMode(), - dataStream.getLifecycle() + dataStream.getLifecycle(), + dataStream.isFailureStore(), + dataStream.getFailureIndices() ); } diff --git a/server/src/main/java/org/elasticsearch/telemetry/metric/DoubleWithAttributes.java b/server/src/main/java/org/elasticsearch/telemetry/metric/DoubleWithAttributes.java index e342b6128998d..ac0a6eec8a6fb 100644 --- a/server/src/main/java/org/elasticsearch/telemetry/metric/DoubleWithAttributes.java +++ b/server/src/main/java/org/elasticsearch/telemetry/metric/DoubleWithAttributes.java @@ -12,4 +12,7 @@ public record DoubleWithAttributes(double value, Map attributes) { + public DoubleWithAttributes(double value) { + this(value, Map.of()); + } } diff --git a/server/src/main/java/org/elasticsearch/telemetry/metric/LongGaugeMetric.java b/server/src/main/java/org/elasticsearch/telemetry/metric/LongGaugeMetric.java new file mode 100644 index 0000000000000..b5287fb18d346 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/telemetry/metric/LongGaugeMetric.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.telemetry.metric; + +import java.util.concurrent.atomic.AtomicLong; + +/** + * This wrapper allow us to record metric with APM (via {@link LongGauge}) while also access its current state via {@link AtomicLong} + */ +public record LongGaugeMetric(AtomicLong value, LongGauge gauge) { + + public static LongGaugeMetric create(MeterRegistry meterRegistry, String name, String description, String unit) { + final AtomicLong value = new AtomicLong(); + return new LongGaugeMetric( + value, + meterRegistry.registerLongGauge(name, description, unit, () -> new LongWithAttributes(value.get())) + ); + } + + public void set(long l) { + value.set(l); + } + + public long get() { + return value.get(); + } +} diff --git a/server/src/main/java/org/elasticsearch/telemetry/metric/LongWithAttributes.java b/server/src/main/java/org/elasticsearch/telemetry/metric/LongWithAttributes.java index eef880431fb83..8ef4dd1f4476d 100644 --- a/server/src/main/java/org/elasticsearch/telemetry/metric/LongWithAttributes.java +++ b/server/src/main/java/org/elasticsearch/telemetry/metric/LongWithAttributes.java @@ -12,4 +12,7 @@ public record LongWithAttributes(long value, Map attributes) { + public LongWithAttributes(long value) { + this(value, Map.of()); + } } diff --git a/server/src/main/java/org/elasticsearch/transport/LeakTracker.java b/server/src/main/java/org/elasticsearch/transport/LeakTracker.java index 4eefd4cd2080a..ea12953e7df12 100644 --- a/server/src/main/java/org/elasticsearch/transport/LeakTracker.java +++ b/server/src/main/java/org/elasticsearch/transport/LeakTracker.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.Assertions; import org.elasticsearch.core.RefCounted; +import org.elasticsearch.core.Releasable; import java.lang.ref.ReferenceQueue; import java.lang.ref.WeakReference; @@ -71,6 +72,20 @@ public void reportLeak() { } } + public static Releasable wrap(Releasable releasable) { + if (Assertions.ENABLED == false) { + return releasable; + } + var leak = INSTANCE.track(releasable); + return () -> { + try { + releasable.close(); + } finally { + leak.close(releasable); + } + }; + } + public static RefCounted wrap(RefCounted refCounted) { if (Assertions.ENABLED == false) { return refCounted; diff --git a/server/src/main/java/org/elasticsearch/transport/TransportInfo.java b/server/src/main/java/org/elasticsearch/transport/TransportInfo.java index e12dc599d5bf0..91520a6223a83 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportInfo.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportInfo.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.node.ReportingService; import org.elasticsearch.xcontent.XContentBuilder; @@ -30,6 +31,7 @@ public class TransportInfo implements ReportingService.Info { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(TransportInfo.class); /** Whether to add hostname to publish host field when serializing. 
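The new LongGaugeMetric pairs an AtomicLong with an APM LongGauge so the current value can be both reported via the registered callback and read back by the owning code. A hedged usage sketch; the metric name, description and wrapper class are made up for illustration:

    import org.elasticsearch.telemetry.metric.LongGaugeMetric;
    import org.elasticsearch.telemetry.metric.MeterRegistry;

    // Sketch: register a gauge and keep a handle that code can update and read.
    class QueueMetrics {
        private final LongGaugeMetric pendingTasks;

        QueueMetrics(MeterRegistry meterRegistry) {
            // hypothetical name/description/unit, for illustration only
            this.pendingTasks = LongGaugeMetric.create(
                meterRegistry,
                "es.example.pending_tasks.current",
                "Current number of pending tasks (example)",
                "count"
            );
        }

        void onTaskEnqueued() {
            // the gauge callback reports whatever the wrapped AtomicLong currently holds
            pendingTasks.value().incrementAndGet();
        }

        long currentlyPending() {
            return pendingTasks.get();
        }
    }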
*/ + @UpdateForV9 // Remove es.transport.cname_in_publish_address property from TransportInfo in 9.0.0 private static final boolean CNAME_IN_PUBLISH_ADDRESS = parseBoolean( System.getProperty("es.transport.cname_in_publish_address"), false diff --git a/server/src/main/java/org/elasticsearch/transport/TransportService.java b/server/src/main/java/org/elasticsearch/transport/TransportService.java index 8ef6ff3c9d8ef..7550435ac0bb8 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java @@ -38,6 +38,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.node.ReportingService; import org.elasticsearch.tasks.Task; @@ -1293,12 +1294,10 @@ public void onConnectionClosed(Transport.Connection connection) { return; } - // Callback that an exception happened, but on a different thread since we don't - // want handlers to worry about stack overflows. - // Execute on the current thread in the special case of a node shut down to notify the listener even when the threadpool has - // already been shut down. - final String executor = lifecycle.stoppedOrClosed() ? ThreadPool.Names.SAME : ThreadPool.Names.GENERIC; - threadPool.executor(executor).execute(new AbstractRunnable() { + // Callback that an exception happened, but on a different thread since we don't want handlers to worry about stack overflows. + final var executor = threadPool.generic(); + assert executor.isShutdown() == false : "connections should all be closed before threadpool shuts down"; + executor.execute(new AbstractRunnable() { @Override public void doRun() { for (Transport.ResponseContext holderToNotify : pruned) { @@ -1673,11 +1672,11 @@ Releasable withRef() { static { // Ensure that this property, introduced and immediately deprecated in 7.11, is not used in 8.x + @UpdateForV9 // we can remove this whole block in v9 final String PERMIT_HANDSHAKES_FROM_INCOMPATIBLE_BUILDS_KEY = "es.unsafely_permit_handshake_from_incompatible_builds"; if (System.getProperty(PERMIT_HANDSHAKES_FROM_INCOMPATIBLE_BUILDS_KEY) != null) { throw new IllegalArgumentException("system property [" + PERMIT_HANDSHAKES_FROM_INCOMPATIBLE_BUILDS_KEY + "] must not be set"); } - assert Version.CURRENT.major == Version.V_7_0_0.major + 1; // we can remove this whole block in v9 } private record UnregisterChildTransportResponseHandler( diff --git a/server/src/main/java/org/elasticsearch/transport/TransportStats.java b/server/src/main/java/org/elasticsearch/transport/TransportStats.java index 96c5a89256008..13cce6328b84e 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportStats.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportStats.java @@ -9,7 +9,6 @@ package org.elasticsearch.transport; import org.elasticsearch.TransportVersions; -import org.elasticsearch.Version; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -18,6 +17,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -165,11 
+165,14 @@ public Map getTransportActionStats() { return transportActionStats; } + @UpdateForV9 // Review and simplify the if-else blocks containing this symbol once v9 is released + private static final boolean IMPOSSIBLE_IN_V9 = true; + private boolean assertHistogramsConsistent() { assert inboundHandlingTimeBucketFrequencies.length == outboundHandlingTimeBucketFrequencies.length; if (inboundHandlingTimeBucketFrequencies.length == 0) { // Stats came from before v8.1 - assert Version.CURRENT.major == Version.V_8_0_0.major; + assert IMPOSSIBLE_IN_V9; } else { assert inboundHandlingTimeBucketFrequencies.length == HandlingTimeTracker.BUCKET_COUNT; } @@ -177,6 +180,7 @@ private boolean assertHistogramsConsistent() { } @Override + @UpdateForV9 // review the "if" blocks checking for non-empty once we have public Iterator toXContentChunked(ToXContent.Params outerParams) { return Iterators.concat(Iterators.single((builder, params) -> { builder.startObject(Fields.TRANSPORT); @@ -191,13 +195,13 @@ public Iterator toXContentChunked(ToXContent.Params outerP histogramToXContent(builder, outboundHandlingTimeBucketFrequencies, Fields.OUTBOUND_HANDLING_TIME_HISTOGRAM); } else { // Stats came from before v8.1 - assert Version.CURRENT.major == Version.V_7_0_0.major + 1; + assert IMPOSSIBLE_IN_V9; } if (transportActionStats.isEmpty() == false) { builder.startObject(Fields.ACTIONS); } else { // Stats came from before v8.8 - assert Version.CURRENT.major == Version.V_7_0_0.major + 1; + assert IMPOSSIBLE_IN_V9; } return builder; }), diff --git a/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification b/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification index eda1e78536a38..a42c1f7192d49 100644 --- a/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification +++ b/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification @@ -8,6 +8,7 @@ org.elasticsearch.features.FeatureInfrastructureFeatures org.elasticsearch.health.HealthFeatures +org.elasticsearch.cluster.service.TransportFeatures org.elasticsearch.cluster.metadata.MetadataFeatures org.elasticsearch.rest.RestFeatures org.elasticsearch.indices.IndicesFeatures diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java index cbe4acd137b2a..e702446406238 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java @@ -37,6 +37,7 @@ import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.tasks.Task; +import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.MockUtils; import org.elasticsearch.test.gateway.TestGatewayAllocator; @@ -114,7 +115,14 @@ public DesiredBalance compute( return super.compute(previousDesiredBalance, desiredBalanceInput, pendingDesiredBalanceMoves, isFresh); } }; - var allocator = new DesiredBalanceShardsAllocator(delegate, threadPool, clusterService, computer, (state, action) -> state); + var allocator = new DesiredBalanceShardsAllocator( + delegate, + threadPool, + clusterService, + 
computer, + (state, action) -> state, + TelemetryProvider.NOOP + ); var allocationService = new MockAllocationService( randomAllocationDeciders(settings, clusterSettings), new TestGatewayAllocator(), diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusActionTests.java index 2348602487518..e3dcc7cca5bbd 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusActionTests.java @@ -8,11 +8,11 @@ package org.elasticsearch.action.admin.cluster.migration; -import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.indices.SystemIndexDescriptorUtils; @@ -84,9 +84,7 @@ private static ClusterState getClusterState() { .numberOfReplicas(0) .build(); - // Once we start testing 9.x, we should update this test to use a 7.x "version created" - assert Version.CURRENT.major < 9; - + @UpdateForV9 // Once we start testing 9.x, we should update this test to use a 7.x "version created" IndexMetadata indexMetadata2 = IndexMetadata.builder(".test-index-2") .settings(Settings.builder().put("index.version.created", TEST_OLD_VERSION).build()) .numberOfShards(1) diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponseTests.java index 6fde4bed97a17..433563b99ef64 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponseTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Tuple; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.test.AbstractWireSerializingTestCase; import java.util.ArrayList; @@ -24,6 +25,7 @@ import java.util.Map; import java.util.function.Predicate; +@UpdateForV9 // no need to round-trip these objects over the wire any more, we only need a checkEqualsAndHashCode test public class GetAliasesResponseTests extends AbstractWireSerializingTestCase { @Override @@ -33,9 +35,8 @@ protected GetAliasesResponse createTestInstance() { /** * NB prior to 8.12 get-aliases was a TransportMasterNodeReadAction so for BwC we must remain able to write these responses so that - * older nodes can read them until we no longer need to support {@link org.elasticsearch.TransportVersions#CLUSTER_FEATURES_ADDED} and - * earlier. The reader implementation below is the production implementation from earlier versions, but moved here because it is unused - * in production now. + * older nodes can read them until we no longer need to support calling this action remotely. 
The reader implementation below is the + * production implementation from earlier versions, but moved here because it is unused in production now. */ @Override protected Writeable.Reader instanceReader() { diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexActionTests.java index c5639d5989d01..966ac50dfab37 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexActionTests.java @@ -67,7 +67,7 @@ public class TransportCreateIndexActionTests extends ESTestCase { ) .build() ) - .compatibilityVersions( + .nodeIdsToCompatibilityVersions( Map.of( "node-1", new CompatibilityVersions( diff --git a/server/src/test/java/org/elasticsearch/action/bulk/SimulateBulkRequestTests.java b/server/src/test/java/org/elasticsearch/action/bulk/SimulateBulkRequestTests.java new file mode 100644 index 0000000000000..7fe036f97596e --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/bulk/SimulateBulkRequestTests.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.bulk; + +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +public class SimulateBulkRequestTests extends ESTestCase { + + public void testSerialization() throws Exception { + testSerialization(getTestPipelineSubstitutions()); + testSerialization(null); + testSerialization(Map.of()); + } + + private void testSerialization(Map> pipelineSubstitutions) throws IOException { + SimulateBulkRequest simulateBulkRequest = new SimulateBulkRequest(pipelineSubstitutions); + /* + * Note: SimulateBulkRequest does not implement equals or hashCode, so we can't test serialization in the usual way for a + * Writable + */ + SimulateBulkRequest copy = copyWriteable(simulateBulkRequest, null, SimulateBulkRequest::new); + assertThat(copy.getPipelineSubstitutions(), equalTo(simulateBulkRequest.getPipelineSubstitutions())); + } + + private Map> getTestPipelineSubstitutions() { + return new HashMap<>() { + { + put("pipeline1", new HashMap<>() { + { + put("processors", List.of(new HashMap<>() { + { + put("processor2", new HashMap<>()); + } + }, new HashMap<>() { + { + put("processor3", new HashMap<>()); + } + })); + } + }); + put("pipeline2", new HashMap<>() { + { + put("processors", List.of(new HashMap<>() { + { + put("processor3", new HashMap<>()); + } + })); + } + }); + } + }; + } +} diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionTests.java new file mode 100644 index 0000000000000..647eafb5f3cdd --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionTests.java @@ -0,0 +1,206 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.bulk; + +import org.elasticsearch.ResourceAlreadyExistsException; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.ingest.SimulateIndexResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.index.IndexingPressure; +import org.elasticsearch.indices.EmptySystemIndices; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.test.index.IndexVersionUtils; +import org.elasticsearch.test.transport.CapturingTransport; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; + +import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.mock; + +public class TransportSimulateBulkActionTests extends ESTestCase { + + /** Services needed by bulk action */ + private TransportService transportService; + private ClusterService clusterService; + private TestThreadPool threadPool; + + private TestTransportSimulateBulkAction bulkAction; + + class TestTransportSimulateBulkAction extends TransportSimulateBulkAction { + + volatile boolean failIndexCreation = false; + boolean indexCreated = false; // set when the "real" index is created + Runnable beforeIndexCreation = null; + + TestTransportSimulateBulkAction() { + super( + TransportSimulateBulkActionTests.this.threadPool, + transportService, + clusterService, + null, + null, + new ActionFilters(Collections.emptySet()), + new TransportBulkActionTookTests.Resolver(), + new IndexingPressure(Settings.EMPTY), + EmptySystemIndices.INSTANCE + ); + } + + @Override + void createIndex(String index, TimeValue timeout, ActionListener listener) { + indexCreated = true; + if (beforeIndexCreation != null) { + beforeIndexCreation.run(); + } + if (failIndexCreation) { + listener.onFailure(new ResourceAlreadyExistsException("index already exists")); + } else { + listener.onResponse(null); + } + } + } + + @Before + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool(getClass().getName()); + DiscoveryNode discoveryNode 
= DiscoveryNodeUtils.builder("node") + .version( + VersionUtils.randomCompatibleVersion(random(), Version.CURRENT), + IndexVersions.MINIMUM_COMPATIBLE, + IndexVersionUtils.randomCompatibleVersion(random()) + ) + .build(); + clusterService = createClusterService(threadPool, discoveryNode); + CapturingTransport capturingTransport = new CapturingTransport(); + transportService = capturingTransport.createTransportService( + clusterService.getSettings(), + threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + boundAddress -> clusterService.localNode(), + null, + Collections.emptySet() + ); + transportService.start(); + transportService.acceptIncomingRequests(); + bulkAction = new TestTransportSimulateBulkAction(); + } + + @After + public void tearDown() throws Exception { + ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); + threadPool = null; + clusterService.close(); + super.tearDown(); + } + + public void testIndexData() { + Task task = mock(Task.class); // unused + BulkRequest bulkRequest = new SimulateBulkRequest((Map>) null); + int bulkItemCount = randomIntBetween(0, 200); + for (int i = 0; i < bulkItemCount; i++) { + Map source = Map.of(randomAlphaOfLength(10), randomAlphaOfLength(5)); + IndexRequest indexRequest = new IndexRequest(randomAlphaOfLength(10)).id(randomAlphaOfLength(10)).source(source); + for (int j = 0; j < randomIntBetween(0, 10); j++) { + indexRequest.addPipeline(randomAlphaOfLength(12)); + } + bulkRequest.add(); + } + AtomicBoolean onResponseCalled = new AtomicBoolean(false); + ActionListener listener = new ActionListener<>() { + @Override + public void onResponse(BulkResponse response) { + onResponseCalled.set(true); + BulkItemResponse[] responseItems = response.getItems(); + assertThat(responseItems.length, equalTo(bulkRequest.requests().size())); + for (int i = 0; i < responseItems.length; i++) { + BulkItemResponse responseItem = responseItems[i]; + IndexRequest indexRequest = (IndexRequest) bulkRequest.requests().get(i); + assertNull(responseItem.getFailure()); + assertThat(responseItem.getResponse(), instanceOf(SimulateIndexResponse.class)); + SimulateIndexResponse simulateIndexResponse = responseItem.getResponse(); + assertThat(simulateIndexResponse.getIndex(), equalTo(indexRequest.index())); + /* + * SimulateIndexResponse doesn't have an equals() method, and most of its state is private. So we check that + * its toXContent method produces the expected output. 
+ */ + String output = Strings.toString(simulateIndexResponse); + try { + assertEquals( + XContentHelper.stripWhitespace( + Strings.format( + """ + { + "_index": "%s", + "_source": %s, + "executed_pipelines": [%s] + }""", + indexRequest.index(), + indexRequest.source(), + indexRequest.getExecutedPipelines() + .stream() + .map(pipeline -> "\"" + pipeline + "\"") + .collect(Collectors.joining(",")) + ) + ), + output + ); + } catch (IOException e) { + fail(e); + } + } + } + + @Override + public void onFailure(Exception e) { + fail(e, "Unexpected error"); + } + }; + Set autoCreateIndices = Set.of(); // unused + Map indicesThatCannotBeCreated = Map.of(); // unused + long startTime = 0; + bulkAction.createMissingIndicesAndIndexData( + task, + bulkRequest, + randomAlphaOfLength(10), + listener, + autoCreateIndices, + indicesThatCannotBeCreated, + startTime + ); + assertThat(onResponseCalled.get(), equalTo(true)); + } +} diff --git a/server/src/test/java/org/elasticsearch/action/ingest/SimulateIndexResponseTests.java b/server/src/test/java/org/elasticsearch/action/ingest/SimulateIndexResponseTests.java new file mode 100644 index 0000000000000..7ce3b411e978f --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/ingest/SimulateIndexResponseTests.java @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.ingest; + +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.RandomObjects; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.equalTo; + +public class SimulateIndexResponseTests extends ESTestCase { + + public void testToXContent() throws IOException { + String id = randomAlphaOfLength(10); + String index = randomAlphaOfLength(5); + long version = randomLongBetween(0, 500); + final List pipelines = new ArrayList<>(); + for (int i = 0; i < randomIntBetween(0, 20); i++) { + pipelines.add(randomAlphaOfLength(20)); + } + String source = """ + {"doc": {"key1": "val1", "key2": "val2"}}"""; + BytesReference sourceBytes = BytesReference.fromByteBuffer(ByteBuffer.wrap(source.getBytes(StandardCharsets.UTF_8))); + SimulateIndexResponse indexResponse = new SimulateIndexResponse(id, index, version, sourceBytes, XContentType.JSON, pipelines); + String output = Strings.toString(indexResponse); + assertEquals( + XContentHelper.stripWhitespace( + Strings.format( + """ + { + "_id": "%s", + "_index": "%s", + "_version": %d, + "_source": %s, + "executed_pipelines": [%s] + }""", + id, + index, + version, + source, + pipelines.stream().map(pipeline -> "\"" + pipeline + "\"").collect(Collectors.joining(",")) + ) + ), + output + ); + } + + public void testSerialization() throws IOException { + // Note: SimulateIndexRequest does not implement equals or hashCode, so we can't test serialization in the usual 
way for a Writable + SimulateIndexResponse response = randomIndexResponse(); + IndexResponse copy = copyWriteable(response, null, SimulateIndexResponse::new); + assertThat(Strings.toString(response), equalTo(Strings.toString(copy))); + } + + /** + * Returns a tuple of {@link IndexResponse}s. + *
<p>
+ * The left element is the actual {@link IndexResponse} to serialize while the right element is the + * expected {@link IndexResponse} after parsing. + */ + private static SimulateIndexResponse randomIndexResponse() { + String id = randomAlphaOfLength(10); + String index = randomAlphaOfLength(5); + long version = randomLongBetween(0, 500); + final List pipelines = new ArrayList<>(); + for (int i = 0; i < randomIntBetween(0, 20); i++) { + pipelines.add(randomAlphaOfLength(20)); + } + XContentType xContentType = randomFrom(XContentType.values()); + BytesReference sourceBytes = RandomObjects.randomSource(random(), xContentType); + return new SimulateIndexResponse(id, index, version, sourceBytes, xContentType, pipelines); + } +} diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java index 424ccdafb87e4..9172b541a8236 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java @@ -293,12 +293,7 @@ public void testMerge() { reducedQueryPhase.suggest(), profile ); - InternalSearchResponse mergedResponse = SearchPhaseController.merge( - false, - reducedQueryPhase, - fetchResults.asList(), - fetchResults::get - ); + InternalSearchResponse mergedResponse = SearchPhaseController.merge(false, reducedQueryPhase, fetchResults); if (trackTotalHits == SearchContext.TRACK_TOTAL_HITS_DISABLED) { assertNull(mergedResponse.hits.getTotalHits()); } else { @@ -412,12 +407,7 @@ protected boolean lessThan(RankDoc a, RankDoc b) { reducedQueryPhase.suggest(), false ); - InternalSearchResponse mergedResponse = SearchPhaseController.merge( - false, - reducedQueryPhase, - fetchResults.asList(), - fetchResults::get - ); + InternalSearchResponse mergedResponse = SearchPhaseController.merge(false, reducedQueryPhase, fetchResults); if (trackTotalHits == SearchContext.TRACK_TOTAL_HITS_DISABLED) { assertNull(mergedResponse.hits.getTotalHits()); } else { diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java b/server/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java index 87401562edd81..ff0f166eb8339 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java @@ -45,6 +45,7 @@ import org.elasticsearch.indices.EmptySystemIndices; import org.elasticsearch.plugins.ClusterPlugin; import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.test.gateway.TestGatewayAllocator; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -155,7 +156,7 @@ public void testRegisterAllocationDeciderDuplicate() { public Collection createAllocationDeciders(Settings settings, ClusterSettings clusterSettings) { return Collections.singletonList(new EnableAllocationDecider(clusterSettings)); } - }), clusterInfoService, null, threadPool, EmptySystemIndices.INSTANCE, WriteLoadForecaster.DEFAULT) + }), clusterInfoService, null, threadPool, EmptySystemIndices.INSTANCE, WriteLoadForecaster.DEFAULT, TelemetryProvider.NOOP) ); assertEquals(e.getMessage(), "Cannot specify allocation decider [" + EnableAllocationDecider.class.getName() + "] twice"); } @@ -166,7 +167,7 @@ public void testRegisterAllocationDecider() { public Collection 
createAllocationDeciders(Settings settings, ClusterSettings clusterSettings) { return Collections.singletonList(new FakeAllocationDecider()); } - }), clusterInfoService, null, threadPool, EmptySystemIndices.INSTANCE, WriteLoadForecaster.DEFAULT); + }), clusterInfoService, null, threadPool, EmptySystemIndices.INSTANCE, WriteLoadForecaster.DEFAULT, TelemetryProvider.NOOP); assertTrue(module.deciderList.stream().anyMatch(d -> d.getClass().equals(FakeAllocationDecider.class))); } @@ -176,7 +177,7 @@ private ClusterModule newClusterModuleWithShardsAllocator(Settings settings, Str public Map> getShardsAllocators(Settings settings, ClusterSettings clusterSettings) { return Collections.singletonMap(name, supplier); } - }), clusterInfoService, null, threadPool, EmptySystemIndices.INSTANCE, WriteLoadForecaster.DEFAULT); + }), clusterInfoService, null, threadPool, EmptySystemIndices.INSTANCE, WriteLoadForecaster.DEFAULT, TelemetryProvider.NOOP); } public void testRegisterShardsAllocator() { @@ -208,7 +209,8 @@ public void testUnknownShardsAllocator() { null, threadPool, EmptySystemIndices.INSTANCE, - WriteLoadForecaster.DEFAULT + WriteLoadForecaster.DEFAULT, + TelemetryProvider.NOOP ) ); assertEquals("Unknown ShardsAllocator [dne]", e.getMessage()); @@ -272,7 +274,8 @@ public void testRejectsReservedExistingShardsAllocatorName() { null, threadPool, EmptySystemIndices.INSTANCE, - WriteLoadForecaster.DEFAULT + WriteLoadForecaster.DEFAULT, + TelemetryProvider.NOOP ); expectThrows(IllegalArgumentException.class, () -> clusterModule.setExistingShardsAllocators(new TestGatewayAllocator())); } @@ -286,7 +289,8 @@ public void testRejectsDuplicateExistingShardsAllocatorName() { null, threadPool, EmptySystemIndices.INSTANCE, - WriteLoadForecaster.DEFAULT + WriteLoadForecaster.DEFAULT, + TelemetryProvider.NOOP ); expectThrows(IllegalArgumentException.class, () -> clusterModule.setExistingShardsAllocators(new TestGatewayAllocator())); } diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java index 56c82ae12dc45..e0538603573f7 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java @@ -1248,7 +1248,7 @@ public void testHasMixedSystemIndexVersions() throws IOException { // equal mappings versions { var builder = ClusterState.builder(buildClusterState()); - builder.compatibilityVersions( + builder.nodeIdsToCompatibilityVersions( Map.of( "node1", new CompatibilityVersions( @@ -1268,7 +1268,7 @@ public void testHasMixedSystemIndexVersions() throws IOException { // unequal mappings versions { var builder = ClusterState.builder(buildClusterState()); - builder.compatibilityVersions( + builder.nodeIdsToCompatibilityVersions( Map.of( "node1", new CompatibilityVersions( @@ -1288,7 +1288,7 @@ public void testHasMixedSystemIndexVersions() throws IOException { // one node has a mappings version that the other is missing { var builder = ClusterState.builder(buildClusterState()); - builder.compatibilityVersions( + builder.nodeIdsToCompatibilityVersions( Map.of( "node1", new CompatibilityVersions( diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTemplateTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTemplateTests.java index 6033da5d1a68e..f189a07f73039 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTemplateTests.java +++ 
b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTemplateTests.java @@ -9,7 +9,6 @@ import org.elasticsearch.cluster.metadata.ComposableIndexTemplate.DataStreamTemplate; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.index.IndexMode; import org.elasticsearch.test.AbstractXContentSerializingTestCase; import org.elasticsearch.xcontent.XContentParser; @@ -38,8 +37,7 @@ protected DataStreamTemplate mutateInstance(DataStreamTemplate instance) { } public static DataStreamTemplate randomInstance() { - IndexMode indexMode = randomBoolean() ? randomFrom(IndexMode.values()) : null; - return new ComposableIndexTemplate.DataStreamTemplate(randomBoolean(), randomBoolean()); + return new ComposableIndexTemplate.DataStreamTemplate(randomBoolean(), randomBoolean(), randomBoolean()); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java index 1b1e512113712..1bda67030eca1 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java @@ -91,7 +91,9 @@ protected DataStream mutateInstance(DataStream instance) { var allowsCustomRouting = instance.isAllowCustomRouting(); var indexMode = instance.getIndexMode(); var lifecycle = instance.getLifecycle(); - switch (between(0, 9)) { + var failureStore = instance.isFailureStore(); + var failureIndices = instance.getFailureIndices(); + switch (between(0, 10)) { case 0 -> name = randomAlphaOfLength(10); case 1 -> indices = randomValueOtherThan(List.of(), DataStreamTestHelper::randomIndexInstances); case 2 -> generation = instance.getGeneration() + randomIntBetween(1, 10); @@ -120,6 +122,14 @@ protected DataStream mutateInstance(DataStream instance) { case 9 -> lifecycle = randomBoolean() && lifecycle != null ? 
null : DataStreamLifecycle.newBuilder().dataRetention(randomMillisUpToYear9999()).build(); + case 10 -> { + failureIndices = randomValueOtherThan(List.of(), DataStreamTestHelper::randomIndexInstances); + if (failureIndices.isEmpty()) { + failureStore = false; + } else { + failureStore = true; + } + } } return new DataStream( @@ -132,7 +142,9 @@ protected DataStream mutateInstance(DataStream instance) { isSystem, allowsCustomRouting, indexMode, - lifecycle + lifecycle, + failureStore, + failureIndices ); } @@ -187,7 +199,9 @@ public void testRolloverUpgradeToTsdbDataStream() { ds.isSystem(), ds.isAllowCustomRouting(), indexMode, - ds.getLifecycle() + ds.getLifecycle(), + ds.isFailureStore(), + ds.getFailureIndices() ); var newCoordinates = ds.nextWriteIndexAndGeneration(Metadata.EMPTY_METADATA); @@ -212,7 +226,9 @@ public void testRolloverDowngradeToRegularDataStream() { ds.isSystem(), ds.isAllowCustomRouting(), IndexMode.TIME_SERIES, - ds.getLifecycle() + ds.getLifecycle(), + ds.isFailureStore(), + ds.getFailureIndices() ); var newCoordinates = ds.nextWriteIndexAndGeneration(Metadata.EMPTY_METADATA); @@ -572,7 +588,9 @@ public void testSnapshot() { preSnapshotDataStream.isSystem(), preSnapshotDataStream.isAllowCustomRouting(), preSnapshotDataStream.getIndexMode(), - preSnapshotDataStream.getLifecycle() + preSnapshotDataStream.getLifecycle(), + preSnapshotDataStream.isFailureStore(), + preSnapshotDataStream.getFailureIndices() ); var reconciledDataStream = postSnapshotDataStream.snapshot( @@ -614,7 +632,9 @@ public void testSnapshotWithAllBackingIndicesRemoved() { preSnapshotDataStream.isSystem(), preSnapshotDataStream.isAllowCustomRouting(), preSnapshotDataStream.getIndexMode(), - preSnapshotDataStream.getLifecycle() + preSnapshotDataStream.getLifecycle(), + preSnapshotDataStream.isFailureStore(), + preSnapshotDataStream.getFailureIndices() ); assertNull(postSnapshotDataStream.snapshot(preSnapshotDataStream.getIndices().stream().map(Index::getName).toList())); @@ -1613,6 +1633,11 @@ public void testXContentSerializationWithRollover() throws IOException { if (randomBoolean()) { metadata = Map.of("key", "value"); } + boolean failureStore = randomBoolean(); + List failureIndices = List.of(); + if (failureStore) { + failureIndices = randomIndexInstances(); + } DataStreamLifecycle lifecycle = DataStreamLifecycle.newBuilder().dataRetention(randomMillisUpToYear9999()).build(); DataStream dataStream = new DataStream( @@ -1626,7 +1651,9 @@ public void testXContentSerializationWithRollover() throws IOException { System::currentTimeMillis, randomBoolean(), randomBoolean() ? 
IndexMode.STANDARD : null, // IndexMode.TIME_SERIES triggers validation that many unit tests doesn't pass - lifecycle + lifecycle, + failureStore, + failureIndices ); try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java index e7ec430e6bb20..e11f8c0cbe108 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java @@ -214,6 +214,38 @@ private static AliasMetadata randomAlias(String prefix) { return builder.build(); } + public void testCreateDataStreamWithFailureStore() throws Exception { + final MetadataCreateIndexService metadataCreateIndexService = getMetadataCreateIndexService(); + final String dataStreamName = "my-data-stream"; + ComposableIndexTemplate template = new ComposableIndexTemplate.Builder().indexPatterns(List.of(dataStreamName + "*")) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false, true)) + .build(); + ClusterState cs = ClusterState.builder(new ClusterName("_name")) + .metadata(Metadata.builder().put("template", template).build()) + .build(); + CreateDataStreamClusterStateUpdateRequest req = new CreateDataStreamClusterStateUpdateRequest(dataStreamName); + ClusterState newState = MetadataCreateDataStreamService.createDataStream( + metadataCreateIndexService, + cs, + randomBoolean(), + req, + ActionListener.noop() + ); + var backingIndexName = DataStream.getDefaultBackingIndexName(dataStreamName, 1, req.getStartTime()); + var failureStoreIndexName = DataStream.getDefaultFailureStoreName(dataStreamName, 1, req.getStartTime()); + assertThat(newState.metadata().dataStreams().size(), equalTo(1)); + assertThat(newState.metadata().dataStreams().get(dataStreamName).getName(), equalTo(dataStreamName)); + assertThat(newState.metadata().dataStreams().get(dataStreamName).isSystem(), is(false)); + assertThat(newState.metadata().dataStreams().get(dataStreamName).isHidden(), is(false)); + assertThat(newState.metadata().dataStreams().get(dataStreamName).isReplicated(), is(false)); + assertThat(newState.metadata().index(backingIndexName), notNullValue()); + assertThat(newState.metadata().index(backingIndexName).getSettings().get("index.hidden"), equalTo("true")); + assertThat(newState.metadata().index(backingIndexName).isSystem(), is(false)); + assertThat(newState.metadata().index(failureStoreIndexName), notNullValue()); + assertThat(newState.metadata().index(failureStoreIndexName).getSettings().get("index.hidden"), equalTo("true")); + assertThat(newState.metadata().index(failureStoreIndexName).isSystem(), is(false)); + } + public void testCreateSystemDataStream() throws Exception { final MetadataCreateIndexService metadataCreateIndexService = getMetadataCreateIndexService(); final String dataStreamName = ".system-data-stream"; diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsServiceTests.java index 172b3a6902f88..ba3b1a7387110 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsServiceTests.java @@ 
-354,7 +354,9 @@ public void testRemoveBrokenBackingIndexReference() { original.isSystem(), original.isAllowCustomRouting(), original.getIndexMode(), - original.getLifecycle() + original.getLifecycle(), + original.isFailureStore(), + original.getFailureIndices() ); var brokenState = ClusterState.builder(state).metadata(Metadata.builder(state.getMetadata()).put(broken).build()).build(); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterAllocationSimulationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterAllocationSimulationTests.java index a057b638c04e2..13ea91a2bc99b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterAllocationSimulationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterAllocationSimulationTests.java @@ -47,6 +47,7 @@ import org.elasticsearch.index.IndexVersion; import org.elasticsearch.snapshots.SnapshotShardSizeInfo; import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.gateway.TestGatewayAllocator; import org.elasticsearch.threadpool.ThreadPool; @@ -487,7 +488,8 @@ private Map.Entry createNewAllocationSer threadPool, clusterService, (clusterState, routingAllocationAction) -> strategyRef.get() - .executeWithRoutingAllocation(clusterState, "reconcile-desired-balance", routingAllocationAction) + .executeWithRoutingAllocation(clusterState, "reconcile-desired-balance", routingAllocationAction), + TelemetryProvider.NOOP ) { @Override public void allocate(RoutingAllocation allocation, ActionListener listener) { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ContinuousComputationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ContinuousComputationTests.java index f85f3fbd356d9..cebc4860012ad 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ContinuousComputationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ContinuousComputationTests.java @@ -8,7 +8,9 @@ package org.elasticsearch.cluster.routing.allocation.allocator; +import org.apache.logging.log4j.Level; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.junit.AfterClass; @@ -19,11 +21,11 @@ import java.util.concurrent.CyclicBarrier; import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.sameInstance; -import static org.junit.Assert.assertEquals; public class ContinuousComputationTests extends ESTestCase { @@ -46,7 +48,7 @@ public static void terminateThreadPool() { public void testConcurrency() throws Exception { final var result = new AtomicReference(); - final var computation = new ContinuousComputation(threadPool) { + final var computation = new ContinuousComputation(threadPool.generic()) { public final Semaphore executePermit = new Semaphore(1); @@ -94,7 +96,7 @@ public void testSkipsObsoleteValues() throws Exception { final var finalInput = new Object(); final var result = new AtomicReference(); - 
final var computation = new ContinuousComputation(threadPool) { + final var computation = new ContinuousComputation(threadPool.generic()) { @Override protected void processInput(Object input) { assertNotEquals(input, skippedInput); @@ -134,4 +136,59 @@ protected void processInput(Object input) { await.run(); assertBusy(() -> assertFalse(computation.isActive())); } + + public void testFailureHandling() { + final var input1 = new Object(); + final var input2 = new Object(); + + final var successCount = new AtomicInteger(); + final var failureCount = new AtomicInteger(); + + final var computation = new ContinuousComputation<>(r -> { + try { + r.run(); + successCount.incrementAndGet(); + } catch (AssertionError e) { + assertEquals("simulated", asInstanceOf(RuntimeException.class, e.getCause()).getMessage()); + failureCount.incrementAndGet(); + } + }) { + @Override + protected void processInput(Object input) { + if (input == input1) { + onNewInput(input2); + throw new RuntimeException("simulated"); + } + } + + @Override + public String toString() { + return "test computation"; + } + }; + + MockLogAppender.assertThatLogger( + () -> computation.onNewInput(input1), + ContinuousComputation.class, + new MockLogAppender.SeenEventExpectation( + "error log", + ContinuousComputation.class.getCanonicalName(), + Level.ERROR, + "unexpected error processing [test computation]" + ) + ); + + // check that both inputs were processed + assertEquals(1, failureCount.get()); + assertEquals(1, successCount.get()); + + // check that the computation still accepts and processes new inputs + computation.onNewInput(input2); + assertEquals(1, failureCount.get()); + assertEquals(2, successCount.get()); + + computation.onNewInput(input1); + assertEquals(2, failureCount.get()); + assertEquals(3, successCount.get()); + } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java index c1c83b8a2d90e..b4eba769543b8 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java @@ -15,12 +15,11 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.DiskUsage; +import org.elasticsearch.cluster.ESAllocationTestCase; import org.elasticsearch.cluster.TestShardRoutingRoleStrategies; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; -import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.AllocationId; import org.elasticsearch.cluster.routing.IndexRoutingTable; @@ -45,7 +44,6 @@ import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.snapshots.SnapshotShardSizeInfo; -import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.threadpool.ThreadPool; @@ -60,6 +58,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; +import 
java.util.function.Function; import static java.util.stream.Collectors.toMap; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; @@ -78,7 +77,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -public class DesiredBalanceComputerTests extends ESTestCase { +public class DesiredBalanceComputerTests extends ESAllocationTestCase { static final String TEST_INDEX = "test-index"; @@ -388,33 +387,29 @@ public void testRespectsAssignmentByGatewayAllocators() { public void testSimulatesAchievingDesiredBalanceBeforeDelegating() { var allocateCalled = new AtomicBoolean(); - var desiredBalanceComputer = new DesiredBalanceComputer( - createBuiltInClusterSettings(), - mock(ThreadPool.class), - new ShardsAllocator() { - @Override - public void allocate(RoutingAllocation allocation) { - assertTrue(allocateCalled.compareAndSet(false, true)); - // whatever the allocation in the current cluster state, the desired balance service should start by moving all the - // known shards to their desired locations before delegating to the inner allocator - for (var routingNode : allocation.routingNodes()) { - assertThat( - allocation.routingNodes().toString(), - routingNode.numberOfOwningShards(), - equalTo(routingNode.nodeId().equals("node-2") ? 0 : 2) - ); - for (var shardRouting : routingNode) { - assertTrue(shardRouting.toString(), shardRouting.started()); - } + var desiredBalanceComputer = createDesiredBalanceComputer(new ShardsAllocator() { + @Override + public void allocate(RoutingAllocation allocation) { + assertTrue(allocateCalled.compareAndSet(false, true)); + // whatever the allocation in the current cluster state, the desired balance service should start by moving all the + // known shards to their desired locations before delegating to the inner allocator + for (var routingNode : allocation.routingNodes()) { + assertThat( + allocation.routingNodes().toString(), + routingNode.numberOfOwningShards(), + equalTo(routingNode.nodeId().equals("node-2") ? 
0 : 2) + ); + for (var shardRouting : routingNode) { + assertTrue(shardRouting.toString(), shardRouting.started()); } } + } - @Override - public ShardAllocationDecision decideShardAllocation(ShardRouting shard, RoutingAllocation allocation) { - throw new AssertionError("only used for allocation explain"); - } + @Override + public ShardAllocationDecision decideShardAllocation(ShardRouting shard, RoutingAllocation allocation) { + throw new AssertionError("only used for allocation explain"); } - ); + }); var clusterState = createInitialClusterState(3); var index = clusterState.metadata().index(TEST_INDEX).getIndex(); @@ -584,7 +579,7 @@ public void testDesiredBalanceShouldConvergeInABigCluster() { for (int node = 0; node < nodes; node++) { var nodeId = "node-" + node; nodeIds.add(nodeId); - discoveryNodesBuilder.add(createDiscoveryNode(nodeId, DiscoveryNodeRole.roles())); + discoveryNodesBuilder.add(newNode(nodeId)); usedDiskSpace.put(nodeId, 0L); } @@ -692,11 +687,12 @@ public void testDesiredBalanceShouldConvergeInABigCluster() { var settings = Settings.EMPTY; var input = new DesiredBalanceInput(randomInt(), routingAllocationWithDecidersOf(clusterState, clusterInfo, settings), List.of()); - var desiredBalance = new DesiredBalanceComputer( - createBuiltInClusterSettings(), - mock(ThreadPool.class), - new BalancedShardsAllocator(settings) - ).compute(DesiredBalance.INITIAL, input, queue(), ignored -> iteration.incrementAndGet() < 1000); + var desiredBalance = createDesiredBalanceComputer(new BalancedShardsAllocator(settings)).compute( + DesiredBalance.INITIAL, + input, + queue(), + ignored -> iteration.incrementAndGet() < 1000 + ); var desiredDiskUsage = Maps.newMapWithExpectedSize(nodes); for (var assignment : desiredBalance.assignments().entrySet()) { @@ -736,10 +732,7 @@ private String pickAndRemoveRandomValueFrom(List values) { public void testComputeConsideringShardSizes() { - var discoveryNodesBuilder = DiscoveryNodes.builder() - .add(createDiscoveryNode("node-0", DiscoveryNodeRole.roles())) - .add(createDiscoveryNode("node-1", DiscoveryNodeRole.roles())) - .add(createDiscoveryNode("node-2", DiscoveryNodeRole.roles())); + var discoveryNodesBuilder = DiscoveryNodes.builder().add(newNode("node-0")).add(newNode("node-1")).add(newNode("node-2")); var metadataBuilder = Metadata.builder(); var routingTableBuilder = RoutingTable.builder(); @@ -751,7 +744,7 @@ public void testComputeConsideringShardSizes() { metadataBuilder.put( IndexMetadata.builder(indexName) - .settings(indexSettings(IndexVersion.current(), 1, 1).put("index.routing.allocation.exclude._name", "node-2")) + .settings(indexSettings(IndexVersion.current(), 1, 1).put("index.routing.allocation.exclude._id", "node-2")) ); var indexId = metadataBuilder.get(indexName).getIndex(); @@ -784,7 +777,7 @@ public void testComputeConsideringShardSizes() { metadataBuilder.put( IndexMetadata.builder(indexName) - .settings(indexSettings(IndexVersion.current(), 1, 0).put("index.routing.allocation.exclude._name", "node-2")) + .settings(indexSettings(IndexVersion.current(), 1, 0).put("index.routing.allocation.exclude._id", "node-2")) ); var indexId = metadataBuilder.get(indexName).getIndex(); @@ -807,9 +800,8 @@ public void testComputeConsideringShardSizes() { var node1Usage = new DiskUsage("node-1", "node-1", "/data", 1000, 100); var node2Usage = new DiskUsage("node-2", "node-2", "/data", 1000, 1000); - var clusterInfo = new ClusterInfo( - Map.of(node0Usage.nodeId(), node0Usage, node1Usage.nodeId(), node1Usage, node2Usage.getNodeId(), node2Usage), 
- Map.of(node0Usage.nodeId(), node0Usage, node1Usage.nodeId(), node1Usage, node2Usage.getNodeId(), node2Usage), + var clusterInfo = createClusterInfo( + List.of(node0Usage, node1Usage, node2Usage), Map.ofEntries( // node-0 & node-1 indexSize(clusterState, "index-0", 500, true), @@ -825,10 +817,7 @@ public void testComputeConsideringShardSizes() { indexSize(clusterState, "index-7", 50, true), indexSize(clusterState, "index-8", 50, true), indexSize(clusterState, "index-9", 50, true) - ), - Map.of(), - Map.of(), - Map.of() + ) ); var settings = Settings.builder() @@ -850,11 +839,7 @@ public void testComputeConsideringShardSizes() { ) ); - var desiredBalance = new DesiredBalanceComputer( - createBuiltInClusterSettings(), - mock(ThreadPool.class), - new BalancedShardsAllocator(settings) - ).compute( + var desiredBalance = createDesiredBalanceComputer(new BalancedShardsAllocator(settings)).compute( initial, new DesiredBalanceInput(randomInt(), routingAllocationWithDecidersOf(clusterState, clusterInfo, settings), List.of()), queue(), @@ -872,6 +857,11 @@ public void testComputeConsideringShardSizes() { assertThat(resultDiskUsage, allOf(aMapWithSize(2), hasEntry("node-0", 950L), hasEntry("node-1", 850L))); } + private static ClusterInfo createClusterInfo(List diskUsages, Map shardSizes) { + var diskUsage = diskUsages.stream().collect(toMap(DiskUsage::getNodeId, Function.identity())); + return new ClusterInfo(diskUsage, diskUsage, shardSizes, Map.of(), Map.of(), Map.of()); + } + public void testShouldLogComputationIteration() { checkIterationLogging( 999, @@ -961,9 +951,9 @@ private static ShardId findShardId(ClusterState clusterState, String name) { } static ClusterState createInitialClusterState(int dataNodesCount) { - var discoveryNodes = DiscoveryNodes.builder().add(createDiscoveryNode("master", Set.of(DiscoveryNodeRole.MASTER_ROLE))); + var discoveryNodes = DiscoveryNodes.builder().add(newNode("master", Set.of(DiscoveryNodeRole.MASTER_ROLE))); for (int i = 0; i < dataNodesCount; i++) { - discoveryNodes.add(createDiscoveryNode("node-" + i, Set.of(DiscoveryNodeRole.DATA_ROLE))); + discoveryNodes.add(newNode("node-" + i, Set.of(DiscoveryNodeRole.DATA_ROLE))); } var indexMetadata = IndexMetadata.builder(TEST_INDEX).settings(indexSettings(IndexVersion.current(), 2, 1)).build(); @@ -1019,15 +1009,11 @@ private static ShardRouting mutateAllocationStatus(ShardRouting shardRouting) { } } - private static DiscoveryNode createDiscoveryNode(String id, Set roles) { - return DiscoveryNodeUtils.builder(id).name(id).externalId(UUIDs.randomBase64UUID(random())).roles(roles).build(); - } - /** * @return a {@link DesiredBalanceComputer} which allocates unassigned primaries to node-0 and unassigned replicas to node-1 */ private static DesiredBalanceComputer createDesiredBalanceComputer() { - return new DesiredBalanceComputer(createBuiltInClusterSettings(), mock(ThreadPool.class), new ShardsAllocator() { + return createDesiredBalanceComputer(new ShardsAllocator() { @Override public void allocate(RoutingAllocation allocation) { final var unassignedIterator = allocation.routingNodes().unassigned().iterator(); @@ -1054,6 +1040,10 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing }); } + private static DesiredBalanceComputer createDesiredBalanceComputer(ShardsAllocator shardsAllocator) { + return new DesiredBalanceComputer(createBuiltInClusterSettings(), mock(ThreadPool.class), shardsAllocator); + } + private static void assertDesiredAssignments(DesiredBalance desiredBalance, Map 
expected) { assertThat(desiredBalance.assignments(), equalTo(expected)); } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java index b67b4ef7e5a7f..1b3fa260db1fa 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java @@ -63,6 +63,7 @@ import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotShardSizeInfo; import org.elasticsearch.snapshots.SnapshotsInfoService; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.threadpool.ThreadPool; @@ -1206,7 +1207,7 @@ public void testRebalanceDoesNotCauseHotSpots() { new ConcurrentRebalanceAllocationDecider(clusterSettings), new ThrottlingAllocationDecider(clusterSettings) }; - var reconciler = new DesiredBalanceReconciler(clusterSettings, mock(ThreadPool.class)); + var reconciler = new DesiredBalanceReconciler(clusterSettings, mock(ThreadPool.class), mock(MeterRegistry.class)); var totalOutgoingMoves = new HashMap(); for (int i = 0; i < numberOfNodes; i++) { @@ -1275,7 +1276,7 @@ public void testShouldLogOnTooManyUndesiredAllocations() { var threadPool = mock(ThreadPool.class); when(threadPool.relativeTimeInMillis()).thenReturn(1L).thenReturn(2L).thenReturn(3L); - var reconciler = new DesiredBalanceReconciler(createBuiltInClusterSettings(), threadPool); + var reconciler = new DesiredBalanceReconciler(createBuiltInClusterSettings(), threadPool, mock(MeterRegistry.class)); var expectedWarningMessage = "[100%] of assigned shards (" + shardCount @@ -1315,7 +1316,10 @@ public void testShouldLogOnTooManyUndesiredAllocations() { } private static void reconcile(RoutingAllocation routingAllocation, DesiredBalance desiredBalance) { - new DesiredBalanceReconciler(createBuiltInClusterSettings(), mock(ThreadPool.class)).reconcile(desiredBalance, routingAllocation); + new DesiredBalanceReconciler(createBuiltInClusterSettings(), mock(ThreadPool.class), mock(MeterRegistry.class)).reconcile( + desiredBalance, + routingAllocation + ); } private static boolean isReconciled(RoutingNode node, DesiredBalance balance) { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java index add94e3b9344b..a4e5ccb7e6fa4 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java @@ -47,6 +47,7 @@ import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.snapshots.SnapshotShardSizeInfo; +import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.threadpool.TestThreadPool; @@ -157,7 +158,8 @@ public ClusterState apply(ClusterState clusterState, RerouteStrategy routingAllo createShardsAllocator(), threadPool, clusterService, - reconcileAction + reconcileAction, + 
TelemetryProvider.NOOP ); assertValidStats(desiredBalanceShardsAllocator.getStats()); var allocationService = createAllocationService(desiredBalanceShardsAllocator, createGatewayAllocator(allocateUnassigned)); @@ -277,7 +279,8 @@ public ClusterState apply(ClusterState clusterState, RerouteStrategy routingAllo createShardsAllocator(), threadPool, clusterService, - reconcileAction + reconcileAction, + TelemetryProvider.NOOP ); var allocationService = new AllocationService( new AllocationDeciders(List.of()), @@ -369,7 +372,8 @@ public DesiredBalance compute( return super.compute(previousDesiredBalance, desiredBalanceInput, pendingDesiredBalanceMoves, isFresh); } }, - reconcileAction + reconcileAction, + TelemetryProvider.NOOP ); var allocationService = createAllocationService(desiredBalanceShardsAllocator, gatewayAllocator); allocationServiceRef.set(allocationService); @@ -471,7 +475,8 @@ public DesiredBalance compute( return super.compute(previousDesiredBalance, desiredBalanceInput, pendingDesiredBalanceMoves, isFresh); } }, - reconcileAction + reconcileAction, + TelemetryProvider.NOOP ); var allocationService = createAllocationService(desiredBalanceShardsAllocator, gatewayAllocator); @@ -561,7 +566,8 @@ public DesiredBalance compute( threadPool, clusterService, desiredBalanceComputer, - (reconcilerClusterState, rerouteStrategy) -> reconcilerClusterState + (reconcilerClusterState, rerouteStrategy) -> reconcilerClusterState, + TelemetryProvider.NOOP ); var service = createAllocationService(desiredBalanceShardsAllocator, createGatewayAllocator()); @@ -613,7 +619,8 @@ public void testResetDesiredBalanceOnNoLongerMaster() { threadPool, clusterService, desiredBalanceComputer, - (reconcilerClusterState, rerouteStrategy) -> reconcilerClusterState + (reconcilerClusterState, rerouteStrategy) -> reconcilerClusterState, + TelemetryProvider.NOOP ); var service = createAllocationService(desiredBalanceShardsAllocator, createGatewayAllocator()); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStatsTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStatsTests.java index d962472f23f95..bc71093bdfe98 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStatsTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStatsTests.java @@ -68,7 +68,7 @@ public void testToXContent() { "unassigned_shards" : %d, "total_allocations" : %d, "undesired_allocations" : %d, - "undesired_allocations_fraction" : %s + "undesired_allocations_ratio" : %s }""", instance.lastConvergedIndex(), instance.computationActive(), @@ -82,7 +82,7 @@ public void testToXContent() { instance.unassignedShards(), instance.totalAllocations(), instance.undesiredAllocations(), - Double.toString(instance.undesiredAllocationsFraction()) + Double.toString(instance.undesiredAllocationsRatio()) ) ) ); diff --git a/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java b/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java index ece78448a1e8c..ce78d603ea8d2 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.cluster.version.CompatibilityVersions; import 
org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.Maps; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.Scheduler; import org.mockito.ArgumentCaptor; @@ -116,7 +117,13 @@ public void testNothingFixedWhenNothingToInfer() { .nodeIdsToCompatibilityVersions(versions(new CompatibilityVersions(TransportVersions.V_8_8_0, Map.of()))) .build(); - TransportVersionsFixupListener listeners = new TransportVersionsFixupListener(taskQueue, client, null, null); + TransportVersionsFixupListener listeners = new TransportVersionsFixupListener( + taskQueue, + client, + new FeatureService(List.of(new TransportFeatures())), + null, + null + ); listeners.clusterChanged(new ClusterChangedEvent("test", testState, ClusterState.EMPTY_STATE)); verify(taskQueue, never()).submitTask(anyString(), any(), any()); @@ -131,7 +138,13 @@ public void testNothingFixedWhenOnNextVersion() { .nodeIdsToCompatibilityVersions(versions(new CompatibilityVersions(NEXT_TRANSPORT_VERSION, Map.of()))) .build(); - TransportVersionsFixupListener listeners = new TransportVersionsFixupListener(taskQueue, client, null, null); + TransportVersionsFixupListener listeners = new TransportVersionsFixupListener( + taskQueue, + client, + new FeatureService(List.of(new TransportFeatures())), + null, + null + ); listeners.clusterChanged(new ClusterChangedEvent("test", testState, ClusterState.EMPTY_STATE)); verify(taskQueue, never()).submitTask(anyString(), any(), any()); @@ -151,7 +164,13 @@ public void testNothingFixedWhenOnPreviousVersion() { ) .build(); - TransportVersionsFixupListener listeners = new TransportVersionsFixupListener(taskQueue, client, null, null); + TransportVersionsFixupListener listeners = new TransportVersionsFixupListener( + taskQueue, + client, + new FeatureService(List.of(new TransportFeatures())), + null, + null + ); listeners.clusterChanged(new ClusterChangedEvent("test", testState, ClusterState.EMPTY_STATE)); verify(taskQueue, never()).submitTask(anyString(), any(), any()); @@ -175,7 +194,13 @@ public void testVersionsAreFixed() { ArgumentCaptor> action = ArgumentCaptor.forClass(ActionListener.class); ArgumentCaptor task = ArgumentCaptor.forClass(NodeTransportVersionTask.class); - TransportVersionsFixupListener listeners = new TransportVersionsFixupListener(taskQueue, client, null, null); + TransportVersionsFixupListener listeners = new TransportVersionsFixupListener( + taskQueue, + client, + new FeatureService(List.of(new TransportFeatures())), + null, + null + ); listeners.clusterChanged(new ClusterChangedEvent("test", testState, ClusterState.EMPTY_STATE)); verify(client).nodesInfo( argThat(transformedMatch(NodesInfoRequest::nodesIds, arrayContainingInAnyOrder("node1", "node2"))), @@ -201,7 +226,13 @@ public void testConcurrentChangesDoNotOverlap() { ) .build(); - TransportVersionsFixupListener listeners = new TransportVersionsFixupListener(taskQueue, client, null, null); + TransportVersionsFixupListener listeners = new TransportVersionsFixupListener( + taskQueue, + client, + new FeatureService(List.of(new TransportFeatures())), + null, + null + ); listeners.clusterChanged(new ClusterChangedEvent("test", testState1, ClusterState.EMPTY_STATE)); verify(client).nodesInfo(argThat(transformedMatch(NodesInfoRequest::nodesIds, arrayContainingInAnyOrder("node1", "node2"))), any()); // don't send back the response yet @@ -240,7 +271,13 @@ public void testFailedRequestsAreRetried() { ArgumentCaptor> action = 
ArgumentCaptor.forClass(ActionListener.class); ArgumentCaptor retry = ArgumentCaptor.forClass(Runnable.class); - TransportVersionsFixupListener listeners = new TransportVersionsFixupListener(taskQueue, client, scheduler, executor); + TransportVersionsFixupListener listeners = new TransportVersionsFixupListener( + taskQueue, + client, + new FeatureService(List.of(new TransportFeatures())), + scheduler, + executor + ); listeners.clusterChanged(new ClusterChangedEvent("test", testState1, ClusterState.EMPTY_STATE)); verify(client, times(1)).nodesInfo(any(), action.capture()); // do response immediately diff --git a/server/src/test/java/org/elasticsearch/health/HealthPeriodicLoggerTests.java b/server/src/test/java/org/elasticsearch/health/HealthPeriodicLoggerTests.java index d1dfa6533d000..4203a984a8f07 100644 --- a/server/src/test/java/org/elasticsearch/health/HealthPeriodicLoggerTests.java +++ b/server/src/test/java/org/elasticsearch/health/HealthPeriodicLoggerTests.java @@ -106,7 +106,7 @@ public void testConvertToLoggedFields() { // test indicator status assertThat(loggerResults.get(makeHealthStatusString("network_latency")), equalTo("green")); assertThat(loggerResults.get(makeHealthStatusString("slow_task_assignment")), equalTo("yellow")); - assertThat(loggerResults.get(makeHealthStatusString("shards_availability")), equalTo("green")); + assertThat(loggerResults.get(makeHealthStatusString("shards_availability")), equalTo("yellow")); // test calculated overall status assertThat(loggerResults.get(makeHealthStatusString("overall")), equalTo(overallStatus.xContentValue())); @@ -114,7 +114,7 @@ public void testConvertToLoggedFields() { // test calculated message assertThat( loggerResults.get(HealthPeriodicLogger.MESSAGE_FIELD), - equalTo(String.format(Locale.ROOT, "health=%s", overallStatus.xContentValue())) + equalTo(String.format(Locale.ROOT, "health=%s [shards_availability,slow_task_assignment]", overallStatus.xContentValue())) ); // test empty results @@ -124,6 +124,19 @@ public void testConvertToLoggedFields() { assertThat(emptyResults.size(), equalTo(0)); } + + // test all-green results + { + results = getTestIndicatorResultsAllGreen(); + loggerResults = HealthPeriodicLogger.convertToLoggedFields(results); + overallStatus = HealthStatus.merge(results.stream().map(HealthIndicatorResult::status)); + + // test calculated message + assertThat( + loggerResults.get(HealthPeriodicLogger.MESSAGE_FIELD), + equalTo(String.format(Locale.ROOT, "health=%s", overallStatus.xContentValue())) + ); + } } public void testHealthNodeIsSelected() { @@ -432,6 +445,14 @@ public void testLoggingHappens() { private List getTestIndicatorResults() { var networkLatency = new HealthIndicatorResult("network_latency", GREEN, null, null, null, null); var slowTasks = new HealthIndicatorResult("slow_task_assignment", YELLOW, null, null, null, null); + var shardsAvailable = new HealthIndicatorResult("shards_availability", YELLOW, null, null, null, null); + + return List.of(networkLatency, slowTasks, shardsAvailable); + } + + private List getTestIndicatorResultsAllGreen() { + var networkLatency = new HealthIndicatorResult("network_latency", GREEN, null, null, null, null); + var slowTasks = new HealthIndicatorResult("slow_task_assignment", GREEN, null, null, null, null); var shardsAvailable = new HealthIndicatorResult("shards_availability", GREEN, null, null, null, null); return List.of(networkLatency, slowTasks, shardsAvailable); diff --git a/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java 
b/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java index ecc55c36f61c2..2fa3216ad5556 100644 --- a/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java +++ b/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java @@ -89,58 +89,59 @@ public SearchShardTask getTask() { } public void testLevelPrecedence() { - SearchContext ctx = searchContextWithSourceAndTask(createIndex("index")); - String uuid = UUIDs.randomBase64UUID(); - IndexSettings settings = new IndexSettings(createIndexMetadata("index", settings(uuid)), Settings.EMPTY); - SearchSlowLog log = new SearchSlowLog(settings); - - // For this test, when level is not breached, the level below should be used. - { - log.onQueryPhase(ctx, 40L); - assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.INFO)); - log.onQueryPhase(ctx, 41L); - assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.WARN)); - - log.onFetchPhase(ctx, 40L); - assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.INFO)); - log.onFetchPhase(ctx, 41L); - assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.WARN)); - } + try (SearchContext ctx = searchContextWithSourceAndTask(createIndex("index"))) { + String uuid = UUIDs.randomBase64UUID(); + IndexSettings settings = new IndexSettings(createIndexMetadata("index", settings(uuid)), Settings.EMPTY); + SearchSlowLog log = new SearchSlowLog(settings); + + // For this test, when level is not breached, the level below should be used. + { + log.onQueryPhase(ctx, 40L); + assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.INFO)); + log.onQueryPhase(ctx, 41L); + assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.WARN)); + + log.onFetchPhase(ctx, 40L); + assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.INFO)); + log.onFetchPhase(ctx, 41L); + assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.WARN)); + } - { - log.onQueryPhase(ctx, 30L); - assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.DEBUG)); - log.onQueryPhase(ctx, 31L); - assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.INFO)); + { + log.onQueryPhase(ctx, 30L); + assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.DEBUG)); + log.onQueryPhase(ctx, 31L); + assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.INFO)); - log.onFetchPhase(ctx, 30L); - assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.DEBUG)); - log.onFetchPhase(ctx, 31L); - assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.INFO)); - } + log.onFetchPhase(ctx, 30L); + assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.DEBUG)); + log.onFetchPhase(ctx, 31L); + assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.INFO)); + } - { - log.onQueryPhase(ctx, 20L); - assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.TRACE)); - log.onQueryPhase(ctx, 21L); - assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.DEBUG)); + { + log.onQueryPhase(ctx, 20L); + assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.TRACE)); + log.onQueryPhase(ctx, 21L); + assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.DEBUG)); - log.onFetchPhase(ctx, 20L); - assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.TRACE)); - log.onFetchPhase(ctx, 21L); - assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.DEBUG)); - } + 
log.onFetchPhase(ctx, 20L); + assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.TRACE)); + log.onFetchPhase(ctx, 21L); + assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.DEBUG)); + } - { - log.onQueryPhase(ctx, 10L); - assertNull(appender.getLastEventAndReset()); - log.onQueryPhase(ctx, 11L); - assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.TRACE)); + { + log.onQueryPhase(ctx, 10L); + assertNull(appender.getLastEventAndReset()); + log.onQueryPhase(ctx, 11L); + assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.TRACE)); - log.onFetchPhase(ctx, 10L); - assertNull(appender.getLastEventAndReset()); - log.onFetchPhase(ctx, 11L); - assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.TRACE)); + log.onFetchPhase(ctx, 10L); + assertNull(appender.getLastEventAndReset()); + log.onFetchPhase(ctx, 11L); + assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.TRACE)); + } } } @@ -160,63 +161,71 @@ private Settings.Builder settings(String uuid) { } public void testTwoLoggersDifferentLevel() { - SearchContext ctx1 = searchContextWithSourceAndTask(createIndex("index-1")); - SearchContext ctx2 = searchContextWithSourceAndTask(createIndex("index-2")); - IndexSettings settings1 = new IndexSettings( - createIndexMetadata( - "index-1", - Settings.builder() - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) - .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) - .put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING.getKey(), "40nanos") - .put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING.getKey(), "40nanos") - ), - Settings.EMPTY - ); - SearchSlowLog log1 = new SearchSlowLog(settings1); + try ( + SearchContext ctx1 = searchContextWithSourceAndTask(createIndex("index-1")); + SearchContext ctx2 = searchContextWithSourceAndTask(createIndex("index-2")) + ) { + IndexSettings settings1 = new IndexSettings( + createIndexMetadata( + "index-1", + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) + .put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING.getKey(), "40nanos") + .put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING.getKey(), "40nanos") + ), + Settings.EMPTY + ); + SearchSlowLog log1 = new SearchSlowLog(settings1); - IndexSettings settings2 = new IndexSettings( - createIndexMetadata( - "index-2", - Settings.builder() - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) - .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) - .put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING.getKey(), "10nanos") - .put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING.getKey(), "10nanos") - ), - Settings.EMPTY - ); - SearchSlowLog log2 = new SearchSlowLog(settings2); - - { - // threshold set on WARN only, should not log - log1.onQueryPhase(ctx1, 11L); - assertNull(appender.getLastEventAndReset()); - log1.onFetchPhase(ctx1, 11L); - assertNull(appender.getLastEventAndReset()); - - // threshold set on TRACE, should log - log2.onQueryPhase(ctx2, 11L); - assertNotNull(appender.getLastEventAndReset()); - log2.onFetchPhase(ctx2, 11L); - assertNotNull(appender.getLastEventAndReset()); + IndexSettings settings2 = new IndexSettings( + createIndexMetadata( + "index-2", + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + 
.put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) + .put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING.getKey(), "10nanos") + .put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING.getKey(), "10nanos") + ), + Settings.EMPTY + ); + SearchSlowLog log2 = new SearchSlowLog(settings2); + + { + // threshold set on WARN only, should not log + log1.onQueryPhase(ctx1, 11L); + assertNull(appender.getLastEventAndReset()); + log1.onFetchPhase(ctx1, 11L); + assertNull(appender.getLastEventAndReset()); + + // threshold set on TRACE, should log + log2.onQueryPhase(ctx2, 11L); + assertNotNull(appender.getLastEventAndReset()); + log2.onFetchPhase(ctx2, 11L); + assertNotNull(appender.getLastEventAndReset()); + } } } public void testMultipleSlowLoggersUseSingleLog4jLogger() { LoggerContext context = (LoggerContext) LogManager.getContext(false); - SearchContext ctx1 = searchContextWithSourceAndTask(createIndex("index-1")); - IndexSettings settings1 = new IndexSettings(createIndexMetadata("index-1", settings(UUIDs.randomBase64UUID())), Settings.EMPTY); - SearchSlowLog log1 = new SearchSlowLog(settings1); - int numberOfLoggersBefore = context.getLoggers().size(); + try (SearchContext ctx1 = searchContextWithSourceAndTask(createIndex("index-1"))) { + IndexSettings settings1 = new IndexSettings(createIndexMetadata("index-1", settings(UUIDs.randomBase64UUID())), Settings.EMPTY); + SearchSlowLog log1 = new SearchSlowLog(settings1); + int numberOfLoggersBefore = context.getLoggers().size(); - SearchContext ctx2 = searchContextWithSourceAndTask(createIndex("index-2")); - IndexSettings settings2 = new IndexSettings(createIndexMetadata("index-2", settings(UUIDs.randomBase64UUID())), Settings.EMPTY); - SearchSlowLog log2 = new SearchSlowLog(settings2); + try (SearchContext ctx2 = searchContextWithSourceAndTask(createIndex("index-2"))) { + IndexSettings settings2 = new IndexSettings( + createIndexMetadata("index-2", settings(UUIDs.randomBase64UUID())), + Settings.EMPTY + ); + SearchSlowLog log2 = new SearchSlowLog(settings2); - int numberOfLoggersAfter = context.getLoggers().size(); - assertThat(numberOfLoggersAfter, equalTo(numberOfLoggersBefore)); + int numberOfLoggersAfter = context.getLoggers().size(); + assertThat(numberOfLoggersAfter, equalTo(numberOfLoggersBefore)); + } + } } private IndexMetadata createIndexMetadata(String index, Settings.Builder put) { @@ -225,49 +234,53 @@ private IndexMetadata createIndexMetadata(String index, Settings.Builder put) { public void testSlowLogHasJsonFields() throws IOException { IndexService index = createIndex("foo"); - SearchContext searchContext = searchContextWithSourceAndTask(index); - ESLogMessage p = SearchSlowLog.SearchSlowLogMessage.of(searchContext, 10); - - assertThat(p.get("elasticsearch.slowlog.message"), equalTo("[foo][0]")); - assertThat(p.get("elasticsearch.slowlog.took"), equalTo("10nanos")); - assertThat(p.get("elasticsearch.slowlog.took_millis"), equalTo("0")); - assertThat(p.get("elasticsearch.slowlog.total_hits"), equalTo("-1")); - assertThat(p.get("elasticsearch.slowlog.stats"), equalTo("[]")); - assertThat(p.get("elasticsearch.slowlog.search_type"), Matchers.nullValue()); - assertThat(p.get("elasticsearch.slowlog.total_shards"), equalTo("1")); - assertThat(p.get("elasticsearch.slowlog.source"), equalTo("{\\\"query\\\":{\\\"match_all\\\":{\\\"boost\\\":1.0}}}")); + try (SearchContext searchContext = searchContextWithSourceAndTask(index)) { + ESLogMessage p = SearchSlowLog.SearchSlowLogMessage.of(searchContext, 
10); + + assertThat(p.get("elasticsearch.slowlog.message"), equalTo("[foo][0]")); + assertThat(p.get("elasticsearch.slowlog.took"), equalTo("10nanos")); + assertThat(p.get("elasticsearch.slowlog.took_millis"), equalTo("0")); + assertThat(p.get("elasticsearch.slowlog.total_hits"), equalTo("-1")); + assertThat(p.get("elasticsearch.slowlog.stats"), equalTo("[]")); + assertThat(p.get("elasticsearch.slowlog.search_type"), Matchers.nullValue()); + assertThat(p.get("elasticsearch.slowlog.total_shards"), equalTo("1")); + assertThat(p.get("elasticsearch.slowlog.source"), equalTo("{\\\"query\\\":{\\\"match_all\\\":{\\\"boost\\\":1.0}}}")); + } } public void testSlowLogsWithStats() throws IOException { IndexService index = createIndex("foo"); - SearchContext searchContext = createSearchContext(index, "group1"); - SearchSourceBuilder source = SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()); - searchContext.request().source(source); - searchContext.setTask( - new SearchShardTask(0, "n/a", "n/a", "test", null, Collections.singletonMap(Task.X_OPAQUE_ID_HTTP_HEADER, "my_id")) - ); + try (SearchContext searchContext = createSearchContext(index, "group1")) { + SearchSourceBuilder source = SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()); + searchContext.request().source(source); + searchContext.setTask( + new SearchShardTask(0, "n/a", "n/a", "test", null, Collections.singletonMap(Task.X_OPAQUE_ID_HTTP_HEADER, "my_id")) + ); - ESLogMessage p = SearchSlowLog.SearchSlowLogMessage.of(searchContext, 10); - assertThat(p.get("elasticsearch.slowlog.stats"), equalTo("[\\\"group1\\\"]")); + ESLogMessage p = SearchSlowLog.SearchSlowLogMessage.of(searchContext, 10); + assertThat(p.get("elasticsearch.slowlog.stats"), equalTo("[\\\"group1\\\"]")); + } - searchContext = createSearchContext(index, "group1", "group2"); - source = SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()); - searchContext.request().source(source); - searchContext.setTask( - new SearchShardTask(0, "n/a", "n/a", "test", null, Collections.singletonMap(Task.X_OPAQUE_ID_HTTP_HEADER, "my_id")) - ); - p = SearchSlowLog.SearchSlowLogMessage.of(searchContext, 10); - assertThat(p.get("elasticsearch.slowlog.stats"), equalTo("[\\\"group1\\\", \\\"group2\\\"]")); + try (SearchContext searchContext = createSearchContext(index, "group1", "group2");) { + SearchSourceBuilder source = SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()); + searchContext.request().source(source); + searchContext.setTask( + new SearchShardTask(0, "n/a", "n/a", "test", null, Collections.singletonMap(Task.X_OPAQUE_ID_HTTP_HEADER, "my_id")) + ); + ESLogMessage p = SearchSlowLog.SearchSlowLogMessage.of(searchContext, 10); + assertThat(p.get("elasticsearch.slowlog.stats"), equalTo("[\\\"group1\\\", \\\"group2\\\"]")); + } } public void testSlowLogSearchContextPrinterToLog() throws IOException { IndexService index = createIndex("foo"); - SearchContext searchContext = searchContextWithSourceAndTask(index); - ESLogMessage p = SearchSlowLog.SearchSlowLogMessage.of(searchContext, 10); - assertThat(p.get("elasticsearch.slowlog.message"), equalTo("[foo][0]")); - // Makes sure that output doesn't contain any new lines - assertThat(p.get("elasticsearch.slowlog.source"), not(containsString("\n"))); - assertThat(p.get("elasticsearch.slowlog.id"), equalTo("my_id")); + try (SearchContext searchContext = searchContextWithSourceAndTask(index)) { + ESLogMessage p = SearchSlowLog.SearchSlowLogMessage.of(searchContext, 
10); + assertThat(p.get("elasticsearch.slowlog.message"), equalTo("[foo][0]")); + // Makes sure that output doesn't contain any new lines + assertThat(p.get("elasticsearch.slowlog.source"), not(containsString("\n"))); + assertThat(p.get("elasticsearch.slowlog.id"), equalTo("my_id")); + } } public void testSetQueryLevels() { diff --git a/server/src/test/java/org/elasticsearch/index/shard/SearchOperationListenerTests.java b/server/src/test/java/org/elasticsearch/index/shard/SearchOperationListenerTests.java index 714e6a05cd3e9..2baca5662161d 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/SearchOperationListenerTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/SearchOperationListenerTests.java @@ -137,160 +137,161 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest indexingOperationListeners, logger ); - SearchContext ctx = new TestSearchContext((SearchExecutionContext) null); - compositeListener.onQueryPhase(ctx, timeInNanos.get()); - assertEquals(0, preFetch.get()); - assertEquals(0, preQuery.get()); - assertEquals(0, failedFetch.get()); - assertEquals(0, failedQuery.get()); - assertEquals(2, onQuery.get()); - assertEquals(0, onFetch.get()); - assertEquals(0, newContext.get()); - assertEquals(0, newScrollContext.get()); - assertEquals(0, freeContext.get()); - assertEquals(0, freeScrollContext.get()); - assertEquals(0, validateSearchContext.get()); + try (SearchContext ctx = new TestSearchContext((SearchExecutionContext) null)) { + compositeListener.onQueryPhase(ctx, timeInNanos.get()); + assertEquals(0, preFetch.get()); + assertEquals(0, preQuery.get()); + assertEquals(0, failedFetch.get()); + assertEquals(0, failedQuery.get()); + assertEquals(2, onQuery.get()); + assertEquals(0, onFetch.get()); + assertEquals(0, newContext.get()); + assertEquals(0, newScrollContext.get()); + assertEquals(0, freeContext.get()); + assertEquals(0, freeScrollContext.get()); + assertEquals(0, validateSearchContext.get()); - compositeListener.onFetchPhase(ctx, timeInNanos.get()); - assertEquals(0, preFetch.get()); - assertEquals(0, preQuery.get()); - assertEquals(0, failedFetch.get()); - assertEquals(0, failedQuery.get()); - assertEquals(2, onQuery.get()); - assertEquals(2, onFetch.get()); - assertEquals(0, newContext.get()); - assertEquals(0, newScrollContext.get()); - assertEquals(0, freeContext.get()); - assertEquals(0, freeScrollContext.get()); - assertEquals(0, validateSearchContext.get()); + compositeListener.onFetchPhase(ctx, timeInNanos.get()); + assertEquals(0, preFetch.get()); + assertEquals(0, preQuery.get()); + assertEquals(0, failedFetch.get()); + assertEquals(0, failedQuery.get()); + assertEquals(2, onQuery.get()); + assertEquals(2, onFetch.get()); + assertEquals(0, newContext.get()); + assertEquals(0, newScrollContext.get()); + assertEquals(0, freeContext.get()); + assertEquals(0, freeScrollContext.get()); + assertEquals(0, validateSearchContext.get()); - compositeListener.onPreQueryPhase(ctx); - assertEquals(0, preFetch.get()); - assertEquals(2, preQuery.get()); - assertEquals(0, failedFetch.get()); - assertEquals(0, failedQuery.get()); - assertEquals(2, onQuery.get()); - assertEquals(2, onFetch.get()); - assertEquals(0, newContext.get()); - assertEquals(0, newScrollContext.get()); - assertEquals(0, freeContext.get()); - assertEquals(0, freeScrollContext.get()); - assertEquals(0, validateSearchContext.get()); + compositeListener.onPreQueryPhase(ctx); + assertEquals(0, preFetch.get()); + assertEquals(2, preQuery.get()); + 
assertEquals(0, failedFetch.get()); + assertEquals(0, failedQuery.get()); + assertEquals(2, onQuery.get()); + assertEquals(2, onFetch.get()); + assertEquals(0, newContext.get()); + assertEquals(0, newScrollContext.get()); + assertEquals(0, freeContext.get()); + assertEquals(0, freeScrollContext.get()); + assertEquals(0, validateSearchContext.get()); - compositeListener.onPreFetchPhase(ctx); - assertEquals(2, preFetch.get()); - assertEquals(2, preQuery.get()); - assertEquals(0, failedFetch.get()); - assertEquals(0, failedQuery.get()); - assertEquals(2, onQuery.get()); - assertEquals(2, onFetch.get()); - assertEquals(0, newContext.get()); - assertEquals(0, newScrollContext.get()); - assertEquals(0, freeContext.get()); - assertEquals(0, freeScrollContext.get()); - assertEquals(0, validateSearchContext.get()); + compositeListener.onPreFetchPhase(ctx); + assertEquals(2, preFetch.get()); + assertEquals(2, preQuery.get()); + assertEquals(0, failedFetch.get()); + assertEquals(0, failedQuery.get()); + assertEquals(2, onQuery.get()); + assertEquals(2, onFetch.get()); + assertEquals(0, newContext.get()); + assertEquals(0, newScrollContext.get()); + assertEquals(0, freeContext.get()); + assertEquals(0, freeScrollContext.get()); + assertEquals(0, validateSearchContext.get()); - compositeListener.onFailedFetchPhase(ctx); - assertEquals(2, preFetch.get()); - assertEquals(2, preQuery.get()); - assertEquals(2, failedFetch.get()); - assertEquals(0, failedQuery.get()); - assertEquals(2, onQuery.get()); - assertEquals(2, onFetch.get()); - assertEquals(0, newContext.get()); - assertEquals(0, newScrollContext.get()); - assertEquals(0, freeContext.get()); - assertEquals(0, freeScrollContext.get()); - assertEquals(0, validateSearchContext.get()); + compositeListener.onFailedFetchPhase(ctx); + assertEquals(2, preFetch.get()); + assertEquals(2, preQuery.get()); + assertEquals(2, failedFetch.get()); + assertEquals(0, failedQuery.get()); + assertEquals(2, onQuery.get()); + assertEquals(2, onFetch.get()); + assertEquals(0, newContext.get()); + assertEquals(0, newScrollContext.get()); + assertEquals(0, freeContext.get()); + assertEquals(0, freeScrollContext.get()); + assertEquals(0, validateSearchContext.get()); - compositeListener.onFailedQueryPhase(ctx); - assertEquals(2, preFetch.get()); - assertEquals(2, preQuery.get()); - assertEquals(2, failedFetch.get()); - assertEquals(2, failedQuery.get()); - assertEquals(2, onQuery.get()); - assertEquals(2, onFetch.get()); - assertEquals(0, newContext.get()); - assertEquals(0, newScrollContext.get()); - assertEquals(0, freeContext.get()); - assertEquals(0, freeScrollContext.get()); - assertEquals(0, validateSearchContext.get()); + compositeListener.onFailedQueryPhase(ctx); + assertEquals(2, preFetch.get()); + assertEquals(2, preQuery.get()); + assertEquals(2, failedFetch.get()); + assertEquals(2, failedQuery.get()); + assertEquals(2, onQuery.get()); + assertEquals(2, onFetch.get()); + assertEquals(0, newContext.get()); + assertEquals(0, newScrollContext.get()); + assertEquals(0, freeContext.get()); + assertEquals(0, freeScrollContext.get()); + assertEquals(0, validateSearchContext.get()); - compositeListener.onNewReaderContext(mock(ReaderContext.class)); - assertEquals(2, preFetch.get()); - assertEquals(2, preQuery.get()); - assertEquals(2, failedFetch.get()); - assertEquals(2, failedQuery.get()); - assertEquals(2, onQuery.get()); - assertEquals(2, onFetch.get()); - assertEquals(2, newContext.get()); - assertEquals(0, newScrollContext.get()); - assertEquals(0, 
freeContext.get()); - assertEquals(0, freeScrollContext.get()); - assertEquals(0, validateSearchContext.get()); + compositeListener.onNewReaderContext(mock(ReaderContext.class)); + assertEquals(2, preFetch.get()); + assertEquals(2, preQuery.get()); + assertEquals(2, failedFetch.get()); + assertEquals(2, failedQuery.get()); + assertEquals(2, onQuery.get()); + assertEquals(2, onFetch.get()); + assertEquals(2, newContext.get()); + assertEquals(0, newScrollContext.get()); + assertEquals(0, freeContext.get()); + assertEquals(0, freeScrollContext.get()); + assertEquals(0, validateSearchContext.get()); - compositeListener.onNewScrollContext(mock(ReaderContext.class)); - assertEquals(2, preFetch.get()); - assertEquals(2, preQuery.get()); - assertEquals(2, failedFetch.get()); - assertEquals(2, failedQuery.get()); - assertEquals(2, onQuery.get()); - assertEquals(2, onFetch.get()); - assertEquals(2, newContext.get()); - assertEquals(2, newScrollContext.get()); - assertEquals(0, freeContext.get()); - assertEquals(0, freeScrollContext.get()); - assertEquals(0, validateSearchContext.get()); + compositeListener.onNewScrollContext(mock(ReaderContext.class)); + assertEquals(2, preFetch.get()); + assertEquals(2, preQuery.get()); + assertEquals(2, failedFetch.get()); + assertEquals(2, failedQuery.get()); + assertEquals(2, onQuery.get()); + assertEquals(2, onFetch.get()); + assertEquals(2, newContext.get()); + assertEquals(2, newScrollContext.get()); + assertEquals(0, freeContext.get()); + assertEquals(0, freeScrollContext.get()); + assertEquals(0, validateSearchContext.get()); - compositeListener.onFreeReaderContext(mock(ReaderContext.class)); - assertEquals(2, preFetch.get()); - assertEquals(2, preQuery.get()); - assertEquals(2, failedFetch.get()); - assertEquals(2, failedQuery.get()); - assertEquals(2, onQuery.get()); - assertEquals(2, onFetch.get()); - assertEquals(2, newContext.get()); - assertEquals(2, newScrollContext.get()); - assertEquals(2, freeContext.get()); - assertEquals(0, freeScrollContext.get()); - assertEquals(0, validateSearchContext.get()); + compositeListener.onFreeReaderContext(mock(ReaderContext.class)); + assertEquals(2, preFetch.get()); + assertEquals(2, preQuery.get()); + assertEquals(2, failedFetch.get()); + assertEquals(2, failedQuery.get()); + assertEquals(2, onQuery.get()); + assertEquals(2, onFetch.get()); + assertEquals(2, newContext.get()); + assertEquals(2, newScrollContext.get()); + assertEquals(2, freeContext.get()); + assertEquals(0, freeScrollContext.get()); + assertEquals(0, validateSearchContext.get()); - compositeListener.onFreeScrollContext(mock(ReaderContext.class)); - assertEquals(2, preFetch.get()); - assertEquals(2, preQuery.get()); - assertEquals(2, failedFetch.get()); - assertEquals(2, failedQuery.get()); - assertEquals(2, onQuery.get()); - assertEquals(2, onFetch.get()); - assertEquals(2, newContext.get()); - assertEquals(2, newScrollContext.get()); - assertEquals(2, freeContext.get()); - assertEquals(2, freeScrollContext.get()); - assertEquals(0, validateSearchContext.get()); + compositeListener.onFreeScrollContext(mock(ReaderContext.class)); + assertEquals(2, preFetch.get()); + assertEquals(2, preQuery.get()); + assertEquals(2, failedFetch.get()); + assertEquals(2, failedQuery.get()); + assertEquals(2, onQuery.get()); + assertEquals(2, onFetch.get()); + assertEquals(2, newContext.get()); + assertEquals(2, newScrollContext.get()); + assertEquals(2, freeContext.get()); + assertEquals(2, freeScrollContext.get()); + assertEquals(0, validateSearchContext.get()); - 
if (throwingListeners == 0) { - compositeListener.validateReaderContext(mock(ReaderContext.class), Empty.INSTANCE); - } else { - RuntimeException expected = expectThrows( - RuntimeException.class, - () -> compositeListener.validateReaderContext(mock(ReaderContext.class), Empty.INSTANCE) - ); - assertNull(expected.getMessage()); - assertEquals(throwingListeners - 1, expected.getSuppressed().length); - if (throwingListeners > 1) { - assertThat(expected.getSuppressed()[0], not(sameInstance(expected))); + if (throwingListeners == 0) { + compositeListener.validateReaderContext(mock(ReaderContext.class), Empty.INSTANCE); + } else { + RuntimeException expected = expectThrows( + RuntimeException.class, + () -> compositeListener.validateReaderContext(mock(ReaderContext.class), Empty.INSTANCE) + ); + assertNull(expected.getMessage()); + assertEquals(throwingListeners - 1, expected.getSuppressed().length); + if (throwingListeners > 1) { + assertThat(expected.getSuppressed()[0], not(sameInstance(expected))); + } } + assertEquals(2, preFetch.get()); + assertEquals(2, preQuery.get()); + assertEquals(2, failedFetch.get()); + assertEquals(2, failedQuery.get()); + assertEquals(2, onQuery.get()); + assertEquals(2, onFetch.get()); + assertEquals(2, newContext.get()); + assertEquals(2, newScrollContext.get()); + assertEquals(2, freeContext.get()); + assertEquals(2, freeScrollContext.get()); + assertEquals(2, validateSearchContext.get()); } - assertEquals(2, preFetch.get()); - assertEquals(2, preQuery.get()); - assertEquals(2, failedFetch.get()); - assertEquals(2, failedQuery.get()); - assertEquals(2, onQuery.get()); - assertEquals(2, onFetch.get()); - assertEquals(2, newContext.get()); - assertEquals(2, newScrollContext.get()); - assertEquals(2, freeContext.get()); - assertEquals(2, freeScrollContext.get()); - assertEquals(2, validateSearchContext.get()); } } diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java index 6243131141497..3aa84163bf355 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java @@ -2605,12 +2605,32 @@ private static IngestService createWithProcessors( ThreadPool threadPool = mock(ThreadPool.class); when(threadPool.generic()).thenReturn(EsExecutors.DIRECT_EXECUTOR_SERVICE); when(threadPool.executor(anyString())).thenReturn(EsExecutors.DIRECT_EXECUTOR_SERVICE); - return new IngestService(mock(ClusterService.class), threadPool, null, null, null, List.of(new IngestPlugin() { - @Override - public Map getProcessors(final Processor.Parameters parameters) { - return processors; - } - }), client, null, documentParsingObserverSupplier); + IngestService ingestService = new IngestService( + mock(ClusterService.class), + threadPool, + null, + null, + null, + List.of(new IngestPlugin() { + @Override + public Map getProcessors(final Processor.Parameters parameters) { + return processors; + } + }), + client, + null, + documentParsingObserverSupplier + ); + if (randomBoolean()) { + /* + * Testing the copy constructor directly is difficult because there is no equals() method in IngestService, but there is a lot + * of private internal state. Here we use the copy constructor half the time in all of the unit tests, with the assumption that + * if none of our tests observe any difference then the copy constructor is working as expected. 
+ */ + return new IngestService(ingestService); + } else { + return ingestService; + } } private CompoundProcessor mockCompoundProcessor() { diff --git a/server/src/test/java/org/elasticsearch/ingest/SimulateIngestServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/SimulateIngestServiceTests.java new file mode 100644 index 0000000000000..793cd21fbfd5b --- /dev/null +++ b/server/src/test/java/org/elasticsearch/ingest/SimulateIngestServiceTests.java @@ -0,0 +1,143 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.ingest; + +import org.elasticsearch.action.bulk.SimulateBulkRequest; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.plugins.IngestPlugin; +import org.elasticsearch.plugins.internal.DocumentParsingObserver; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.XContentType; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SimulateIngestServiceTests extends ESTestCase { + + public void testGetPipeline() { + PipelineConfiguration pipelineConfiguration = new PipelineConfiguration("pipeline1", new BytesArray(""" + {"processors": [{"processor1" : {}}]}"""), XContentType.JSON); + IngestMetadata ingestMetadata = new IngestMetadata(Map.of("pipeline1", pipelineConfiguration)); + Map processors = new HashMap<>(); + processors.put( + "processor1", + (factories, tag, description, config) -> new FakeProcessor("processor1", tag, description, ingestDocument -> {}) { + } + ); + processors.put( + "processor2", + (factories, tag, description, config) -> new FakeProcessor("processor2", tag, description, ingestDocument -> {}) { + } + ); + processors.put( + "processor3", + (factories, tag, description, config) -> new FakeProcessor("processor3", tag, description, ingestDocument -> {}) { + } + ); + IngestService ingestService = createWithProcessors(processors); + ingestService.innerUpdatePipelines(ingestMetadata); + { + // First we make sure that if there are no substitutions that we get our original pipeline back: + SimulateBulkRequest simulateBulkRequest = new SimulateBulkRequest((Map>) null); + SimulateIngestService simulateIngestService = new SimulateIngestService(ingestService, simulateBulkRequest); + Pipeline pipeline = simulateIngestService.getPipeline("pipeline1"); + assertThat(pipeline.getProcessors().size(), equalTo(1)); + assertThat(pipeline.getProcessors().get(0).getType(), equalTo("processor1")); + assertNull(simulateIngestService.getPipeline("pipeline2")); + } + { + // Here we make sure that if we have a substitution with the same name as the original pipeline that we get the new one back + Map> pipelineSubstitutions = new HashMap<>() { + { + put("pipeline1", new HashMap<>() { + { + put("processors", List.of(new HashMap<>() { + { + put("processor2", 
new HashMap<>()); + } + }, new HashMap<>() { + { + put("processor3", new HashMap<>()); + } + })); + } + }); + put("pipeline2", new HashMap<>() { + { + put("processors", List.of(new HashMap<>() { + { + put("processor3", new HashMap<>()); + } + })); + } + }); + } + }; + SimulateBulkRequest simulateBulkRequest = new SimulateBulkRequest(pipelineSubstitutions); + SimulateIngestService simulateIngestService = new SimulateIngestService(ingestService, simulateBulkRequest); + Pipeline pipeline1 = simulateIngestService.getPipeline("pipeline1"); + assertThat(pipeline1.getProcessors().size(), equalTo(2)); + assertThat(pipeline1.getProcessors().get(0).getType(), equalTo("processor2")); + assertThat(pipeline1.getProcessors().get(1).getType(), equalTo("processor3")); + Pipeline pipeline2 = simulateIngestService.getPipeline("pipeline2"); + assertThat(pipeline2.getProcessors().size(), equalTo(1)); + assertThat(pipeline2.getProcessors().get(0).getType(), equalTo("processor3")); + } + { + /* + * Here we make sure that if we have a substitution for a new pipeline we still get the original one back (as well as the new + * one). + */ + Map> pipelineSubstitutions = new HashMap<>() { + { + put("pipeline2", new HashMap<>() { + { + put("processors", List.of(new HashMap<>() { + { + put("processor3", new HashMap<>()); + } + })); + } + }); + } + }; + SimulateBulkRequest simulateBulkRequest = new SimulateBulkRequest(pipelineSubstitutions); + SimulateIngestService simulateIngestService = new SimulateIngestService(ingestService, simulateBulkRequest); + Pipeline pipeline1 = simulateIngestService.getPipeline("pipeline1"); + assertThat(pipeline1.getProcessors().size(), equalTo(1)); + assertThat(pipeline1.getProcessors().get(0).getType(), equalTo("processor1")); + Pipeline pipeline2 = simulateIngestService.getPipeline("pipeline2"); + assertThat(pipeline2.getProcessors().size(), equalTo(1)); + assertThat(pipeline2.getProcessors().get(0).getType(), equalTo("processor3")); + } + } + + private static IngestService createWithProcessors(Map processors) { + Client client = mock(Client.class); + ThreadPool threadPool = mock(ThreadPool.class); + when(threadPool.generic()).thenReturn(EsExecutors.DIRECT_EXECUTOR_SERVICE); + when(threadPool.executor(anyString())).thenReturn(EsExecutors.DIRECT_EXECUTOR_SERVICE); + return new IngestService(mock(ClusterService.class), threadPool, null, null, null, List.of(new IngestPlugin() { + @Override + public Map getProcessors(final Processor.Parameters parameters) { + return processors; + } + }), client, null, () -> DocumentParsingObserver.EMPTY_INSTANCE); + } +} diff --git a/server/src/test/java/org/elasticsearch/readiness/ReadinessServiceTests.java b/server/src/test/java/org/elasticsearch/readiness/ReadinessServiceTests.java index 9e680615019dc..e794752aff15e 100644 --- a/server/src/test/java/org/elasticsearch/readiness/ReadinessServiceTests.java +++ b/server/src/test/java/org/elasticsearch/readiness/ReadinessServiceTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.readiness; +import org.apache.logging.log4j.Level; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -29,6 +30,7 @@ import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.http.HttpStats; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.readiness.ReadinessClientProbe; import org.elasticsearch.threadpool.TestThreadPool; import 
org.elasticsearch.threadpool.ThreadPool; @@ -247,7 +249,30 @@ public void testStatusChange() throws Exception { .build(); event = new ClusterChangedEvent("test", newState, previousState); - readinessService.clusterChanged(event); + var mockAppender = new MockLogAppender(); + try (var ignored = mockAppender.capturing(ReadinessService.class)) { + mockAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "node shutting down logged", + ReadinessService.class.getCanonicalName(), + Level.INFO, + "marking node as not ready because it's shutting down" + ) + ); + readinessService.clusterChanged(event); + mockAppender.assertAllExpectationsMatched(); + + mockAppender.addExpectation( + new MockLogAppender.UnseenEventExpectation( + "node shutting down not logged twice", + ReadinessService.class.getCanonicalName(), + Level.INFO, + "marking node as not ready because it's shutting down" + ) + ); + readinessService.clusterChanged(event); + mockAppender.assertAllExpectationsMatched(); + } assertFalse(readinessService.ready()); tcpReadinessProbeFalse(readinessService); diff --git a/server/src/test/java/org/elasticsearch/rest/action/ingest/RestSimulateIngestActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/ingest/RestSimulateIngestActionTests.java new file mode 100644 index 0000000000000..a738a13f62c21 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/rest/action/ingest/RestSimulateIngestActionTests.java @@ -0,0 +1,249 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.rest.action.ingest; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.ingest.SimulateIndexResponse; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.rest.AbstractRestChannel; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.XContentType; + +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +public class RestSimulateIngestActionTests extends ESTestCase { + + public void testConvertToBulkRequestXContentBytes() throws Exception { + { + // No index, no id, which we expect to be fine: + String simulateRequestJson = """ + { + "docs": [ + { + "_source": { + "my-keyword-field": "FOO" + } + }, + { + "_source": { + "my-keyword-field": "BAR" + } + } + ], + "pipeline_substitutions": { + "my-pipeline-2": { + "processors": [ + { + "set": { + "field": "my-new-boolean-field", + "value": true + } + } + ] + } + } + } + """; + String bulkRequestJson = """ + {"index":{}} + {"my-keyword-field":"FOO"} + {"index":{}} + {"my-keyword-field":"BAR"} + """; + testInputJsonConvertsToOutputJson(simulateRequestJson, bulkRequestJson); + } + + { + // index and id: + String simulateRequestJson = """ + { + "docs": [ + { + "_index": "index", + "_id": "123", + "_source": { + "foo": "bar" + } + }, + { + "_index": "index", + "_id": "456", + "_source": { + "foo": "rab" + } + } + ] + } + """; + String bulkRequestJson = """ + {"index":{"_index":"index","_id":"123"}} + {"foo":"bar"} + {"index":{"_index":"index","_id":"456"}} + {"foo":"rab"} + """; + testInputJsonConvertsToOutputJson(simulateRequestJson, bulkRequestJson); + } + + { + // We expect an IllegalArgumentException if there are no docs: + String simulateRequestJson = """ + { + "docs": [ + ] + } + """; + String bulkRequestJson = """ + {"index":{"_index":"index","_id":"123"}} + {"foo":"bar"} + {"index":{"_index":"index","_id":"456"}} + {"foo":"rab"} + """; + expectThrows(IllegalArgumentException.class, () -> testInputJsonConvertsToOutputJson(simulateRequestJson, bulkRequestJson)); + } + + { + // non-trivial source: + String simulateRequestJson = """ + { + "docs": [ + { + "_index": "index", + "_id": "123", + "_source": { + "foo": "bar", + "some_object": { + "prop1": "val1", + "some_array": [1, 2, 3, 4] + } + } + } + ] + } + """; + String bulkRequestJson = """ + {"index":{"_index":"index","_id":"123"}} + {"some_object":{"prop1":"val1","some_array":[1,2,3,4]},"foo":"bar"} + """; + testInputJsonConvertsToOutputJson(simulateRequestJson, bulkRequestJson); + } + } + + private void testInputJsonConvertsToOutputJson(String inputJson, String expectedOutputJson) throws Exception { + Map sourceMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), inputJson, false); + BytesReference bulkXcontentBytes = RestSimulateIngestAction.convertToBulkRequestXContentBytes(sourceMap); + String bulkRequestJson = XContentHelper.convertToJson(bulkXcontentBytes, false, XContentType.JSON); + assertThat(bulkRequestJson, equalTo(expectedOutputJson)); + } + + public void 
testSimulateIngestRestToXContentListener() throws Exception { + // First, make sure it works with success responses: + BulkItemResponse[] responses = new BulkItemResponse[3]; + responses[0] = getSuccessBulkItemResponse("123", "{\"foo\": \"bar\"}"); + responses[1] = getFailureBulkItemResponse("678", "This has failed"); + responses[2] = getSuccessBulkItemResponse("456", "{\"bar\": \"baz\"}"); + BulkResponse bulkResponse = new BulkResponse(responses, randomLongBetween(0, 50000)); + String expectedXContent = """ + { + "docs" : [ + { + "doc" : { + "_id" : "123", + "_index" : "index1", + "_version" : 3, + "_source" : { + "foo" : "bar" + }, + "executed_pipelines" : [ + "pipeline1", + "pipeline2" + ] + } + }, + { + "doc" : { + "_id" : "678", + "_index" : "index1", + "error" : { + "type" : "runtime_exception", + "reason" : "This has failed" + } + } + }, + { + "doc" : { + "_id" : "456", + "_index" : "index1", + "_version" : 3, + "_source" : { + "bar" : "baz" + }, + "executed_pipelines" : [ + "pipeline1", + "pipeline2" + ] + } + } + ] + }"""; + testSimulateIngestRestToXContentListener(bulkResponse, expectedXContent); + } + + private BulkItemResponse getFailureBulkItemResponse(String id, String failureMessage) { + return BulkItemResponse.failure( + randomInt(), + randomFrom(DocWriteRequest.OpType.values()), + new BulkItemResponse.Failure("index1", id, new RuntimeException(failureMessage)) + ); + } + + private BulkItemResponse getSuccessBulkItemResponse(String id, String source) { + ByteBuffer[] sourceByteBuffer = new ByteBuffer[1]; + sourceByteBuffer[0] = ByteBuffer.wrap(source.getBytes(StandardCharsets.UTF_8)); + return BulkItemResponse.success( + randomInt(), + randomFrom(DocWriteRequest.OpType.values()), + new SimulateIndexResponse( + id, + "index1", + 3, + BytesReference.fromByteBuffers(sourceByteBuffer), + XContentType.JSON, + List.of("pipeline1", "pipeline2") + ) + ); + } + + private void testSimulateIngestRestToXContentListener(BulkResponse bulkResponse, String expectedResult) throws Exception { + final FakeRestRequest request = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).build(); + final SetOnce responseSetOnce = new SetOnce<>(); + RestSimulateIngestAction.SimulateIngestRestToXContentListener listener = + new RestSimulateIngestAction.SimulateIngestRestToXContentListener(new AbstractRestChannel(request, true) { + @Override + public void sendResponse(RestResponse response) { + responseSetOnce.set(response); + } + }); + listener.onResponse(bulkResponse); + RestResponse response = responseSetOnce.get(); + String bulkRequestJson = XContentHelper.convertToJson(response.content(), true, true, XContentType.JSON); + assertThat(bulkRequestJson, equalTo(expectedResult)); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java b/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java index d90a82f08e4f0..ad6ee968c736d 100644 --- a/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java +++ b/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java @@ -178,63 +178,66 @@ protected Engine.Searcher acquireSearcherInternal(String source) { shardSearchRequest, randomNonNegativeLong() ); - DefaultSearchContext context1 = new DefaultSearchContext( - readerContext, - shardSearchRequest, - target, - null, - timeout, - null, - false, - null, - randomInt(), - randomInt() - ); - context1.from(300); - exception = expectThrows(IllegalArgumentException.class, context1::preProcess); - assertThat( - 
exception.getMessage(), - equalTo( - "Batch size is too large, size must be less than or equal to: [" - + maxResultWindow - + "] but was [310]. Scroll batch sizes cost as much memory as result windows so they are " - + "controlled by the [" - + IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey() - + "] index level setting." + try ( + DefaultSearchContext context1 = new DefaultSearchContext( + readerContext, + shardSearchRequest, + target, + null, + timeout, + null, + false, + null, + randomInt(), + randomInt() ) - ); - - // resultWindow not greater than maxResultWindow and both rescore and sort are not null - context1.from(0); - DocValueFormat docValueFormat = mock(DocValueFormat.class); - SortAndFormats sortAndFormats = new SortAndFormats(new Sort(), new DocValueFormat[] { docValueFormat }); - context1.sort(sortAndFormats); - - RescoreContext rescoreContext = mock(RescoreContext.class); - when(rescoreContext.getWindowSize()).thenReturn(500); - context1.addRescore(rescoreContext); - - exception = expectThrows(IllegalArgumentException.class, context1::preProcess); - assertThat(exception.getMessage(), equalTo("Cannot use [sort] option in conjunction with [rescore].")); - - // rescore is null but sort is not null and rescoreContext.getWindowSize() exceeds maxResultWindow - context1.sort(null); - exception = expectThrows(IllegalArgumentException.class, context1::preProcess); - - assertThat( - exception.getMessage(), - equalTo( - "Rescore window [" - + rescoreContext.getWindowSize() - + "] is too large. " - + "It must be less than [" - + maxRescoreWindow - + "]. This prevents allocating massive heaps for storing the results " - + "to be rescored. This limit can be set by changing the [" - + IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey() - + "] index level setting." - ) - ); + ) { + context1.from(300); + exception = expectThrows(IllegalArgumentException.class, context1::preProcess); + assertThat( + exception.getMessage(), + equalTo( + "Batch size is too large, size must be less than or equal to: [" + + maxResultWindow + + "] but was [310]. Scroll batch sizes cost as much memory as result windows so they are " + + "controlled by the [" + + IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey() + + "] index level setting." + ) + ); + + // resultWindow not greater than maxResultWindow and both rescore and sort are not null + context1.from(0); + DocValueFormat docValueFormat = mock(DocValueFormat.class); + SortAndFormats sortAndFormats = new SortAndFormats(new Sort(), new DocValueFormat[] { docValueFormat }); + context1.sort(sortAndFormats); + + RescoreContext rescoreContext = mock(RescoreContext.class); + when(rescoreContext.getWindowSize()).thenReturn(500); + context1.addRescore(rescoreContext); + + exception = expectThrows(IllegalArgumentException.class, context1::preProcess); + assertThat(exception.getMessage(), equalTo("Cannot use [sort] option in conjunction with [rescore].")); + + // rescore is null but sort is not null and rescoreContext.getWindowSize() exceeds maxResultWindow + context1.sort(null); + exception = expectThrows(IllegalArgumentException.class, context1::preProcess); + + assertThat( + exception.getMessage(), + equalTo( + "Rescore window [" + + rescoreContext.getWindowSize() + + "] is too large. " + + "It must be less than [" + + maxRescoreWindow + + "]. This prevents allocating massive heaps for storing the results " + + "to be rescored. This limit can be set by changing the [" + + IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey() + + "] index level setting." 
+ ) + ); + } readerContext.close(); readerContext = new ReaderContext( @@ -253,90 +256,100 @@ public ScrollContext scrollContext() { } }; // rescore is null but sliceBuilder is not null - DefaultSearchContext context2 = new DefaultSearchContext( - readerContext, - shardSearchRequest, - target, - null, - timeout, - null, - false, - null, - randomInt(), - randomInt() - ); - - SliceBuilder sliceBuilder = mock(SliceBuilder.class); - int numSlices = maxSlicesPerScroll + randomIntBetween(1, 100); - when(sliceBuilder.getMax()).thenReturn(numSlices); - context2.sliceBuilder(sliceBuilder); - - exception = expectThrows(IllegalArgumentException.class, context2::preProcess); - assertThat( - exception.getMessage(), - equalTo( - "The number of slices [" - + numSlices - + "] is too large. It must " - + "be less than [" - + maxSlicesPerScroll - + "]. This limit can be set by changing the [" - + IndexSettings.MAX_SLICES_PER_SCROLL.getKey() - + "] index level setting." + try ( + DefaultSearchContext context2 = new DefaultSearchContext( + readerContext, + shardSearchRequest, + target, + null, + timeout, + null, + false, + null, + randomInt(), + randomInt() ) - ); + ) { - // No exceptions should be thrown - when(shardSearchRequest.getAliasFilter()).thenReturn(AliasFilter.EMPTY); - when(shardSearchRequest.indexBoost()).thenReturn(AbstractQueryBuilder.DEFAULT_BOOST); + SliceBuilder sliceBuilder = mock(SliceBuilder.class); + int numSlices = maxSlicesPerScroll + randomIntBetween(1, 100); + when(sliceBuilder.getMax()).thenReturn(numSlices); + context2.sliceBuilder(sliceBuilder); + + exception = expectThrows(IllegalArgumentException.class, context2::preProcess); + assertThat( + exception.getMessage(), + equalTo( + "The number of slices [" + + numSlices + + "] is too large. It must " + + "be less than [" + + maxSlicesPerScroll + + "]. This limit can be set by changing the [" + + IndexSettings.MAX_SLICES_PER_SCROLL.getKey() + + "] index level setting." 
+ ) + ); + + // No exceptions should be thrown + when(shardSearchRequest.getAliasFilter()).thenReturn(AliasFilter.EMPTY); + when(shardSearchRequest.indexBoost()).thenReturn(AbstractQueryBuilder.DEFAULT_BOOST); + } - DefaultSearchContext context3 = new DefaultSearchContext( - readerContext, - shardSearchRequest, - target, - null, - timeout, - null, - false, - null, - randomInt(), - randomInt() - ); ParsedQuery parsedQuery = ParsedQuery.parsedMatchAllQuery(); - context3.sliceBuilder(null).parsedQuery(parsedQuery).preProcess(); - assertEquals(context3.query(), context3.buildFilteredQuery(parsedQuery.query())); - - when(searchExecutionContext.getFieldType(anyString())).thenReturn(mock(MappedFieldType.class)); - - readerContext.close(); - readerContext = new ReaderContext( - newContextId(), - indexService, - indexShard, - searcherSupplier.get(), - randomNonNegativeLong(), - false - ); - DefaultSearchContext context4 = new DefaultSearchContext( - readerContext, - shardSearchRequest, - target, - null, - timeout, - null, - false, - null, - randomInt(), - randomInt() - ); - context4.sliceBuilder(new SliceBuilder(1, 2)).parsedQuery(parsedQuery).preProcess(); - Query query1 = context4.query(); - context4.sliceBuilder(new SliceBuilder(0, 2)).parsedQuery(parsedQuery).preProcess(); - Query query2 = context4.query(); - assertTrue(query1 instanceof MatchNoDocsQuery || query2 instanceof MatchNoDocsQuery); - - readerContext.close(); - threadPool.shutdown(); + try ( + DefaultSearchContext context3 = new DefaultSearchContext( + readerContext, + shardSearchRequest, + target, + null, + timeout, + null, + false, + null, + randomInt(), + randomInt() + ) + ) { + context3.sliceBuilder(null).parsedQuery(parsedQuery).preProcess(); + assertEquals(context3.query(), context3.buildFilteredQuery(parsedQuery.query())); + + when(searchExecutionContext.getFieldType(anyString())).thenReturn(mock(MappedFieldType.class)); + + readerContext.close(); + readerContext = new ReaderContext( + newContextId(), + indexService, + indexShard, + searcherSupplier.get(), + randomNonNegativeLong(), + false + ); + } + + try ( + DefaultSearchContext context4 = new DefaultSearchContext( + readerContext, + shardSearchRequest, + target, + null, + timeout, + null, + false, + null, + randomInt(), + randomInt() + ) + ) { + context4.sliceBuilder(new SliceBuilder(1, 2)).parsedQuery(parsedQuery).preProcess(); + Query query1 = context4.query(); + context4.sliceBuilder(new SliceBuilder(0, 2)).parsedQuery(parsedQuery).preProcess(); + Query query2 = context4.query(); + assertTrue(query1 instanceof MatchNoDocsQuery || query2 instanceof MatchNoDocsQuery); + + readerContext.close(); + threadPool.shutdown(); + } } } diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java index 8f0444287d07e..1a9982f780a04 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -1948,8 +1948,8 @@ public void testEnableSearchWorkerThreads() throws IOException { try (ReaderContext readerContext = createReaderContext(indexService, indexShard)) { SearchService service = getInstanceFromNode(SearchService.class); SearchShardTask task = new SearchShardTask(0, "type", "action", "description", null, emptyMap()); - { - SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.DFS, randomBoolean()); + + try (SearchContext searchContext = 
service.createContext(readerContext, request, task, ResultsType.DFS, randomBoolean())) { assertNotNull(searchContext.searcher().getExecutor()); } @@ -1960,8 +1960,7 @@ public void testEnableSearchWorkerThreads() throws IOException { .setPersistentSettings(Settings.builder().put(SEARCH_WORKER_THREADS_ENABLED.getKey(), false).build()) .get(); assertTrue(response.isAcknowledged()); - { - SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.DFS, randomBoolean()); + try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.DFS, randomBoolean())) { assertNull(searchContext.searcher().getExecutor()); } } finally { @@ -1971,8 +1970,9 @@ public void testEnableSearchWorkerThreads() throws IOException { .prepareUpdateSettings() .setPersistentSettings(Settings.builder().putNull(SEARCH_WORKER_THREADS_ENABLED.getKey()).build()) .get(); - SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.DFS, randomBoolean()); - assertNotNull(searchContext.searcher().getExecutor()); + try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.DFS, randomBoolean())) { + assertNotNull(searchContext.searcher().getExecutor()); + } } } } diff --git a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java index 065a8bb22ab68..fafe66c743ce8 100644 --- a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java @@ -135,13 +135,14 @@ private TestSearchContext createContext(ContextIndexSearcher searcher, Query que private void countTestCase(Query query, IndexReader reader, boolean shouldCollectSearch, boolean shouldCollectCount) throws Exception { ContextIndexSearcher searcher = shouldCollectSearch ? newContextSearcher(reader) : noCollectionContextSearcher(reader); - TestSearchContext context = createContext(searcher, query); - context.setSize(0); + try (TestSearchContext context = createContext(searcher, query)) { + context.setSize(0); - QueryPhase.addCollectorsAndSearch(context); + QueryPhase.addCollectorsAndSearch(context); - ContextIndexSearcher countSearcher = shouldCollectCount ? newContextSearcher(reader) : noCollectionContextSearcher(reader); - assertEquals(countSearcher.count(query), context.queryResult().topDocs().topDocs.totalHits.value); + ContextIndexSearcher countSearcher = shouldCollectCount ? 
newContextSearcher(reader) : noCollectionContextSearcher(reader); + assertEquals(countSearcher.count(query), context.queryResult().topDocs().topDocs.totalHits.value); + } } private void countTestCase(boolean withDeletions) throws Exception { @@ -226,34 +227,30 @@ private int indexDocs(IndexWriterConfig iwc) throws IOException { public void testPostFilterDisablesHitCountShortcut() throws Exception { int numDocs = indexDocs(); - { - TestSearchContext context = createContext(noCollectionContextSearcher(reader), new MatchAllDocsQuery()); + try (TestSearchContext context = createContext(noCollectionContextSearcher(reader), new MatchAllDocsQuery())) { context.setSize(0); QueryPhase.addCollectorsAndSearch(context); assertEquals(numDocs, context.queryResult().topDocs().topDocs.totalHits.value); assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation); } - { + try (TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 10), new MatchAllDocsQuery())) { // shortcutTotalHitCount makes us not track total hits as part of the top docs collection, hence size is the threshold - TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 10), new MatchAllDocsQuery()); context.setSize(10); QueryPhase.addCollectorsAndSearch(context); assertEquals(numDocs, context.queryResult().topDocs().topDocs.totalHits.value); assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation); } - { + try (TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery())) { // QueryPhaseCollector does not propagate Weight#count when a post_filter is provided, hence it forces collection despite // the inner TotalHitCountCollector can shortcut - TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery()); context.setSize(0); context.parsedPostFilter(new ParsedQuery(new MatchNoDocsQuery())); QueryPhase.executeQuery(context); assertEquals(0, context.queryResult().topDocs().topDocs.totalHits.value); assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation); } - { + try (TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery())) { // shortcutTotalHitCount is disabled for filter collectors, hence we collect until track_total_hits - TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery()); context.setSize(10); context.parsedPostFilter(new ParsedQuery(new MatchNoDocsQuery())); QueryPhase.addCollectorsAndSearch(context); @@ -264,46 +261,43 @@ public void testPostFilterDisablesHitCountShortcut() throws Exception { public void testTerminateAfterWithFilter() throws Exception { indexDocs(); - TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery()); - context.terminateAfter(1); - context.setSize(10); - context.parsedPostFilter(new ParsedQuery(new TermQuery(new Term("foo", "bar")))); - QueryPhase.addCollectorsAndSearch(context); - assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value); - assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation); - assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + try (TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery())) { + context.terminateAfter(1); + context.setSize(10); + context.parsedPostFilter(new ParsedQuery(new 
TermQuery(new Term("foo", "bar")))); + QueryPhase.addCollectorsAndSearch(context); + assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value); + assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + } } public void testMinScoreDisablesHitCountShortcut() throws Exception { int numDocs = indexDocs(); - { - TestSearchContext context = createContext(noCollectionContextSearcher(reader), new MatchAllDocsQuery()); + try (TestSearchContext context = createContext(noCollectionContextSearcher(reader), new MatchAllDocsQuery())) { context.setSize(0); QueryPhase.addCollectorsAndSearch(context); assertEquals(numDocs, context.queryResult().topDocs().topDocs.totalHits.value); assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation); } - { + try (TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 10), new MatchAllDocsQuery())) { // shortcutTotalHitCount makes us not track total hits as part of the top docs collection, hence size is the threshold - TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 10), new MatchAllDocsQuery()); context.setSize(10); QueryPhase.addCollectorsAndSearch(context); assertEquals(numDocs, context.queryResult().topDocs().topDocs.totalHits.value); assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation); } - { + try (TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery())) { // QueryPhaseCollector does not propagate Weight#count when min_score is provided, hence it forces collection despite // the inner TotalHitCountCollector can shortcut - TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery()); context.setSize(0); context.minimumScore(100); QueryPhase.addCollectorsAndSearch(context); assertEquals(0, context.queryResult().topDocs().topDocs.totalHits.value); assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation); } - { + try (TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery())) { // shortcutTotalHitCount is disabled for filter collectors, hence we collect until track_total_hits - TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery()); context.setSize(10); context.minimumScore(100); QueryPhase.executeQuery(context); @@ -314,11 +308,12 @@ public void testMinScoreDisablesHitCountShortcut() throws Exception { public void testQueryCapturesThreadPoolStats() throws Exception { indexDocs(); - TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery()); - QueryPhase.addCollectorsAndSearch(context); - QuerySearchResult results = context.queryResult(); - assertThat(results.serviceTimeEWMA(), greaterThanOrEqualTo(0L)); - assertThat(results.nodeQueueSize(), greaterThanOrEqualTo(0)); + try (TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery())) { + QueryPhase.addCollectorsAndSearch(context); + QuerySearchResult results = context.queryResult(); + assertThat(results.serviceTimeEWMA(), greaterThanOrEqualTo(0L)); + assertThat(results.nodeQueueSize(), greaterThanOrEqualTo(0)); + } } public void testInOrderScrollOptimization() throws Exception { @@ -327,29 +322,30 @@ public void testInOrderScrollOptimization() throws Exception 
{ int numDocs = indexDocs(iwc); ScrollContext scrollContext = new ScrollContext(); - TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader), scrollContext); - context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); - context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); - context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW })); - scrollContext.lastEmittedDoc = null; - scrollContext.maxScore = Float.NaN; - scrollContext.totalHits = null; - int size = randomIntBetween(2, 5); - context.setSize(size); - - QueryPhase.addCollectorsAndSearch(context); - assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); - assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation); - assertNull(context.queryResult().terminatedEarly()); - assertThat(context.terminateAfter(), equalTo(0)); - assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); - - context.setSearcher(earlyTerminationContextSearcher(reader, size)); - QueryPhase.addCollectorsAndSearch(context); - assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); - assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); - assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation); - assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0].doc, greaterThanOrEqualTo(size)); + try (TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader), scrollContext)) { + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); + context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW })); + scrollContext.lastEmittedDoc = null; + scrollContext.maxScore = Float.NaN; + scrollContext.totalHits = null; + int size = randomIntBetween(2, 5); + context.setSize(size); + + QueryPhase.addCollectorsAndSearch(context); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation); + assertNull(context.queryResult().terminatedEarly()); + assertThat(context.terminateAfter(), equalTo(0)); + assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); + + context.setSearcher(earlyTerminationContextSearcher(reader, size)); + QueryPhase.addCollectorsAndSearch(context); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); + assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0].doc, greaterThanOrEqualTo(size)); + } } /** @@ -360,8 +356,7 @@ public void testInOrderScrollOptimization() throws Exception { */ public void testTerminateAfterSize0HitCountShortcut() throws Exception { int numDocs = indexDocs(); - { - TestSearchContext context = createContext(noCollectionContextSearcher(reader), new MatchAllDocsQuery()); + try (TestSearchContext context = createContext(noCollectionContextSearcher(reader), new MatchAllDocsQuery())) { context.terminateAfter(1); context.setSize(0); QueryPhase.addCollectorsAndSearch(context); @@ -371,8 +366,7 
@@ public void testTerminateAfterSize0HitCountShortcut() throws Exception { assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); } // test interaction between trackTotalHits and terminateAfter - { - TestSearchContext context = createContext(noCollectionContextSearcher(reader), new MatchAllDocsQuery()); + try (TestSearchContext context = createContext(noCollectionContextSearcher(reader), new MatchAllDocsQuery())) { context.terminateAfter(10); context.setSize(0); context.trackTotalHitsUpTo(-1); @@ -382,8 +376,7 @@ public void testTerminateAfterSize0HitCountShortcut() throws Exception { assertThat(context.queryResult().topDocs().topDocs.totalHits.relation, equalTo(TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); } - { - TestSearchContext context = createContext(noCollectionContextSearcher(reader), new MatchAllDocsQuery()); + try (TestSearchContext context = createContext(noCollectionContextSearcher(reader), new MatchAllDocsQuery())) { context.terminateAfter(10); context.setSize(0); // terminate_after is not honored, no matter the value of track_total_hits. @@ -406,8 +399,7 @@ public void testTerminateAfterSize0HitCountShortcut() throws Exception { public void testTerminateAfterSize0NoHitCountShortcut() throws Exception { indexDocs(); Query query = new NonCountingTermQuery(new Term("foo", "bar")); - { - TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 1), query); + try (TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 1), query)) { context.terminateAfter(1); context.setSize(0); QueryPhase.executeQuery(context); @@ -417,8 +409,7 @@ public void testTerminateAfterSize0NoHitCountShortcut() throws Exception { assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); } // test interaction between trackTotalHits and terminateAfter - { - TestSearchContext context = createContext(noCollectionContextSearcher(reader), query); + try (TestSearchContext context = createContext(noCollectionContextSearcher(reader), query)) { context.terminateAfter(10); context.setSize(0); // not tracking total hits makes the hit count collection early terminate, in which case terminate_after can't be honored @@ -434,18 +425,21 @@ public void testTerminateAfterSize0NoHitCountShortcut() throws Exception { // we don't use 9 (terminate_after - 1) because it makes the test unpredictable depending on the number of segments and // documents distribution: terminate_after may be honored at time due to the check before pulling each leaf collector. 
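All of the hunks in this test file apply the same conversion, summarized below as a sketch; the helpers (createContext, newContextSearcher) and the QueryPhase calls are the ones the file already uses, and the particular settings shown are illustrative rather than part of the change itself.

    // Condensed sketch of the pattern: contexts that used to live in bare blocks are now
    // opened in try-with-resources so they are released even when an assertion fails.
    try (TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery())) {
        context.terminateAfter(10);
        context.setSize(0);
        QueryPhase.addCollectorsAndSearch(context);
        // ... assertions on context.queryResult() ...
    } // SearchContext#close() runs here, whether or not the assertions passed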
int trackTotalHits = randomIntBetween(1, 8); - TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, trackTotalHits), query); - context.terminateAfter(10); - context.setSize(0); - context.trackTotalHitsUpTo(trackTotalHits); - QueryPhase.executeQuery(context); - assertFalse(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) trackTotalHits)); - assertThat(context.queryResult().topDocs().topDocs.totalHits.relation, equalTo(TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO)); - assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); + try (TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, trackTotalHits), query)) { + context.terminateAfter(10); + context.setSize(0); + context.trackTotalHitsUpTo(trackTotalHits); + QueryPhase.executeQuery(context); + assertFalse(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) trackTotalHits)); + assertThat( + context.queryResult().topDocs().topDocs.totalHits.relation, + equalTo(TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO) + ); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); + } } - { - TestSearchContext context = createContext(newContextSearcher(reader), query); + try (TestSearchContext context = createContext(newContextSearcher(reader), query)) { context.terminateAfter(10); context.setSize(0); // track total hits is higher than terminate_after, in which case collection effectively terminates after 10 documents @@ -466,8 +460,7 @@ public void testTerminateAfterSize0NoHitCountShortcut() throws Exception { */ public void testTerminateAfterWithHitsHitCountShortcut() throws Exception { int numDocs = indexDocs(); - { - TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery()); + try (TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery())) { context.terminateAfter(numDocs); context.setSize(10); QueryPhase.executeQuery(context); @@ -475,8 +468,7 @@ public void testTerminateAfterWithHitsHitCountShortcut() throws Exception { assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(10)); } - { - TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 1), new MatchAllDocsQuery()); + try (TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 1), new MatchAllDocsQuery())) { context.terminateAfter(1); // default track_total_hits, size 1: terminate_after kicks in first context.setSize(1); @@ -487,8 +479,7 @@ public void testTerminateAfterWithHitsHitCountShortcut() throws Exception { assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); } // test interaction between trackTotalHits and terminateAfter - { - TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 7), new MatchAllDocsQuery()); + try (TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 7), new MatchAllDocsQuery())) { context.terminateAfter(7); // total hits tracking disabled but 10 hits need to be collected, terminate_after is lower than size, so it kicks in first context.setSize(10); @@ -499,8 +490,7 @@ public void testTerminateAfterWithHitsHitCountShortcut() throws Exception { 
assertThat(context.queryResult().topDocs().topDocs.totalHits.relation, equalTo(TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(7)); } - { - TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 7), new MatchAllDocsQuery()); + try (TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 7), new MatchAllDocsQuery())) { context.terminateAfter(7); // size is greater than terminate_after (track_total_hits does not matter): terminate_after kicks in first context.setSize(10); @@ -513,18 +503,19 @@ public void testTerminateAfterWithHitsHitCountShortcut() throws Exception { } { int size = randomIntBetween(1, 6); - TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, size), new MatchAllDocsQuery()); - context.terminateAfter(7); - // size is lower than terminate_after, track_total_hits does not matter: depending on docs distribution we may or may not be - // able to honor terminate_after. low scoring hits are skipped via setMinCompetitiveScore, which bypasses terminate_after - // until the next leaf collector is pulled, when that happens. - context.setSize(size); - context.trackTotalHitsUpTo(randomIntBetween(1, Integer.MAX_VALUE)); - QueryPhase.executeQuery(context); - assertThat(context.queryResult().terminatedEarly(), either(is(true)).or(is(false))); - assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); - assertThat(context.queryResult().topDocs().topDocs.totalHits.relation, equalTo(TotalHits.Relation.EQUAL_TO)); - assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(size)); + try (TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, size), new MatchAllDocsQuery())) { + context.terminateAfter(7); + // size is lower than terminate_after, track_total_hits does not matter: depending on docs distribution we may or may not be + // able to honor terminate_after. low scoring hits are skipped via setMinCompetitiveScore, which bypasses terminate_after + // until the next leaf collector is pulled, when that happens. 
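The comment above leans on Lucene's minimum-competitive-score hand-off; a rough, self-contained sketch of that mechanism (simplified names, an assumption about where the threshold comes from, and not the Elasticsearch or Lucene source) shows why documents skipped by the scorer never count towards terminate_after:

    import org.apache.lucene.search.LeafCollector;
    import org.apache.lucene.search.Scorable;
    import java.io.IOException;

    // Sketch only: once a top-N collector has N competitive hits it raises this threshold,
    // letting the scorer skip non-competitive documents outright.
    final class MinScoreAwareLeafCollector implements LeafCollector {
        private final float minCompetitiveScore; // stand-in for the collector's current threshold

        MinScoreAwareLeafCollector(float minCompetitiveScore) {
            this.minCompetitiveScore = minCompetitiveScore;
        }

        @Override
        public void setScorer(Scorable scorer) throws IOException {
            scorer.setMinCompetitiveScore(minCompetitiveScore);
        }

        @Override
        public void collect(int doc) {
            // Only documents that reach this method are counted towards terminate_after;
            // docs skipped by the scorer never show up here, so the limit may never fire.
        }
    }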
+ context.setSize(size); + context.trackTotalHitsUpTo(randomIntBetween(1, Integer.MAX_VALUE)); + QueryPhase.executeQuery(context); + assertThat(context.queryResult().terminatedEarly(), either(is(true)).or(is(false))); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertThat(context.queryResult().topDocs().topDocs.totalHits.relation, equalTo(TotalHits.Relation.EQUAL_TO)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(size)); + } } } @@ -536,8 +527,7 @@ public void testTerminateAfterWithHitsHitCountShortcut() throws Exception { public void testTerminateAfterWithHitsNoHitCountShortcut() throws Exception { indexDocs(); TermQuery query = new NonCountingTermQuery(new Term("foo", "bar")); - { - TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 1), query); + try (TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 1), query)) { context.terminateAfter(1); context.setSize(1); QueryPhase.addCollectorsAndSearch(context); @@ -547,8 +537,7 @@ public void testTerminateAfterWithHitsNoHitCountShortcut() throws Exception { assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); } // test interaction between trackTotalHits and terminateAfter - { - TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 7), query); + try (TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 7), query)) { context.terminateAfter(7); context.setSize(10); context.trackTotalHitsUpTo(-1); @@ -558,8 +547,7 @@ public void testTerminateAfterWithHitsNoHitCountShortcut() throws Exception { assertThat(context.queryResult().topDocs().topDocs.totalHits.relation, equalTo(TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(7)); } - { - TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 7), query); + try (TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 7), query)) { context.terminateAfter(7); // size is greater than terminate_after context.setSize(10); @@ -571,8 +559,7 @@ public void testTerminateAfterWithHitsNoHitCountShortcut() throws Exception { assertThat(context.queryResult().topDocs().topDocs.totalHits.relation, equalTo(TotalHits.Relation.EQUAL_TO)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(7)); } - { - TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 7), query); + try (TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 7), query)) { context.terminateAfter(7); // size is lower than terminate_after context.setSize(5); @@ -586,8 +573,7 @@ public void testTerminateAfterWithHitsNoHitCountShortcut() throws Exception { assertThat(context.queryResult().topDocs().topDocs.totalHits.relation, equalTo(TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(5)); } - { - TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 7), query); + try (TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 7), query)) { context.terminateAfter(7); // size is greater than terminate_after context.setSize(10); @@ -606,8 +592,7 @@ public void testIndexSortingEarlyTermination() throws Exception { final Sort sort = new Sort(new SortField("rank", SortField.Type.INT)); IndexWriterConfig iwc 
= newIndexWriterConfig().setIndexSort(sort); int numDocs = indexDocs(iwc); - { - TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery()); + try (TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery())) { context.setSize(1); context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW })); QueryPhase.addCollectorsAndSearch(context); @@ -618,8 +603,7 @@ public void testIndexSortingEarlyTermination() throws Exception { FieldDoc fieldDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[0]; assertThat(fieldDoc.fields[0], equalTo(1)); } - { - TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery()); + try (TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery())) { context.setSize(1); context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW })); context.parsedPostFilter(new ParsedQuery(new MinDocQuery(1))); @@ -631,8 +615,7 @@ public void testIndexSortingEarlyTermination() throws Exception { FieldDoc fieldDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[0]; assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2))); } - { - TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery()); + try (TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery())) { context.setSize(1); context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW })); QueryPhase.executeQuery(context); @@ -643,8 +626,7 @@ public void testIndexSortingEarlyTermination() throws Exception { FieldDoc fieldDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[0]; assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2))); } - { - TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery()); + try (TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery())) { context.setSize(1); context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW })); context.setSearcher(earlyTerminationContextSearcher(reader, 1)); @@ -656,8 +638,7 @@ public void testIndexSortingEarlyTermination() throws Exception { FieldDoc fieldDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[0]; assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2))); } - { - TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery()); + try (TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery())) { context.setSize(1); context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW })); QueryPhase.addCollectorsAndSearch(context); @@ -692,42 +673,43 @@ public void testIndexSortScrollOptimization() throws Exception { searchSortAndFormats.add(new SortAndFormats(new Sort(indexSort.getSort()[0]), new DocValueFormat[] { DocValueFormat.RAW })); for (SortAndFormats searchSortAndFormat : searchSortAndFormats) { ScrollContext scrollContext = new ScrollContext(); - TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader), scrollContext); - context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); - scrollContext.lastEmittedDoc = null; - scrollContext.maxScore = Float.NaN; - scrollContext.totalHits = null; - context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); - context.setSize(10); - 
context.sort(searchSortAndFormat); - - QueryPhase.addCollectorsAndSearch(context); - assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); - assertNull(context.queryResult().terminatedEarly()); - assertThat(context.terminateAfter(), equalTo(0)); - assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); - int sizeMinus1 = context.queryResult().topDocs().topDocs.scoreDocs.length - 1; - FieldDoc lastDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[sizeMinus1]; - - context.setSearcher(earlyTerminationContextSearcher(reader, 10)); - QueryPhase.addCollectorsAndSearch(context); - assertNull(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); - assertThat(context.terminateAfter(), equalTo(0)); - assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); - FieldDoc firstDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[0]; - for (int i = 0; i < searchSortAndFormat.sort.getSort().length; i++) { - @SuppressWarnings("unchecked") - FieldComparator comparator = (FieldComparator) searchSortAndFormat.sort.getSort()[i].getComparator( - 1, - i == 0 - ); - int cmp = comparator.compareValues(firstDoc.fields[i], lastDoc.fields[i]); - if (cmp == 0) { - continue; + try (TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader), scrollContext)) { + context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); + scrollContext.lastEmittedDoc = null; + scrollContext.maxScore = Float.NaN; + scrollContext.totalHits = null; + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + context.setSize(10); + context.sort(searchSortAndFormat); + + QueryPhase.addCollectorsAndSearch(context); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertNull(context.queryResult().terminatedEarly()); + assertThat(context.terminateAfter(), equalTo(0)); + assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); + int sizeMinus1 = context.queryResult().topDocs().topDocs.scoreDocs.length - 1; + FieldDoc lastDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[sizeMinus1]; + + context.setSearcher(earlyTerminationContextSearcher(reader, 10)); + QueryPhase.addCollectorsAndSearch(context); + assertNull(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertThat(context.terminateAfter(), equalTo(0)); + assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); + FieldDoc firstDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[0]; + for (int i = 0; i < searchSortAndFormat.sort.getSort().length; i++) { + @SuppressWarnings("unchecked") + FieldComparator comparator = (FieldComparator) searchSortAndFormat.sort.getSort()[i].getComparator( + 1, + i == 0 + ); + int cmp = comparator.compareValues(firstDoc.fields[i], lastDoc.fields[i]); + if (cmp == 0) { + continue; + } + assertThat(cmp, equalTo(1)); + break; } - assertThat(cmp, equalTo(1)); - break; } } } @@ -751,8 +733,7 @@ public void testDisableTopScoreCollection() throws Exception { Query q = new SpanNearQuery.Builder("title", true).addClause(new SpanTermQuery(new Term("title", "foo"))) .addClause(new SpanTermQuery(new Term("title", "bar"))) .build(); - { - TestSearchContext context = 
createContext(newContextSearcher(reader), q); + try (TestSearchContext context = createContext(newContextSearcher(reader), q)) { context.setSize(3); context.trackTotalHitsUpTo(3); CollectorManager collectorManager = QueryPhaseCollectorManager.createQueryPhaseCollectorManager( @@ -767,8 +748,7 @@ public void testDisableTopScoreCollection() throws Exception { assertEquals(context.queryResult().topDocs().topDocs.totalHits.relation, TotalHits.Relation.EQUAL_TO); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(3)); } - { - TestSearchContext context = createContext(newContextSearcher(reader), q); + try (TestSearchContext context = createContext(newContextSearcher(reader), q)) { context.setSize(3); context.trackTotalHitsUpTo(3); context.sort( @@ -834,8 +814,7 @@ public void testNumericSortOptimization() throws Exception { Query q = LongPoint.newRangeQuery(fieldNameLong, startLongValue, startLongValue + numDocs); // 1. Test sort optimization on long field - { - TestSearchContext searchContext = createContext(newContextSearcher(reader), q); + try (TestSearchContext searchContext = createContext(newContextSearcher(reader), q)) { searchContext.sort(formatsLong); searchContext.trackTotalHitsUpTo(10); searchContext.setSize(10); @@ -845,8 +824,7 @@ public void testNumericSortOptimization() throws Exception { } // 2. Test sort optimization on long field with after - { - TestSearchContext searchContext = createContext(newContextSearcher(reader), q); + try (TestSearchContext searchContext = createContext(newContextSearcher(reader), q)) { int afterDoc = (int) randomLongBetween(0, 30); long afterValue = startLongValue + afterDoc; FieldDoc after = new FieldDoc(afterDoc, Float.NaN, new Long[] { afterValue }); @@ -863,8 +841,7 @@ public void testNumericSortOptimization() throws Exception { } // 3. Test sort optimization on long field + date field - { - TestSearchContext searchContext = createContext(newContextSearcher(reader), q); + try (TestSearchContext searchContext = createContext(newContextSearcher(reader), q)) { searchContext.sort(formatsLongDate); searchContext.trackTotalHitsUpTo(10); searchContext.setSize(10); @@ -874,8 +851,7 @@ public void testNumericSortOptimization() throws Exception { } // 4. Test sort optimization on date field - { - TestSearchContext searchContext = createContext(newContextSearcher(reader), q); + try (TestSearchContext searchContext = createContext(newContextSearcher(reader), q)) { searchContext.sort(formatsDate); searchContext.trackTotalHitsUpTo(10); searchContext.setSize(10); @@ -885,8 +861,7 @@ public void testNumericSortOptimization() throws Exception { } // 5. Test sort optimization on date field + long field - { - TestSearchContext searchContext = createContext(newContextSearcher(reader), q); + try (TestSearchContext searchContext = createContext(newContextSearcher(reader), q)) { searchContext.sort(formatsDateLong); searchContext.trackTotalHitsUpTo(10); searchContext.setSize(10); @@ -896,8 +871,7 @@ public void testNumericSortOptimization() throws Exception { } // 6. Test sort optimization on when from > 0 and size = 0 - { - TestSearchContext searchContext = createContext(newContextSearcher(reader), q); + try (TestSearchContext searchContext = createContext(newContextSearcher(reader), q)) { searchContext.sort(formatsLong); searchContext.trackTotalHitsUpTo(10); searchContext.from(5); @@ -910,8 +884,7 @@ public void testNumericSortOptimization() throws Exception { } // 7. 
Test that sort optimization doesn't break a case where from = 0 and size= 0 - { - TestSearchContext searchContext = createContext(newContextSearcher(reader), q); + try (TestSearchContext searchContext = createContext(newContextSearcher(reader), q)) { searchContext.sort(formatsLong); searchContext.setSize(0); QueryPhase.addCollectorsAndSearch(searchContext); @@ -1009,13 +982,14 @@ public void testMinScore() throws Exception { BooleanQuery booleanQuery = new BooleanQuery.Builder().add(new TermQuery(new Term("foo", "bar")), Occur.MUST) .add(new TermQuery(new Term("filter", "f1")), Occur.SHOULD) .build(); - TestSearchContext context = createContext(newContextSearcher(reader), booleanQuery); - context.minimumScore(0.01f); - context.setSize(1); - context.trackTotalHitsUpTo(5); + try (TestSearchContext context = createContext(newContextSearcher(reader), booleanQuery)) { + context.minimumScore(0.01f); + context.setSize(1); + context.trackTotalHitsUpTo(5); - QueryPhase.addCollectorsAndSearch(context); - assertEquals(10, context.queryResult().topDocs().topDocs.totalHits.value); + QueryPhase.addCollectorsAndSearch(context); + assertEquals(10, context.queryResult().topDocs().topDocs.totalHits.value); + } } public void testCancellationDuringRewrite() throws IOException { @@ -1030,12 +1004,13 @@ public void testCancellationDuringRewrite() throws IOException { reader = DirectoryReader.open(dir); PrefixQuery prefixQuery = new PrefixQuery(new Term("foo", "a"), MultiTermQuery.SCORING_BOOLEAN_REWRITE); - TestSearchContext context = createContext(newContextSearcher(reader), prefixQuery); - SearchShardTask task = new SearchShardTask(randomLong(), "transport", "", "", TaskId.EMPTY_TASK_ID, Collections.emptyMap()); - TaskCancelHelper.cancel(task, "simulated"); - context.setTask(task); - context.searcher().addQueryCancellation(task::ensureNotCancelled); - expectThrows(TaskCancelledException.class, context::rewrittenQuery); + try (TestSearchContext context = createContext(newContextSearcher(reader), prefixQuery)) { + SearchShardTask task = new SearchShardTask(randomLong(), "transport", "", "", TaskId.EMPTY_TASK_ID, Collections.emptyMap()); + TaskCancelHelper.cancel(task, "simulated"); + context.setTask(task); + context.searcher().addQueryCancellation(task::ensureNotCancelled); + expectThrows(TaskCancelledException.class, context::rewrittenQuery); + } } public void testRank() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTimeoutTests.java b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTimeoutTests.java index f0e3c9ac28f00..9c1bdb236c031 100644 --- a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTimeoutTests.java +++ b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTimeoutTests.java @@ -128,20 +128,22 @@ public void testScorerTimeoutPoints() throws IOException { private void scorerTimeoutTest(int size, CheckedConsumer timeoutTrigger) throws IOException { { TimeoutQuery query = newMatchAllScorerTimeoutQuery(timeoutTrigger, false); - SearchContext context = createSearchContext(query, size); - QueryPhase.executeQuery(context); - assertFalse(context.queryResult().searchTimedOut()); - assertEquals(numDocs, context.queryResult().topDocs().topDocs.totalHits.value); - assertEquals(size, context.queryResult().topDocs().topDocs.scoreDocs.length); + try (SearchContext context = createSearchContext(query, size)) { + QueryPhase.executeQuery(context); + assertFalse(context.queryResult().searchTimedOut()); + assertEquals(numDocs, 
context.queryResult().topDocs().topDocs.totalHits.value); + assertEquals(size, context.queryResult().topDocs().topDocs.scoreDocs.length); + } } { TimeoutQuery query = newMatchAllScorerTimeoutQuery(timeoutTrigger, true); - SearchContext context = createSearchContextWithTimeout(query, size); - QueryPhase.executeQuery(context); - assertTrue(context.queryResult().searchTimedOut()); - int firstSegmentMaxDoc = reader.leaves().get(0).reader().maxDoc(); - assertEquals(Math.min(2048, firstSegmentMaxDoc), context.queryResult().topDocs().topDocs.totalHits.value); - assertEquals(Math.min(size, firstSegmentMaxDoc), context.queryResult().topDocs().topDocs.scoreDocs.length); + try (SearchContext context = createSearchContextWithTimeout(query, size)) { + QueryPhase.executeQuery(context); + assertTrue(context.queryResult().searchTimedOut()); + int firstSegmentMaxDoc = reader.leaves().get(0).reader().maxDoc(); + assertEquals(Math.min(2048, firstSegmentMaxDoc), context.queryResult().topDocs().topDocs.totalHits.value); + assertEquals(Math.min(size, firstSegmentMaxDoc), context.queryResult().topDocs().topDocs.scoreDocs.length); + } } } @@ -174,20 +176,22 @@ public void testBulkScorerTimeout() throws IOException { int size = randomBoolean() ? 0 : randomIntBetween(100, 500); { TimeoutQuery query = newMatchAllBulkScorerTimeoutQuery(false); - SearchContext context = createSearchContext(query, size); - QueryPhase.executeQuery(context); - assertFalse(context.queryResult().searchTimedOut()); - assertEquals(numDocs, context.queryResult().topDocs().topDocs.totalHits.value); - assertEquals(size, context.queryResult().topDocs().topDocs.scoreDocs.length); + try (SearchContext context = createSearchContext(query, size)) { + QueryPhase.executeQuery(context); + assertFalse(context.queryResult().searchTimedOut()); + assertEquals(numDocs, context.queryResult().topDocs().topDocs.totalHits.value); + assertEquals(size, context.queryResult().topDocs().topDocs.scoreDocs.length); + } } { TimeoutQuery query = newMatchAllBulkScorerTimeoutQuery(true); - SearchContext context = createSearchContextWithTimeout(query, size); - QueryPhase.executeQuery(context); - assertTrue(context.queryResult().searchTimedOut()); - int firstSegmentMaxDoc = reader.leaves().get(0).reader().maxDoc(); - assertEquals(Math.min(2048, firstSegmentMaxDoc), context.queryResult().topDocs().topDocs.totalHits.value); - assertEquals(Math.min(size, firstSegmentMaxDoc), context.queryResult().topDocs().topDocs.scoreDocs.length); + try (SearchContext context = createSearchContextWithTimeout(query, size)) { + QueryPhase.executeQuery(context); + assertTrue(context.queryResult().searchTimedOut()); + int firstSegmentMaxDoc = reader.leaves().get(0).reader().maxDoc(); + assertEquals(Math.min(2048, firstSegmentMaxDoc), context.queryResult().topDocs().topDocs.totalHits.value); + assertEquals(Math.min(size, firstSegmentMaxDoc), context.queryResult().topDocs().topDocs.scoreDocs.length); + } } } diff --git a/server/src/test/java/org/elasticsearch/transport/TransportInfoTests.java b/server/src/test/java/org/elasticsearch/transport/TransportInfoTests.java index 83ac8f883dae1..8e23f0e3984b9 100644 --- a/server/src/test/java/org/elasticsearch/transport/TransportInfoTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TransportInfoTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.transport; -import org.elasticsearch.Version; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.transport.BoundTransportAddress; import 
org.elasticsearch.common.transport.TransportAddress; @@ -33,10 +32,6 @@ private TransportInfo createTransportInfo(InetAddress address, int port, boolean return new TransportInfo(boundAddress, profiles, cnameInPublishAddressProperty); } - public void testDoNotForgetToRemoveProperty() { - assertTrue("Remove es.transport.cname_in_publish_address property from TransportInfo in 9.0.0", Version.CURRENT.major < 9); - } - public void testCorrectlyDisplayPublishedCname() throws Exception { InetAddress address = InetAddress.getByName("localhost"); int port = 9200; diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java index 138ab77035b43..e1949d78e86c2 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java @@ -43,6 +43,7 @@ import org.elasticsearch.index.IndexVersions; import org.elasticsearch.snapshots.SnapshotShardSizeInfo; import org.elasticsearch.snapshots.SnapshotsInfoService; +import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.gateway.TestGatewayAllocator; @@ -161,7 +162,8 @@ private static DesiredBalanceShardsAllocator createDesiredBalanceShardsAllocator new BalancedShardsAllocator(settings), queue.getThreadPool(), clusterService, - null + null, + TelemetryProvider.NOOP ) { private RoutingAllocation lastAllocation; diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java index e73cd086bc019..9017e88f430b5 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java @@ -113,7 +113,7 @@ public static DataStream newInstance( boolean replicated, @Nullable DataStreamLifecycle lifecycle ) { - return new DataStream(name, indices, generation, metadata, false, replicated, false, false, null, lifecycle); + return new DataStream(name, indices, generation, metadata, false, replicated, false, false, null, lifecycle, false, List.of()); } public static String getLegacyDefaultBackingIndexName( @@ -244,6 +244,11 @@ public static DataStream randomInstance(String dataStreamName, LongSupplier time if (randomBoolean()) { metadata = Map.of("key", "value"); } + List failureIndices = List.of(); + boolean failureStore = randomBoolean(); + if (failureStore) { + failureIndices = randomIndexInstances(); + } return new DataStream( dataStreamName, @@ -256,7 +261,9 @@ public static DataStream randomInstance(String dataStreamName, LongSupplier time timeProvider, randomBoolean(), randomBoolean() ? IndexMode.STANDARD : null, // IndexMode.TIME_SERIES triggers validation that many unit tests doesn't pass - randomBoolean() ? DataStreamLifecycle.newBuilder().dataRetention(randomMillisUpToYear9999()).build() : null + randomBoolean() ? 
DataStreamLifecycle.newBuilder().dataRetention(randomMillisUpToYear9999()).build() : null, + failureStore, + failureIndices ); } diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java index 6e62f154a1bf6..7d66a2d06a5b8 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java @@ -16,8 +16,11 @@ import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.blobstore.OperationPurpose; import org.elasticsearch.common.blobstore.support.BlobMetadata; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.settings.SecureSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Streams; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil; import org.elasticsearch.snapshots.SnapshotState; @@ -25,13 +28,16 @@ import org.elasticsearch.threadpool.ThreadPool; import java.io.ByteArrayInputStream; +import java.io.IOException; import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.concurrent.Executor; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.empty; @@ -218,6 +224,58 @@ public void testCleanup() throws Exception { assertCleanupResponse(response, 3L, 1L); } + public void testIndexLatest() throws Exception { + // This test verifies that every completed snapshot operation updates a blob called literally 'index.latest' (by default at least), + // which is important because some external systems use the freshness of this specific blob as an indicator of whether a repository + // is in use. Most notably, ESS checks this blob as an extra layer of protection against a bug in the delete-old-repositories + // process incorrectly deleting repositories that have seen recent writes. It's possible that some future development might change + // the meaning of this blob, and that's ok, but we must continue to update it to keep those external systems working. 
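For readers wondering what the blob contains: in current repository formats it is believed to hold the latest repository generation as a single big-endian long, although the test below deliberately treats it as opaque bytes. A minimal decoding sketch under that assumption, with a hypothetical helper name, would be:

    import java.nio.ByteBuffer;
    import org.elasticsearch.common.bytes.BytesReference;

    final class IndexLatestBlob {
        // Assumption, not relied on by the test: the blob encodes the latest repository
        // generation as one 8-byte big-endian long.
        static long decode(BytesReference blob) {
            return ByteBuffer.wrap(BytesReference.toBytes(blob)).getLong();
        }
    }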
+ + createIndex("test-idx-1"); + for (int i = 0; i < 100; i++) { + client().prepareIndex("test-idx-1").setId(Integer.toString(i)).setSource("foo", "bar" + i).get(); + } + + final var repository = getRepository(); + final var blobContents = new HashSet(); + + final var createSnapshot1Response = clusterAdmin().prepareCreateSnapshot(TEST_REPO_NAME, randomIdentifier()) + .setWaitForCompletion(true) + .get(); + assertTrue(blobContents.add(readIndexLatest(repository))); + + clusterAdmin().prepareGetSnapshots(TEST_REPO_NAME).get(); + assertFalse(blobContents.add(readIndexLatest(repository))); + + final var createSnapshot2Response = clusterAdmin().prepareCreateSnapshot(TEST_REPO_NAME, randomIdentifier()) + .setWaitForCompletion(true) + .get(); + assertTrue(blobContents.add(readIndexLatest(repository))); + + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REPO_NAME, createSnapshot1Response.getSnapshotInfo().snapshotId().getName())); + assertTrue(blobContents.add(readIndexLatest(repository))); + + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REPO_NAME, createSnapshot2Response.getSnapshotInfo().snapshotId().getName())); + assertTrue(blobContents.add(readIndexLatest(repository))); + } + + private static BytesReference readIndexLatest(BlobStoreRepository repository) throws IOException { + try (var baos = new BytesStreamOutput()) { + Streams.copy( + repository.blobStore() + .blobContainer(repository.basePath()) + .readBlob( + OperationPurpose.SNAPSHOT, + // Deliberately not using BlobStoreRepository#INDEX_LATEST_BLOB here, it's important for external systems that a + // blob with literally this name is updated on each write: + "index.latest" + ), + baos + ); + return baos.bytes(); + } + } + protected void assertCleanupResponse(CleanupRepositoryResponse response, long bytes, long blobs) { assertThat(response.result().blobs(), equalTo(1L + 2L)); assertThat(response.result().bytes(), equalTo(3L + 2 * 3L)); diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java index c5a9a9ae7c6de..3615686674bab 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java @@ -179,7 +179,7 @@ */ public abstract class AggregatorTestCase extends ESTestCase { private NamedWriteableRegistry namedWriteableRegistry; - private final List releasables = new ArrayList<>(); + private final List releasables = new ArrayList<>(); protected ValuesSourceRegistry valuesSourceRegistry; private AnalysisModule analysisModule; @@ -328,7 +328,7 @@ private AggregationContext createAggregationContext( int maxBucket, boolean isInSortOrderExecutionRequired, MappedFieldType... fieldTypes - ) throws IOException { + ) { MappingLookup mappingLookup = MappingLookup.fromMappers( Mapping.EMPTY, Arrays.stream(fieldTypes).map(this::buildMockFieldMapper).collect(toList()), @@ -416,7 +416,7 @@ protected List objectMappers() { /** * Build a {@link SubSearchContext}s to power {@code top_hits}. 
*/ - private static SubSearchContext buildSubSearchContext( + private SubSearchContext buildSubSearchContext( IndexSettings indexSettings, SearchExecutionContext searchExecutionContext, BitsetFilterCache bitsetFilterCache @@ -455,7 +455,9 @@ private static SubSearchContext buildSubSearchContext( when(ctx.indexShard()).thenReturn(indexShard); when(ctx.newSourceLoader()).thenAnswer(inv -> searchExecutionContext.newSourceLoader(false)); when(ctx.newIdLoader()).thenReturn(IdLoader.fromLeafStoredFieldLoader()); - return new SubSearchContext(ctx); + var res = new SubSearchContext(ctx); + releasables.add(res); // TODO: nasty workaround for not getting the standard resource handling behavior of a real search context + return res; } protected IndexSettings createIndexSettings() { diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java index 3ec327f7f3332..80d1b82fbfcfe 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java +++ b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java @@ -452,16 +452,6 @@ public void seqNoAndPrimaryTerm(boolean seqNoAndPrimaryTerm) { } - @Override - public int[] docIdsToLoad() { - return new int[0]; - } - - @Override - public SearchContext docIdsToLoad(int[] docIdsToLoad) { - return null; - } - @Override public DfsSearchResult dfsResult() { return null; diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index aff8a20aa88b6..3327137cef7b7 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -112,6 +112,7 @@ import static java.util.Collections.sort; import static java.util.Collections.unmodifiableList; +import static org.elasticsearch.client.RestClient.IGNORE_RESPONSE_CODES_PARAM; import static org.elasticsearch.core.Strings.format; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; @@ -1157,7 +1158,7 @@ private void wipeRollupJobs() throws IOException { @SuppressWarnings("unchecked") String jobId = (String) ((Map) jobConfig.get("config")).get("id"); Request request = new Request("POST", "/_rollup/job/" + jobId + "/_stop"); - request.addParameter("ignore", "404"); + setIgnoredErrorResponseCodes(request, RestStatus.NOT_FOUND); request.addParameter("wait_for_completion", "true"); request.addParameter("timeout", "10s"); logger.debug("stopping rollup job [{}]", jobId); @@ -1168,7 +1169,7 @@ private void wipeRollupJobs() throws IOException { @SuppressWarnings("unchecked") String jobId = (String) ((Map) jobConfig.get("config")).get("id"); Request request = new Request("DELETE", "/_rollup/job/" + jobId); - request.addParameter("ignore", "404"); // Ignore 404s because they imply someone was racing us to delete this + setIgnoredErrorResponseCodes(request, RestStatus.NOT_FOUND); // 404s imply someone was racing us to delete this logger.debug("deleting rollup job [{}]", jobId); adminClient().performRequest(request); } @@ -1485,8 +1486,9 @@ private static Set runningTasks(Response response) throws IOException { return runningTasks; } - public static void assertOK(Response response) { + public static Response assertOK(Response response) { assertThat(response.getStatusLine().getStatusCode(), anyOf(equalTo(200), equalTo(201))); + return 
response; } public static ObjectPath assertOKAndCreateObjectPath(Response response) throws IOException { @@ -1845,7 +1847,7 @@ protected static void deleteSnapshot(RestClient restClient, String repository, S throws IOException { final Request request = new Request(HttpDelete.METHOD_NAME, "_snapshot/" + repository + '/' + snapshot); if (ignoreMissing) { - request.addParameter("ignore", "404"); + setIgnoredErrorResponseCodes(request, RestStatus.NOT_FOUND); } final Response response = restClient.performRequest(request); assertThat(response.getStatusLine().getStatusCode(), ignoreMissing ? anyOf(equalTo(200), equalTo(404)) : equalTo(200)); @@ -2243,4 +2245,11 @@ protected Map getHistoricalFeatures() { return historicalFeatures; } + + public static void setIgnoredErrorResponseCodes(Request request, RestStatus... restStatuses) { + request.addParameter( + IGNORE_RESPONSE_CODES_PARAM, + Arrays.stream(restStatuses).map(restStatus -> Integer.toString(restStatus.getStatus())).collect(Collectors.joining(",")) + ); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index e0cd47c48515b..90c3b1f062e94 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -11,7 +11,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -33,6 +32,7 @@ import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.node.Node; import org.elasticsearch.plugins.Plugin; @@ -541,12 +541,7 @@ public void sendRequest( request.writeTo(bStream); final TransportRequest clonedRequest; if (request instanceof BytesTransportRequest) { - // Some request handlers read back a BytesTransportRequest - // into a different class that cannot be re-serialized (i.e. JOIN_VALIDATE_ACTION_NAME), - // in those cases we just copy the raw bytes back to a BytesTransportRequest. - // This is only needed for the BwC for JOIN_VALIDATE_ACTION_NAME and can be removed in the next major - assert Version.CURRENT.major == Version.V_7_17_0.major + 1; - clonedRequest = new BytesTransportRequest(bStream.bytes().streamInput()); + clonedRequest = copyRawBytesForBwC(bStream); } else { RequestHandlerRegistry reg = MockTransportService.this.getRequestHandler(action); clonedRequest = reg.newRequest(bStream.bytes().streamInput()); @@ -576,6 +571,15 @@ protected void doRun() throws IOException { } } + // Some request handlers read back a BytesTransportRequest + // into a different class that cannot be re-serialized (i.e. JOIN_VALIDATE_ACTION_NAME), + // in those cases we just copy the raw bytes back to a BytesTransportRequest. 
+ // This is only needed for the BwC for JOIN_VALIDATE_ACTION_NAME and can be removed in the next major + @UpdateForV9 + private static TransportRequest copyRawBytesForBwC(BytesStreamOutput bStream) throws IOException { + return new BytesTransportRequest(bStream.bytes().streamInput()); + } + @Override public void clearCallback() { synchronized (this) { diff --git a/test/metadata-extractor/src/main/java/org/elasticsearch/extractor/features/HistoricalFeaturesMetadataExtractor.java b/test/metadata-extractor/src/main/java/org/elasticsearch/extractor/features/HistoricalFeaturesMetadataExtractor.java index c969b09ea982d..33162bcfa1eca 100644 --- a/test/metadata-extractor/src/main/java/org/elasticsearch/extractor/features/HistoricalFeaturesMetadataExtractor.java +++ b/test/metadata-extractor/src/main/java/org/elasticsearch/extractor/features/HistoricalFeaturesMetadataExtractor.java @@ -9,6 +9,7 @@ package org.elasticsearch.extractor.features; import org.elasticsearch.Version; +import org.elasticsearch.common.logging.LogConfigurator; import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xcontent.XContentGenerator; @@ -29,6 +30,11 @@ public class HistoricalFeaturesMetadataExtractor { private final ClassLoader classLoader; + static { + // Make sure we initialize logging since this is normally done by Elasticsearch startup + LogConfigurator.configureESLogging(); + } + public HistoricalFeaturesMetadataExtractor(ClassLoader classLoader) { this.classLoader = classLoader; } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/FeatureFlag.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/FeatureFlag.java index 122989eaec65a..b83cc7bba06e5 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/FeatureFlag.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/FeatureFlag.java @@ -16,7 +16,8 @@ */ public enum FeatureFlag { TIME_SERIES_MODE("es.index_mode_feature_flag_registered=true", Version.fromString("8.0.0"), null), - INFERENCE_RESCORER("es.inference_rescorer_feature_flag_enabled=true", Version.fromString("8.10.0"), null); + INFERENCE_RESCORER("es.inference_rescorer_feature_flag_enabled=true", Version.fromString("8.10.0"), null), + FAILURE_STORE_ENABLED("es.failure_store_feature_flag_enabled=true", Version.fromString("8.12.0"), null); public final String systemProperty; public final Version from; diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/restspec/ClientYamlSuiteRestSpec.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/restspec/ClientYamlSuiteRestSpec.java index be34ee9be0ea1..8662d886cce89 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/restspec/ClientYamlSuiteRestSpec.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/restspec/ClientYamlSuiteRestSpec.java @@ -24,6 +24,8 @@ import java.util.Set; import java.util.stream.Stream; +import static org.elasticsearch.client.RestClient.IGNORE_RESPONSE_CODES_PARAM; + /** * Holds the specification used to turn {@code do} actions in the YAML suite into REST api calls. 
*/ @@ -69,7 +71,7 @@ public boolean isGlobalParameter(String param) { * that they influence the client behaviour and don't get sent to Elasticsearch */ public boolean isClientParameter(String name) { - return "ignore".equals(name); + return IGNORE_RESPONSE_CODES_PARAM.equals(name); } /** diff --git a/x-pack/plugin/apm-data/src/yamlRestTest/java/org/elasticsearch/xpack/apmdata/APMYamlTestSuiteIT.java b/x-pack/plugin/apm-data/src/yamlRestTest/java/org/elasticsearch/xpack/apmdata/APMYamlTestSuiteIT.java index 77cac16a4e90c..5835a41479a68 100644 --- a/x-pack/plugin/apm-data/src/yamlRestTest/java/org/elasticsearch/xpack/apmdata/APMYamlTestSuiteIT.java +++ b/x-pack/plugin/apm-data/src/yamlRestTest/java/org/elasticsearch/xpack/apmdata/APMYamlTestSuiteIT.java @@ -10,13 +10,11 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; import org.junit.ClassRule; -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/101929") public class APMYamlTestSuiteIT extends ESClientYamlSuiteTestCase { @ClassRule diff --git a/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/10_apm.yml b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/10_apm.yml index 0c538c345ebaa..70b6904bc4a28 100644 --- a/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/10_apm.yml +++ b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/10_apm.yml @@ -154,6 +154,11 @@ setup: values: [1.5, 2.5, 3.5] - set: items.0.create._index: index + - do: + # Wait for cluster state changes to be applied before + # querying field mappings. + cluster.health: + wait_for_events: languid - do: indices.get_field_mapping: index: metrics-apm.app.svc1-testing diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/index/shard/CloseFollowerIndexErrorSuppressionHelper.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/index/shard/CloseFollowerIndexErrorSuppressionHelper.java new file mode 100644 index 0000000000000..89ba41317e0e3 --- /dev/null +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/index/shard/CloseFollowerIndexErrorSuppressionHelper.java @@ -0,0 +1,14 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.index.shard; + +public class CloseFollowerIndexErrorSuppressionHelper { + public static void setSuppressCreateEngineErrors(boolean value) { + IndexShard.suppressCreateEngineErrors = value; + } +} diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CloseFollowerIndexIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CloseFollowerIndexIT.java index 64ebb20538832..8e597c3992528 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CloseFollowerIndexIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CloseFollowerIndexIT.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.engine.ReadOnlyEngine; +import org.elasticsearch.index.shard.CloseFollowerIndexErrorSuppressionHelper; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.CcrIntegTestCase; import org.elasticsearch.xpack.core.ccr.action.PutFollowAction; @@ -39,13 +40,16 @@ public class CloseFollowerIndexIT extends CcrIntegTestCase { @Before public void wrapUncaughtExceptionHandler() { + CloseFollowerIndexErrorSuppressionHelper.setSuppressCreateEngineErrors(true); uncaughtExceptionHandler = Thread.getDefaultUncaughtExceptionHandler(); AccessController.doPrivileged((PrivilegedAction) () -> { Thread.setDefaultUncaughtExceptionHandler((t, e) -> { - if (t.getThreadGroup().getName().contains(getTestClass().getSimpleName())) { + if (t.getThreadGroup().getName().contains(getTestClass().getSimpleName()) + && t.getName().equals("elasticsearch-error-rethrower")) { for (StackTraceElement element : e.getStackTrace()) { if (element.getClassName().equals(ReadOnlyEngine.class.getName())) { if (element.getMethodName().equals("assertMaxSeqNoEqualsToGlobalCheckpoint")) { + logger.error("HACK: suppressing uncaught exception thrown from assertMaxSeqNoEqualsToGlobalCheckpoint", e); return; } } @@ -59,6 +63,7 @@ public void wrapUncaughtExceptionHandler() { @After public void restoreUncaughtExceptionHandler() { + CloseFollowerIndexErrorSuppressionHelper.setSuppressCreateEngineErrors(false); AccessController.doPrivileged((PrivilegedAction) () -> { Thread.setDefaultUncaughtExceptionHandler(uncaughtExceptionHandler); return null; diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java index 559cfe0cbe4cc..b06ff73e29960 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java @@ -332,7 +332,9 @@ static DataStream updateLocalDataStream( remoteDataStream.isSystem(), remoteDataStream.isAllowCustomRouting(), remoteDataStream.getIndexMode(), - remoteDataStream.getLifecycle() + remoteDataStream.getLifecycle(), + remoteDataStream.isFailureStore(), + remoteDataStream.getFailureIndices() ); } else { if (localDataStream.isReplicated() == false) { @@ -383,7 +385,9 @@ static DataStream updateLocalDataStream( localDataStream.isSystem(), localDataStream.isAllowCustomRouting(), localDataStream.getIndexMode(), - localDataStream.getLifecycle() + localDataStream.getLifecycle(), + localDataStream.isFailureStore(), + localDataStream.getFailureIndices() ); } } diff --git 
a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportActionIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportActionIT.java index 4da0195c9a3c4..c102470628a00 100644 --- a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportActionIT.java +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportActionIT.java @@ -71,7 +71,8 @@ private void cleanup() throws Exception { @SuppressWarnings("unchecked") public void testAction() throws Exception { assertUsageResults(0, 0, 0, 0.0, true); - AtomicLong count = new AtomicLong(0); + AtomicLong totalCount = new AtomicLong(0); + AtomicLong countLifecycleWithRetention = new AtomicLong(0); AtomicLong totalRetentionTimes = new AtomicLong(0); AtomicLong minRetention = new AtomicLong(Long.MAX_VALUE); AtomicLong maxRetention = new AtomicLong(Long.MIN_VALUE); @@ -94,11 +95,13 @@ public void testAction() throws Exception { if (hasLifecycle) { if (randomBoolean()) { lifecycle = new DataStreamLifecycle(null, null, null); + totalCount.incrementAndGet(); } else { long retentionMillis = randomLongBetween(1000, 100000); boolean isEnabled = randomBoolean(); if (isEnabled) { - count.incrementAndGet(); + totalCount.incrementAndGet(); + countLifecycleWithRetention.incrementAndGet(); totalRetentionTimes.addAndGet(retentionMillis); if (retentionMillis < minRetention.get()) { @@ -129,7 +132,9 @@ public void testAction() throws Exception { systemDataStream, randomBoolean(), IndexMode.STANDARD, - lifecycle + lifecycle, + false, + List.of() ); dataStreamMap.put(dataStream.getName(), dataStream); } @@ -141,9 +146,11 @@ public void testAction() throws Exception { }); int expectedMinimumRetention = minRetention.get() == Long.MAX_VALUE ? 0 : minRetention.intValue(); int expectedMaximumRetention = maxRetention.get() == Long.MIN_VALUE ? 0 : maxRetention.intValue(); - double expectedAverageRetention = count.get() == 0 ? 0.0 : totalRetentionTimes.doubleValue() / count.get(); + double expectedAverageRetention = countLifecycleWithRetention.get() == 0 + ? 
0.0 + : totalRetentionTimes.doubleValue() / countLifecycleWithRetention.get(); assertUsageResults( - count.intValue(), + totalCount.intValue(), expectedMinimumRetention, expectedMaximumRetention, expectedAverageRetention, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportAction.java index 4344b59483651..fb49ba6c7e7a7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportAction.java @@ -16,6 +16,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.core.Tuple; import org.elasticsearch.protocol.xpack.XPackUsageRequest; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -24,7 +25,6 @@ import java.util.Collection; import java.util.LongSummaryStatistics; -import java.util.stream.Collectors; public class DataStreamLifecycleUsageTransportAction extends XPackUsageFeatureTransportAction { @@ -54,26 +54,42 @@ protected void masterOperation( ActionListener listener ) { final Collection dataStreams = state.metadata().dataStreams().values(); - LongSummaryStatistics retentionStats = dataStreams.stream() - .filter(ds -> ds.getLifecycle() != null && ds.getLifecycle().isEnabled()) - .filter(ds -> ds.getLifecycle().getEffectiveDataRetention() != null) - .collect(Collectors.summarizingLong(ds -> ds.getLifecycle().getEffectiveDataRetention().getMillis())); - long dataStreamsWithLifecycles = retentionStats.getCount(); - long minRetention = dataStreamsWithLifecycles == 0 ? 0 : retentionStats.getMin(); - long maxRetention = dataStreamsWithLifecycles == 0 ? 0 : retentionStats.getMax(); - double averageRetention = retentionStats.getAverage(); + Tuple stats = calculateStats(dataStreams); + + long minRetention = stats.v2().getCount() == 0 ? 0 : stats.v2().getMin(); + long maxRetention = stats.v2().getCount() == 0 ? 0 : stats.v2().getMax(); + double averageRetention = stats.v2().getAverage(); RolloverConfiguration rolloverConfiguration = clusterService.getClusterSettings() .get(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING); String rolloverConfigString = rolloverConfiguration.toString(); - final DataStreamLifecycleFeatureSetUsage.LifecycleStats stats = new DataStreamLifecycleFeatureSetUsage.LifecycleStats( - dataStreamsWithLifecycles, + final DataStreamLifecycleFeatureSetUsage.LifecycleStats lifecycleStats = new DataStreamLifecycleFeatureSetUsage.LifecycleStats( + stats.v1(), minRetention, maxRetention, averageRetention, DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING.getDefault(null).toString().equals(rolloverConfigString) ); - final DataStreamLifecycleFeatureSetUsage usage = new DataStreamLifecycleFeatureSetUsage(stats); + final DataStreamLifecycleFeatureSetUsage usage = new DataStreamLifecycleFeatureSetUsage(lifecycleStats); listener.onResponse(new XPackUsageFeatureResponse(usage)); } + + /** + * Counts the number of data streams that have a lifecycle configured (and enabled) and for + * the data streams that have a lifecycle it computes the min/max/average summary of the effective + * configured retention. 
+     */
+    public static Tuple<Long, LongSummaryStatistics> calculateStats(Collection<DataStream> dataStreams) {
+        long dataStreamsWithLifecycles = 0;
+        LongSummaryStatistics retentionStats = new LongSummaryStatistics();
+        for (DataStream dataStream : dataStreams) {
+            if (dataStream.getLifecycle() != null && dataStream.getLifecycle().isEnabled()) {
+                dataStreamsWithLifecycles++;
+                if (dataStream.getLifecycle().getEffectiveDataRetention() != null) {
+                    retentionStats.accept(dataStream.getLifecycle().getEffectiveDataRetention().getMillis());
+                }
+            }
+        }
+        return new Tuple<>(dataStreamsWithLifecycles, retentionStats);
+    }
 }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java
index a72cbad790a68..22a2c3a880ce5 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java
@@ -6,6 +6,8 @@
  */
 package org.elasticsearch.xpack.core.ilm;

+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.downsample.DownsampleConfig;
 import org.elasticsearch.client.internal.Client;
@@ -32,6 +34,7 @@
 import java.util.Objects;
 import java.util.concurrent.TimeUnit;

+import static org.elasticsearch.action.downsample.DownsampleConfig.generateDownsampleIndexName;
 import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
 import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg;

@@ -40,6 +43,8 @@
  */
 public class DownsampleAction implements LifecycleAction {

+    private static final Logger logger = LogManager.getLogger(DownsampleAction.class);
+
     public static final String NAME = "downsample";
     public static final String DOWNSAMPLED_INDEX_PREFIX = "downsample-";
     public static final String CONDITIONAL_TIME_SERIES_CHECK_KEY = BranchingStep.NAME + "-on-timeseries-check";
@@ -155,7 +160,30 @@ public List<Step> toSteps(Client client, String phase, StepKey nextStepKey) {
             (index, clusterState) -> {
                 IndexMetadata indexMetadata = clusterState.metadata().index(index);
                 assert indexMetadata != null : "invalid cluster metadata. index [" + index.getName() + "] metadata not found";
-                return IndexSettings.MODE.get(indexMetadata.getSettings()) == IndexMode.TIME_SERIES;
+                if (IndexSettings.MODE.get(indexMetadata.getSettings()) != IndexMode.TIME_SERIES) {
+                    return false;
+                }
+
+                if (index.getName().equals(generateDownsampleIndexName(DOWNSAMPLED_INDEX_PREFIX, indexMetadata, fixedInterval))) {
+                    var downsampleStatus = IndexMetadata.INDEX_DOWNSAMPLE_STATUS.get(indexMetadata.getSettings());
+                    if (downsampleStatus == IndexMetadata.DownsampleTaskStatus.UNKNOWN) {
+                        // This isn't a downsample index, but it has the name of our target downsample index - very bad, we'll skip the
+                        // downsample action to avoid blocking the lifecycle of this index - if there
+                        // is another downsample action configured in the next phase, it'll be able to proceed successfully
+                        logger.warn(
+                            "index [{}] as part of policy [{}] cannot be downsampled at interval [{}] in phase [{}] because it has" + " the name of the target downsample index and is itself not a downsampled index.
Skipping the downsample " + + "action.", + index.getName(), + indexMetadata.getLifecyclePolicyName(), + fixedInterval, + phase + ); + } + return false; + } + + return true; } ); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/Phase.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/Phase.java index 76c2499e6847f..abb509805b60b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/Phase.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/Phase.java @@ -8,13 +8,13 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ContextParser; import org.elasticsearch.xcontent.ObjectParser.ValueType; @@ -56,7 +56,7 @@ public class Phase implements ToXContentObject, Writeable { // when the phase is read from the cluster state during startup (even before negative timevalues were strictly // disallowed) so this is a hack to treat negative `min_age`s as 0 to prevent those errors. // They will be saved as `0` so this hack can be removed once we no longer have to read cluster states from 7.x. - assert Version.CURRENT.major < 9 : "remove this hack now that we don't have to read 7.x cluster states"; + @UpdateForV9 // remove this hack now that we don't have to read 7.x cluster states final String timeValueString = p.text(); if (timeValueString.startsWith("-")) { logger.warn("phase has negative min_age value of [{}] - this will be treated as a min_age of 0", timeValueString); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java index b091f3bc9f894..c09a661a64924 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java @@ -80,17 +80,23 @@ public final class IndexPrivilege extends Privilege { ClusterSearchShardsAction.NAME, SearchShardsAction.NAME ); - private static final Automaton CREATE_AUTOMATON = patterns("indices:data/write/index*", "indices:data/write/bulk*"); + private static final Automaton CREATE_AUTOMATON = patterns( + "indices:data/write/index*", + "indices:data/write/bulk*", + "indices:data/write/simulate/bulk*" + ); private static final Automaton CREATE_DOC_AUTOMATON = patterns( "indices:data/write/index", "indices:data/write/index[*", "indices:data/write/index:op_type/create", - "indices:data/write/bulk*" + "indices:data/write/bulk*", + "indices:data/write/simulate/bulk*" ); private static final Automaton INDEX_AUTOMATON = patterns( "indices:data/write/index*", "indices:data/write/bulk*", - "indices:data/write/update*" + "indices:data/write/update*", + "indices:data/write/simulate/bulk*" ); private static final Automaton DELETE_AUTOMATON = patterns("indices:data/write/delete*", "indices:data/write/bulk*"); private static final Automaton WRITE_AUTOMATON = 
patterns("indices:data/write/*", AutoPutMappingAction.NAME); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/TemplateUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/TemplateUtils.java index d0be0ad9cb697..d1fda4ab1bd13 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/TemplateUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/TemplateUtils.java @@ -134,25 +134,10 @@ public static boolean checkTemplateExistsAndVersionIsGTECurrentVersion(String te * @param templateName Name of the index template * @param state Cluster state * @param logger Logger - * @param versionComposableTemplateExpected In which version of Elasticsearch did this template switch to being a composable template? - * null means the template hasn't been switched yet. */ - public static boolean checkTemplateExistsAndIsUpToDate( - String templateName, - String versionKey, - ClusterState state, - Logger logger, - Version versionComposableTemplateExpected - ) { + public static boolean checkTemplateExistsAndIsUpToDate(String templateName, String versionKey, ClusterState state, Logger logger) { - return checkTemplateExistsAndVersionMatches( - templateName, - versionKey, - state, - logger, - Version.CURRENT::equals, - versionComposableTemplateExpected - ); + return checkTemplateExistsAndVersionMatches(templateName, versionKey, state, logger, Version.CURRENT::equals); } /** @@ -162,32 +147,20 @@ public static boolean checkTemplateExistsAndIsUpToDate( * @param state Cluster state * @param logger Logger * @param predicate Predicate to execute on version check - * @param versionComposableTemplateExpected In which version of Elasticsearch did this template switch to being a composable template? - * null means the template hasn't been switched yet. 
*/ public static boolean checkTemplateExistsAndVersionMatches( String templateName, String versionKey, ClusterState state, Logger logger, - Predicate predicate, - Version versionComposableTemplateExpected + Predicate predicate ) { - CompressedXContent mappings; - if (versionComposableTemplateExpected != null && state.nodes().getMinNodeVersion().onOrAfter(versionComposableTemplateExpected)) { - ComposableIndexTemplate templateMeta = state.metadata().templatesV2().get(templateName); - if (templateMeta == null) { - return false; - } - mappings = templateMeta.template().mappings(); - } else { - IndexTemplateMetadata templateMeta = state.metadata().templates().get(templateName); - if (templateMeta == null) { - return false; - } - mappings = templateMeta.getMappings(); + IndexTemplateMetadata templateMeta = state.metadata().templates().get(templateName); + if (templateMeta == null) { + return false; } + CompressedXContent mappings = templateMeta.getMappings(); // check all mappings contain correct version in _meta // we have to parse the source here which is annoying diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/internal/TrialLicenseVersionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/internal/TrialLicenseVersionTests.java index ff62fbc4d4877..032c400ce1fe1 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/internal/TrialLicenseVersionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/internal/TrialLicenseVersionTests.java @@ -32,6 +32,7 @@ public void testRoundTripParsing() { assertThat(TrialLicenseVersion.fromXContent(randomVersion.toString()), equalTo(randomVersion)); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/102286") public void testNewTrialAllowed() { var randomVersion = new TrialLicenseVersion(randomNonNegativeInt()); var subsequentVersion = new TrialLicenseVersion( diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsageTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsageTests.java index 3cdaa7e6015d2..66ab5755f9392 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsageTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsageTests.java @@ -7,10 +7,23 @@ package org.elasticsearch.xpack.core.datastreams; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamLifecycle; +import org.elasticsearch.cluster.metadata.DataStreamTestHelper; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.Index; import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.test.ESTestCase; +import java.util.List; +import java.util.LongSummaryStatistics; +import java.util.UUID; + +import static org.elasticsearch.xpack.core.action.DataStreamLifecycleUsageTransportAction.calculateStats; +import static org.hamcrest.Matchers.is; + public class DataStreamLifecycleFeatureSetUsageTests extends AbstractWireSerializingTestCase { @Override @@ -83,6 +96,61 @@ protected DataStreamLifecycleFeatureSetUsage mutateInstance(DataStreamLifecycleF }; } + public void testLifecycleStats() { + List dataStreams = List.of( + DataStreamTestHelper.newInstance( 
+ randomAlphaOfLength(10), + List.of(new Index(randomAlphaOfLength(10), UUID.randomUUID().toString())), + 1L, + null, + false, + new DataStreamLifecycle() + ), + DataStreamTestHelper.newInstance( + randomAlphaOfLength(10), + List.of(new Index(randomAlphaOfLength(10), UUID.randomUUID().toString())), + 1L, + null, + false, + new DataStreamLifecycle(new DataStreamLifecycle.Retention(TimeValue.timeValueMillis(1000)), null, true) + ), + DataStreamTestHelper.newInstance( + randomAlphaOfLength(10), + List.of(new Index(randomAlphaOfLength(10), UUID.randomUUID().toString())), + 1L, + null, + false, + new DataStreamLifecycle(new DataStreamLifecycle.Retention(TimeValue.timeValueMillis(100)), null, true) + ), + DataStreamTestHelper.newInstance( + randomAlphaOfLength(10), + List.of(new Index(randomAlphaOfLength(10), UUID.randomUUID().toString())), + 1L, + null, + false, + new DataStreamLifecycle(new DataStreamLifecycle.Retention(TimeValue.timeValueMillis(5000)), null, false) + ), + DataStreamTestHelper.newInstance( + randomAlphaOfLength(10), + List.of(new Index(randomAlphaOfLength(10), UUID.randomUUID().toString())), + 1L, + null, + false, + null + ) + ); + + Tuple stats = calculateStats(dataStreams); + // 3 data streams with an enabled lifecycle + assertThat(stats.v1(), is(3L)); + LongSummaryStatistics longSummaryStatistics = stats.v2(); + assertThat(longSummaryStatistics.getMax(), is(1000L)); + assertThat(longSummaryStatistics.getMin(), is(100L)); + // only counting the ones with an effective retention in the summary statistics + assertThat(longSummaryStatistics.getCount(), is(2L)); + assertThat(longSummaryStatistics.getAverage(), is(550.0)); + } + @Override protected Writeable.Reader instanceReader() { return DataStreamLifecycleFeatureSetUsage::new; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/DownsampleActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/DownsampleActionTests.java index 109e8f87627ad..7cb93803de4ee 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/DownsampleActionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/DownsampleActionTests.java @@ -6,8 +6,16 @@ */ package org.elasticsearch.xpack.core.ilm; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.test.EqualsHashCodeTestUtils; import org.elasticsearch.xcontent.XContentParser; @@ -19,7 +27,9 @@ import static org.elasticsearch.xpack.core.ilm.DownsampleAction.CONDITIONAL_DATASTREAM_CHECK_KEY; import static org.elasticsearch.xpack.core.ilm.DownsampleAction.CONDITIONAL_TIME_SERIES_CHECK_KEY; +import static org.elasticsearch.xpack.core.ilm.DownsampleAction.DOWNSAMPLED_INDEX_PREFIX; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; public class DownsampleActionTests extends AbstractActionTestCase { @@ -132,6 +142,92 @@ public void testToSteps() { assertThat(steps.get(14).getNextStepKey(), equalTo(nextStepKey)); } + public void 
testDownsamplingPrerequisitesStep() { + DateHistogramInterval fixedInterval = ConfigTestHelpers.randomInterval(); + DownsampleAction action = new DownsampleAction(fixedInterval, WAIT_TIMEOUT); + String phase = randomAlphaOfLengthBetween(1, 10); + StepKey nextStepKey = new StepKey( + randomAlphaOfLengthBetween(1, 10), + randomAlphaOfLengthBetween(1, 10), + randomAlphaOfLengthBetween(1, 10) + ); + { + // non time series indices skip the action + BranchingStep branchingStep = getFirstBranchingStep(action, phase, nextStepKey); + IndexMetadata indexMetadata = newIndexMeta("test", Settings.EMPTY); + + ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().put(indexMetadata, true).build()) + .build(); + + branchingStep.performAction(indexMetadata.getIndex(), state); + assertThat(branchingStep.getNextStepKey(), is(nextStepKey)); + } + { + // time series indices execute the action + BranchingStep branchingStep = getFirstBranchingStep(action, phase, nextStepKey); + Settings settings = Settings.builder() + .put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) + .put("index.routing_path", "uid") + .build(); + IndexMetadata indexMetadata = newIndexMeta("test", settings); + + ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().put(indexMetadata, true).build()) + .build(); + + branchingStep.performAction(indexMetadata.getIndex(), state); + assertThat(branchingStep.getNextStepKey().name(), is(CheckNotDataStreamWriteIndexStep.NAME)); + } + { + // already downsampled indices for the interval skip the action + BranchingStep branchingStep = getFirstBranchingStep(action, phase, nextStepKey); + Settings settings = Settings.builder() + .put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) + .put("index.routing_path", "uid") + .put(IndexMetadata.INDEX_DOWNSAMPLE_STATUS_KEY, IndexMetadata.DownsampleTaskStatus.SUCCESS) + .put(IndexMetadata.INDEX_DOWNSAMPLE_ORIGIN_NAME.getKey(), "test") + .build(); + String indexName = DOWNSAMPLED_INDEX_PREFIX + fixedInterval + "-test"; + IndexMetadata indexMetadata = newIndexMeta(indexName, settings); + + ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().put(indexMetadata, true).build()) + .build(); + + branchingStep.performAction(indexMetadata.getIndex(), state); + assertThat(branchingStep.getNextStepKey(), is(nextStepKey)); + } + { + // indices with the same name as the target downsample index that are NOT downsample indices skip the action + BranchingStep branchingStep = getFirstBranchingStep(action, phase, nextStepKey); + String indexName = DOWNSAMPLED_INDEX_PREFIX + fixedInterval + "-test"; + IndexMetadata indexMetadata = newIndexMeta(indexName, Settings.EMPTY); + + ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().put(indexMetadata, true).build()) + .build(); + + branchingStep.performAction(indexMetadata.getIndex(), state); + assertThat(branchingStep.getNextStepKey(), is(nextStepKey)); + } + } + + private static BranchingStep getFirstBranchingStep(DownsampleAction action, String phase, StepKey nextStepKey) { + List steps = action.toSteps(null, phase, nextStepKey); + assertNotNull(steps); + assertEquals(15, steps.size()); + + assertTrue(steps.get(0) instanceof BranchingStep); + assertThat(steps.get(0).getKey().name(), equalTo(CONDITIONAL_TIME_SERIES_CHECK_KEY)); + + return (BranchingStep) steps.get(0); + } + + public static IndexMetadata newIndexMeta(String name, Settings indexSettings) { + return 
IndexMetadata.builder(name).settings(indexSettings(IndexVersion.current(), 1, 1).put(indexSettings)).build(); + } + public void testEqualsAndHashCode() { EqualsHashCodeTestUtils.checkEqualsAndHashCode(createTestInstance(), this::copy, this::notCopy); } diff --git a/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/60_settings.yml b/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/60_settings.yml index 6a33cc47e5c51..131aa4ce62092 100644 --- a/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/60_settings.yml +++ b/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/60_settings.yml @@ -93,10 +93,8 @@ --- "Downsample datastream with tier preference": - skip: - version: "all" - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/97150" -# version: " - 8.4.99" -# reason: "rollup renamed to downsample in 8.5.0" + version: " - 8.4.99" + reason: "rollup renamed to downsample in 8.5.0" - do: indices.put_index_template: diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java index b8a66410167ab..b35dc8b7b2e80 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java @@ -185,6 +185,7 @@ protected void doClose() { operator.close(); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/102264") public void testFailure() throws Exception { DriverContext driverContext = driverContext(); final SequenceLongBlockSourceOperator sourceOperator = new SequenceLongBlockSourceOperator( diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec index b8dd2c6e3cee1..0f6fc42860750 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec @@ -473,7 +473,7 @@ ROW deg = [90, 180, 270] [90, 180, 270] | [1.5707963267948966, 3.141592653589793, 4.71238898038469] ; -warningWithFromSource#[skip:-8.11.99, reason:ql exceptions were updated in 8.12] +warningWithFromSource-Ignore from employees | sort emp_no | limit 1 | eval x = to_long(emp_no) * 10000000 | eval y = to_int(x) > 1 | keep y; warning:Line 1:89: evaluation of [to_int(x)] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:89: org.elasticsearch.xpack.ql.InvalidArgumentException: [100010000000] out of [integer] range diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec index 68bf4108ffcd1..d3abef8021f66 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec @@ -40,7 +40,7 @@ least |"? least(first:integer|long|double|boolean|keyword|tex left |"? left(string:keyword, length:integer)" |[string, length] |["keyword", "integer"] |["", ""] |? | "" | [false, false] | false length |? length(arg1:?) |arg1 |? | "" |? | "" | false | false log10 |"? log10(n:integer|long|double|unsigned_long)" |n |"integer|long|double|unsigned_long" | "" |? | "" | false | false -ltrim |? 
ltrim(arg1:?) |arg1 |? | "" |? | "" | false | false +ltrim |"keyword|text ltrim(str:keyword|text)" |str |"keyword|text" | "" |"keyword|text" |Removes leading whitespaces from a string.| false | false max |? max(arg1:?) |arg1 |? | "" |? | "" | false | false median |? median(arg1:?) |arg1 |? | "" |? | "" | false | false median_absolute_deviation|? median_absolute_deviation(arg1:?) |arg1 |? | "" |? | "" | false | false @@ -60,7 +60,7 @@ pow |"? pow(base:integer|long|double, exponent:integer|doub replace |"? replace(arg1:?, arg2:?, arg3:?)" | [arg1, arg2, arg3] | [?, ?, ?] |["", "", ""] |? | "" | [false, false, false]| false right |"? right(string:keyword, length:integer)" |[string, length] |["keyword", "integer"] |["", ""] |? | "" | [false, false] | false round |? round(arg1:?, arg2:?) |[arg1, arg2] |[?, ?] |["", ""] |? | "" | [false, false] | false -rtrim |? rtrim(arg1:?) |arg1 |? | "" |? | "" | false | false +rtrim |"keyword|text rtrim(str:keyword|text)" |str |"keyword|text" | "" |"keyword|text" |Removes trailing whitespaces from a string.| false | false sin |"double sin(n:integer|long|double|unsigned_long)" |n |"integer|long|double|unsigned_long" |An angle, in radians |double |Returns the trigonometric sine of an angle | false | false sinh |"double sinh(n:integer|long|double|unsigned_long)"|n |"integer|long|double|unsigned_long" | "" |double | "" | false | false split |? split(arg1:?, arg2:?) |[arg1, arg2] |[?, ?] |["", ""] |? | "" | [false, false] | false @@ -88,9 +88,9 @@ to_string |"? to_string(v:unsigned_long|date|boolean|double|ip|te to_ul |? to_ul(arg1:?) |arg1 |? | "" |? | "" | false | false to_ulong |? to_ulong(arg1:?) |arg1 |? | "" |? | "" | false | false to_unsigned_long |? to_unsigned_long(arg1:?) |arg1 |? | "" |? | "" | false | false -to_ver |"? to_ver(v:keyword|text|version)" |v |"keyword|text|version"| "" |? | "" | false | false -to_version |"? to_version(v:keyword|text|version)" |v |"keyword|text|version"| "" |? | "" | false | false -trim |? trim(arg1:?) |arg1 |? | "" |? | "" | false | false +to_ver |"? to_ver(v:keyword|text|version)" |v |"keyword|text|version"| "" |? | "" | false | false +to_version |"? to_version(v:keyword|text|version)" |v |"keyword|text|version"| "" |? | "" | false | false +trim |"keyword|text trim(str:keyword|text)" |str |"keyword|text" | "" |"keyword|text" |Removes leading and trailing whitespaces from a string.| false | false ; @@ -129,7 +129,7 @@ synopsis:keyword "? left(string:keyword, length:integer)" ? length(arg1:?) "? log10(n:integer|long|double|unsigned_long)" -? ltrim(arg1:?) +"keyword|text ltrim(str:keyword|text)" ? max(arg1:?) ? median(arg1:?) ? median_absolute_deviation(arg1:?) @@ -149,7 +149,7 @@ synopsis:keyword "? replace(arg1:?, arg2:?, arg3:?)" "? right(string:keyword, length:integer)" ? round(arg1:?, arg2:?) -? rtrim(arg1:?) +"keyword|text rtrim(str:keyword|text)" "double sin(n:integer|long|double|unsigned_long)" "double sinh(n:integer|long|double|unsigned_long)" ? split(arg1:?, arg2:?) @@ -179,7 +179,7 @@ synopsis:keyword ? to_unsigned_long(arg1:?) "? to_ver(v:keyword|text|version)" "? to_version(v:keyword|text|version)" -? trim(arg1:?) 
+"keyword|text trim(str:keyword|text)" ; diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EnrichIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EnrichIT.java index b855fbd15be12..46aaa6fab16a5 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EnrichIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EnrichIT.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.esql.action; -import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.client.internal.Client; @@ -60,7 +59,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/102184") public class EnrichIT extends AbstractEsqlIntegTestCase { @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java index 7dd9f01a9d6c9..384563cb815a4 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java @@ -335,7 +335,8 @@ private static Operator droppingBlockOperator(int totalBlocks, int droppingPosit private class TransportHandler implements TransportRequestHandler { @Override public void messageReceived(LookupRequest request, TransportChannel channel, Task task) { - ActionListener listener = new ChannelActionListener<>(channel); + request.incRef(); + ActionListener listener = ActionListener.runBefore(new ChannelActionListener<>(channel), request::decRef); doLookup( request.sessionId, (CancellableTask) task, diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LTrim.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LTrim.java index 952c3314af80a..382f64fcf831c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LTrim.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LTrim.java @@ -12,6 +12,8 @@ import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.TypeResolutions; @@ -27,8 +29,9 @@ * Removes leading whitespaces from a string. 
*/ public class LTrim extends UnaryScalarFunction implements EvaluatorMapper { - public LTrim(Source source, Expression field) { - super(source, field); + @FunctionInfo(returnType = { "keyword", "text" }, description = "Removes leading whitespaces from a string.") + public LTrim(Source source, @Param(name = "str", type = { "keyword", "text" }) Expression str) { + super(source, str); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RTrim.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RTrim.java index 273a032a90ed3..98fc93b4f6acc 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RTrim.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RTrim.java @@ -12,6 +12,8 @@ import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.TypeResolutions; @@ -27,8 +29,9 @@ * Removes trailing whitespaces from a string. */ public class RTrim extends UnaryScalarFunction implements EvaluatorMapper { - public RTrim(Source source, Expression field) { - super(source, field); + @FunctionInfo(returnType = { "keyword", "text" }, description = "Removes trailing whitespaces from a string.") + public RTrim(Source source, @Param(name = "str", type = { "keyword", "text" }) Expression str) { + super(source, str); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Trim.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Trim.java index b865199c1c2ae..ce15d1db0f8f9 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Trim.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Trim.java @@ -12,6 +12,8 @@ import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.TypeResolutions; @@ -27,8 +29,8 @@ * Removes leading and trailing whitespaces from a string. 
*/ public final class Trim extends UnaryScalarFunction implements EvaluatorMapper { - - public Trim(Source source, Expression str) { + @FunctionInfo(returnType = { "keyword", "text" }, description = "Removes leading and trailing whitespaces from a string.") + public Trim(Source source, @Param(name = "str", type = { "keyword", "text" }) Expression str) { super(source, str); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java index e7409543ca68e..bfa9ea449ad46 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java @@ -786,7 +786,7 @@ protected LogicalPlan rule(OrderBy orderBy) { if (child instanceof OrderBy childOrder) { // combine orders - return new OrderBy(orderBy.source(), childOrder.child(), CollectionUtils.combine(orderBy.order(), childOrder.order())); + return new OrderBy(orderBy.source(), childOrder.child(), orderBy.order()); } else if (child instanceof Project) { return pushDownPastProject(orderBy); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQuery.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQuery.java index b554ccb2920aa..e419be2b7e1fc 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQuery.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQuery.java @@ -10,8 +10,10 @@ import org.apache.lucene.index.DocValues; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.PointValues; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.index.Terms; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.IndexSearcher; @@ -321,20 +323,30 @@ public Scorer scorer(LeafReaderContext context) throws IOException { * can't do that because we need the check the number of fields. */ if (lfd instanceof LeafNumericFieldData n) { - return scorer(nextScorer, n); + return scorer(context, nextScorer, n); } if (lfd instanceof LeafOrdinalsFieldData o) { - return scorer(nextScorer, o); + return scorer(context, nextScorer, o); } return scorer(nextScorer, lfd); } - private Scorer scorer(Scorer nextScorer, LeafNumericFieldData lfd) { + private Scorer scorer(LeafReaderContext context, Scorer nextScorer, LeafNumericFieldData lfd) throws IOException { SortedNumericDocValues sortedNumerics = lfd.getLongValues(); if (DocValues.unwrapSingleton(sortedNumerics) != null) { - // Segment contains only single valued fields. - stats.numericSingle++; - return nextScorer; + /* + * Segment contains only single valued fields. But it's possible + * that some fields have 0 values. The most surefire way to check + * is to look at the index for the data. If there isn't an index + * this isn't going to work - but if there is we can compare the + * number of documents in the index to the number of values in it - + * if they are the same we've got a dense singleton. 
+ */ + PointValues points = context.reader().getPointValues(fieldData.getFieldName()); + if (points != null && points.getDocCount() == context.reader().maxDoc()) { + stats.numericSingle++; + return nextScorer; + } } TwoPhaseIterator nextIterator = nextScorer.twoPhaseIterator(); if (nextIterator == null) { @@ -353,12 +365,22 @@ private Scorer scorer(Scorer nextScorer, LeafNumericFieldData lfd) { ); } - private Scorer scorer(Scorer nextScorer, LeafOrdinalsFieldData lfd) { + private Scorer scorer(LeafReaderContext context, Scorer nextScorer, LeafOrdinalsFieldData lfd) throws IOException { SortedSetDocValues sortedSet = lfd.getOrdinalsValues(); if (DocValues.unwrapSingleton(sortedSet) != null) { - // Segment contains only single valued fields. - stats.ordinalsSingle++; - return nextScorer; + /* + * Segment contains only single valued fields. But it's possible + * that some fields have 0 values. The most surefire way to check + * is to look at the index for the data. If there isn't an index + * this isn't going to work - but if there is we can compare the + * number of documents in the index to the number of values in it - + * if they are the same we've got a dense singleton. + */ + Terms terms = context.reader().terms(fieldData.getFieldName()); + if (terms != null && terms.getDocCount() == context.reader().maxDoc()) { + stats.ordinalsSingle++; + return nextScorer; + } } TwoPhaseIterator nextIterator = nextScorer.twoPhaseIterator(); if (nextIterator == null) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index 00000f7755107..99b21225e1985 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -229,7 +229,7 @@ protected final boolean enableWarningsCheck() { } public boolean logResults() { - return false; + return true; } private void doTest() throws Exception { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index c79e77915ac01..37fa17d3cb824 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -935,4 +935,11 @@ static Version randomVersion() { default -> throw new IllegalArgumentException(); }; } + + /** + * All string types (keyword, text, match_only_text, etc). + */ + protected static DataType[] strings() { + return EsqlDataTypes.types().stream().filter(DataTypes::isString).toArray(DataType[]::new); + } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AbstractScalarFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AbstractScalarFunctionTestCase.java index ae46592a90ac1..08783823fc00f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AbstractScalarFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AbstractScalarFunctionTestCase.java @@ -76,13 +76,6 @@ public Set sortedTypesSet(DataType[] validTypes, DataType... 
additiona return mergedSet; } - /** - * All string types (keyword, text, match_only_text, etc). For passing to {@link #required} or {@link #optional}. - */ - protected static DataType[] strings() { - return EsqlDataTypes.types().stream().filter(DataTypes::isString).toArray(DataType[]::new); - } - /** * All integer types (long, int, short, byte). For passing to {@link #required} or {@link #optional}. */ diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/AbstractTrimTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/AbstractTrimTests.java index fdb9387b410ff..229abbcdb187d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/AbstractTrimTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/AbstractTrimTests.java @@ -8,8 +8,8 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.string; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; -import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.ql.type.DataType; import java.util.ArrayList; @@ -18,11 +18,11 @@ import static org.hamcrest.Matchers.equalTo; -public abstract class AbstractTrimTests extends AbstractScalarFunctionTestCase { +public abstract class AbstractTrimTests extends AbstractFunctionTestCase { static Iterable parameters(String name, boolean trimLeading, boolean trimTrailing) { List suppliers = new ArrayList<>(); for (DataType type : strings()) { - suppliers.add(new TestCaseSupplier("no whitespace/" + type, () -> { + suppliers.add(new TestCaseSupplier("no whitespace/" + type, List.of(type), () -> { String text = randomAlphaOfLength(8); return testCase(name, type, text, text); })); @@ -40,17 +40,17 @@ static Iterable parameters(String name, boolean trimLeading, boolean t Map.entry("information separator one", new char[] { '\u001F' }), Map.entry("whitespace", new char[] { ' ', '\t', '\n', '\u000B', '\f', '\r', '\u001C', '\u001D', '\u001E', '\u001F' }) )) { - suppliers.add(new TestCaseSupplier(type + "/leading " + whitespaces.getKey(), () -> { + suppliers.add(new TestCaseSupplier(type + "/leading " + whitespaces.getKey(), List.of(type), () -> { String text = randomAlphaOfLength(8); String withWhitespace = randomWhiteSpace(whitespaces.getValue()) + text; return testCase(name, type, withWhitespace, trimLeading ? text : withWhitespace); })); - suppliers.add(new TestCaseSupplier(type + "/trailing " + whitespaces.getKey(), () -> { + suppliers.add(new TestCaseSupplier(type + "/trailing " + whitespaces.getKey(), List.of(type), () -> { String text = randomAlphaOfLength(8); String withWhitespace = text + randomWhiteSpace(whitespaces.getValue()); return testCase(name, type, withWhitespace, trimTrailing ? 
text : withWhitespace); })); - suppliers.add(new TestCaseSupplier(type + "/leading and trailing " + whitespaces.getKey(), () -> { + suppliers.add(new TestCaseSupplier(type + "/leading and trailing " + whitespaces.getKey(), List.of(type), () -> { String text = randomAlphaOfLength(8); String leadingWhitespace = randomWhiteSpace(whitespaces.getValue()); String trailingWhitespace = randomWhiteSpace(whitespaces.getValue()); @@ -61,13 +61,13 @@ static Iterable parameters(String name, boolean trimLeading, boolean t (trimLeading ? "" : leadingWhitespace) + text + (trimTrailing ? "" : trailingWhitespace) ); })); - suppliers.add(new TestCaseSupplier(type + "/all " + whitespaces.getKey(), () -> { + suppliers.add(new TestCaseSupplier(type + "/all " + whitespaces.getKey(), List.of(type), () -> { String text = randomWhiteSpace(whitespaces.getValue()); return testCase(name, type, text, ""); })); } } - return parameterSuppliersFromTypedData(suppliers); + return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, suppliers))); } private static TestCaseSupplier.TestCase testCase(String name, DataType type, String data, String expected) { @@ -79,16 +79,6 @@ private static TestCaseSupplier.TestCase testCase(String name, DataType type, St ); } - @Override - protected final List argSpec() { - return List.of(required(strings())); - } - - @Override - protected final DataType expectedType(List argTypes) { - return argTypes.get(0); - } - private static String randomWhiteSpace(char[] whitespaces) { char[] randomWhitespace = new char[randomIntBetween(1, 8)]; for (int i = 0; i < randomWhitespace.length; i++) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index f63026c28279a..e825f1f96a8b3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -780,7 +780,7 @@ public void testCombineOrderBy() { | sort salary"""); var topN = as(plan, TopN.class); - assertThat(orderNames(topN), contains("salary", "emp_no")); + assertThat(orderNames(topN), contains("salary")); as(topN.child(), EsRelation.class); } @@ -792,7 +792,7 @@ public void testCombineOrderByThroughEval() { | sort x"""); var topN = as(plan, TopN.class); - assertThat(orderNames(topN), contains("x", "emp_no")); + assertThat(orderNames(topN), contains("x")); var eval = as(topN.child(), Eval.class); as(eval.child(), EsRelation.class); } @@ -806,7 +806,7 @@ public void testCombineOrderByThroughEvalWithTwoDefs() { | sort z"""); var topN = as(plan, TopN.class); - assertThat(orderNames(topN), contains("z", "emp_no")); + assertThat(orderNames(topN), contains("z")); var eval = as(topN.child(), Eval.class); assertThat(Expressions.names(eval.fields()), contains("x", "y", "z")); as(eval.child(), EsRelation.class); @@ -820,7 +820,7 @@ public void testCombineOrderByThroughDissect() { | sort x"""); var topN = as(plan, TopN.class); - assertThat(orderNames(topN), contains("x", "emp_no")); + assertThat(orderNames(topN), contains("x")); var dissect = as(topN.child(), Dissect.class); as(dissect.child(), EsRelation.class); } @@ -833,7 +833,7 @@ public void testCombineOrderByThroughGrok() { | sort x"""); var topN = as(plan, TopN.class); - assertThat(orderNames(topN), contains("x", "emp_no")); + 
assertThat(orderNames(topN), contains("x")); var grok = as(topN.child(), Grok.class); as(grok.child(), EsRelation.class); } @@ -847,7 +847,7 @@ public void testCombineOrderByThroughProject() { var keep = as(plan, Project.class); var topN = as(keep.child(), TopN.class); - assertThat(orderNames(topN), contains("salary", "emp_no")); + assertThat(orderNames(topN), contains("salary")); as(topN.child(), EsRelation.class); } @@ -862,7 +862,7 @@ public void testCombineOrderByThroughProjectAndEval() { var keep = as(plan, Project.class); var topN = as(keep.child(), TopN.class); - assertThat(orderNames(topN), contains("salary", "emp_no")); + assertThat(orderNames(topN), contains("salary")); var eval = as(topN.child(), Eval.class); assertThat(Expressions.names(eval.fields()), contains("e")); as(eval.child(), EsRelation.class); @@ -878,7 +878,7 @@ public void testCombineOrderByThroughProjectWithAlias() { var keep = as(plan, Project.class); var topN = as(keep.child(), TopN.class); - assertThat(orderNames(topN), contains("salary", "emp_no")); + assertThat(orderNames(topN), contains("salary")); as(topN.child(), EsRelation.class); } @@ -890,7 +890,7 @@ public void testCombineOrderByThroughFilter() { | sort salary"""); var topN = as(plan, TopN.class); - assertThat(orderNames(topN), contains("salary", "emp_no")); + assertThat(orderNames(topN), contains("salary")); var filter = as(topN.child(), Filter.class); as(filter.child(), EsRelation.class); } @@ -998,7 +998,7 @@ public void testMultipleMvExpandWithSortAndLimit() { var keep = as(plan, EsqlProject.class); var topN = as(keep.child(), TopN.class); assertThat(topN.limit().fold(), equalTo(5)); - assertThat(orderNames(topN), contains("salary", "first_name")); + assertThat(orderNames(topN), contains("salary")); var limit = as(topN.child(), Limit.class); assertThat(limit.limit().fold(), equalTo(5)); var mvExp = as(limit.child(), MvExpand.class); @@ -1313,10 +1313,10 @@ public void testCombineMultipleOrderByAndLimits() { var keep = as(plan, Project.class); var topN = as(keep.child(), TopN.class); - assertThat(orderNames(topN), contains("emp_no", "first_name")); + assertThat(orderNames(topN), contains("emp_no")); var filter = as(topN.child(), Filter.class); var topN2 = as(filter.child(), TopN.class); - assertThat(orderNames(topN2), contains("salary", "emp_no")); + assertThat(orderNames(topN2), contains("salary")); as(topN2.child(), EsRelation.class); } @@ -1357,12 +1357,6 @@ public void testDontPruneSameFieldDifferentDirectionSortClauses() { new FieldAttribute(EMPTY, "emp_no", mapping.get("emp_no")), Order.OrderDirection.DESC, Order.NullsPosition.FIRST - ), - new Order( - EMPTY, - new FieldAttribute(EMPTY, "salary", mapping.get("salary")), - Order.OrderDirection.ASC, - Order.NullsPosition.LAST ) ) ); @@ -1406,12 +1400,6 @@ public void testPruneRedundantSortClauses() { new FieldAttribute(EMPTY, "emp_no", mapping.get("emp_no")), Order.OrderDirection.DESC, Order.NullsPosition.LAST - ), - new Order( - EMPTY, - new FieldAttribute(EMPTY, "salary", mapping.get("salary")), - Order.OrderDirection.DESC, - Order.NullsPosition.LAST ) ) ); @@ -1436,12 +1424,6 @@ public void testDontPruneSameFieldDifferentDirectionSortClauses_UsingAlias() { new FieldAttribute(EMPTY, "emp_no", mapping.get("emp_no")), Order.OrderDirection.ASC, Order.NullsPosition.LAST - ), - new Order( - EMPTY, - new FieldAttribute(EMPTY, "emp_no", mapping.get("emp_no")), - Order.OrderDirection.DESC, - Order.NullsPosition.FIRST ) ) ); diff --git 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java index f2cddce199928..c8c8029f994cc 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java @@ -24,6 +24,8 @@ import org.elasticsearch.compute.lucene.LuceneTopNSourceOperator; import org.elasticsearch.compute.operator.SourceOperator; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; import org.elasticsearch.index.cache.query.TrivialQueryCachingPolicy; import org.elasticsearch.index.mapper.MapperServiceTestCase; import org.elasticsearch.search.internal.ContextIndexSearcher; @@ -67,13 +69,15 @@ public static Iterable parameters() throws Exception { private Directory directory = newDirectory(); private IndexReader reader; + private final ArrayList releasables = new ArrayList<>(); + public LocalExecutionPlannerTests(@Name("estimatedRowSizeIsHuge") boolean estimatedRowSizeIsHuge) { this.estimatedRowSizeIsHuge = estimatedRowSizeIsHuge; } @After public void closeIndex() throws IOException { - IOUtils.close(reader, directory); + IOUtils.close(reader, directory, () -> Releasables.close(releasables), releasables::clear); } public void testLuceneSourceOperatorHugeRowSize() throws IOException { @@ -157,6 +161,7 @@ private EsPhysicalOperationProviders esPhysicalOperationProviders() throws IOExc new TestSearchContext(createSearchExecutionContext(createMapperService(mapping(b -> {})), searcher), null, searcher) ); } + releasables.addAll(searchContexts); return new EsPhysicalOperationProviders(searchContexts); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java index cc5b05537c4c6..a6eacae2857e7 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java @@ -57,8 +57,11 @@ interface Setup { public static List params() { List params = new ArrayList<>(); for (String fieldType : new String[] { "long", "integer", "short", "byte", "double", "float", "keyword" }) { - params.add(new Object[] { new StandardSetup(fieldType, false) }); - params.add(new Object[] { new StandardSetup(fieldType, true) }); + for (boolean multivaluedField : new boolean[] { true, false }) { + for (boolean allowEmpty : new boolean[] { true, false }) { + params.add(new Object[] { new StandardSetup(fieldType, multivaluedField, allowEmpty, 100) }); + } + } } params.add(new Object[] { new FieldMissingSetup() }); return params; @@ -196,7 +199,7 @@ private void testCase(SingleValueQuery.Builder builder, boolean rewritesToMatchN } } - private record StandardSetup(String fieldType, boolean multivaluedField) implements Setup { + private record StandardSetup(String fieldType, boolean multivaluedField, boolean empty, int count) implements Setup { @Override public XContentBuilder mapping(XContentBuilder builder) throws IOException { builder.startObject("i").field("type", "long").endObject(); @@ -207,27 +210,32 @@ public XContentBuilder mapping(XContentBuilder builder) throws 
IOException { @Override public List> build(RandomIndexWriter iw) throws IOException { List> fieldValues = new ArrayList<>(100); - for (int i = 0; i < 100; i++) { - // i == 10 forces at least one multivalued field when we're configured for multivalued fields - boolean makeMultivalued = multivaluedField && (i == 10 || randomBoolean()); - List values; - if (makeMultivalued) { - int count = between(2, 10); - Set set = new HashSet<>(count); - while (set.size() < count) { - set.add(randomValue()); - } - values = List.copyOf(set); - } else { - values = List.of(randomValue()); - } + for (int i = 0; i < count; i++) { + List values = values(i); fieldValues.add(values); iw.addDocument(docFor(i, values)); } - return fieldValues; } + private List values(int i) { + // i == 10 forces at least one multivalued field when we're configured for multivalued fields + boolean makeMultivalued = multivaluedField && (i == 10 || randomBoolean()); + if (makeMultivalued) { + int count = between(2, 10); + Set set = new HashSet<>(count); + while (set.size() < count) { + set.add(randomValue()); + } + return List.copyOf(set); + } + // i == 0 forces at least one empty field when we're configured for empty fields + if (empty && (i == 0 || randomBoolean())) { + return List.of(); + } + return List.of(randomValue()); + } + private Object randomValue() { return switch (fieldType) { case "long" -> randomLong(); @@ -279,7 +287,7 @@ public void assertStats(SingleValueQuery.Builder builder, boolean subHasTwoPhase assertThat(builder.stats().bytesApprox(), equalTo(0)); assertThat(builder.stats().bytesNoApprox(), equalTo(0)); - if (multivaluedField) { + if (multivaluedField || empty) { assertThat(builder.stats().numericSingle(), greaterThanOrEqualTo(0)); if (subHasTwoPhase) { assertThat(builder.stats().numericMultiNoApprox(), equalTo(0)); @@ -300,7 +308,7 @@ public void assertStats(SingleValueQuery.Builder builder, boolean subHasTwoPhase assertThat(builder.stats().numericMultiApprox(), equalTo(0)); assertThat(builder.stats().bytesApprox(), equalTo(0)); assertThat(builder.stats().bytesNoApprox(), equalTo(0)); - if (multivaluedField) { + if (multivaluedField || empty) { assertThat(builder.stats().ordinalsSingle(), greaterThanOrEqualTo(0)); if (subHasTwoPhase) { assertThat(builder.stats().ordinalsMultiNoApprox(), equalTo(0)); diff --git a/x-pack/plugin/identity-provider/build.gradle b/x-pack/plugin/identity-provider/build.gradle index 38ca5dfd344b8..dd085e62efa48 100644 --- a/x-pack/plugin/identity-provider/build.gradle +++ b/x-pack/plugin/identity-provider/build.gradle @@ -34,7 +34,7 @@ dependencies { api "org.opensaml:opensaml-storage-impl:${versions.opensaml}" api "net.shibboleth.utilities:java-support:8.4.0" api "com.google.code.findbugs:jsr305:3.0.2" - api "org.apache.santuario:xmlsec:2.3.2" + api "org.apache.santuario:xmlsec:2.3.4" api "io.dropwizard.metrics:metrics-core:4.1.4" api ( "org.cryptacular:cryptacular:1.2.5") { exclude group: 'org.bouncycastle' diff --git a/x-pack/plugin/identity-provider/src/internalClusterTest/java/org/elasticsearch/xpack/idp/action/SamlIdentityProviderTests.java b/x-pack/plugin/identity-provider/src/internalClusterTest/java/org/elasticsearch/xpack/idp/action/SamlIdentityProviderTests.java index e72d97d212119..76bf415fdcce5 100644 --- a/x-pack/plugin/identity-provider/src/internalClusterTest/java/org/elasticsearch/xpack/idp/action/SamlIdentityProviderTests.java +++ b/x-pack/plugin/identity-provider/src/internalClusterTest/java/org/elasticsearch/xpack/idp/action/SamlIdentityProviderTests.java @@ -276,17 
+276,21 @@ public void testSpInitiatedSsoFailsForUserWithNoAccess() throws Exception { initRequest.setJsonEntity(Strings.format(""" {"entity_id":"%s", "acs":"%s","authn_state":%s} """, entityId, acsUrl, Strings.toString(authnStateBuilder))); - Response initResponse = getRestClient().performRequest(initRequest); - ObjectPath initResponseObject = ObjectPath.createFromResponse(initResponse); - assertThat(initResponseObject.evaluate("post_url").toString(), equalTo(acsUrl)); - final String body = initResponseObject.evaluate("saml_response").toString(); + ResponseException e = expectThrows(ResponseException.class, () -> getRestClient().performRequest(initRequest)); + Response response = e.getResponse(); + assertThat(response.getStatusLine().getStatusCode(), equalTo(403)); + ObjectPath initResponseObject = ObjectPath.createFromResponse(response); + assertThat(initResponseObject.evaluate("status"), equalTo(403)); + final String baseSamlResponseObjectPath = "error.saml_initiate_single_sign_on_response."; + assertThat(initResponseObject.evaluate(baseSamlResponseObjectPath + "post_url").toString(), equalTo(acsUrl)); + final String body = initResponseObject.evaluate(baseSamlResponseObjectPath + "saml_response").toString(); assertThat(body, containsString("")); assertThat(body, containsString("InResponseTo=\"" + expectedInResponeTo + "\"")); - Map sp = initResponseObject.evaluate("service_provider"); + Map sp = initResponseObject.evaluate(baseSamlResponseObjectPath + "service_provider"); assertThat(sp, hasKey("entity_id")); assertThat(sp.get("entity_id"), equalTo(entityId)); assertThat( - initResponseObject.evaluate("error"), + initResponseObject.evaluate(baseSamlResponseObjectPath + "error"), equalTo("User [" + SAMPLE_USER_NAME + "] is not permitted to access service [" + entityId + "]") ); } diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/SamlInitiateSingleSignOnResponse.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/SamlInitiateSingleSignOnResponse.java index d920b29de7bcd..a7cd9c606b3c6 100644 --- a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/SamlInitiateSingleSignOnResponse.java +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/SamlInitiateSingleSignOnResponse.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; +import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; @@ -72,4 +73,14 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(samlStatus); out.writeOptionalString(error); } + + public void toXContent(XContentBuilder builder) throws IOException { + builder.field("post_url", this.getPostUrl()); + builder.field("saml_response", this.getSamlResponse()); + builder.field("saml_status", this.getSamlStatus()); + builder.field("error", this.getError()); + builder.startObject("service_provider"); + builder.field("entity_id", this.getEntityId()); + builder.endObject(); + } } diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/TransportSamlInitiateSingleSignOnAction.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/TransportSamlInitiateSingleSignOnAction.java index a41569920ecf8..f2b9c20c79d61 100644 --- 
a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/TransportSamlInitiateSingleSignOnAction.java +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/TransportSamlInitiateSingleSignOnAction.java @@ -9,11 +9,12 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; @@ -29,6 +30,7 @@ import org.elasticsearch.xpack.idp.saml.sp.SamlServiceProvider; import org.elasticsearch.xpack.idp.saml.support.SamlAuthenticationState; import org.elasticsearch.xpack.idp.saml.support.SamlFactory; +import org.elasticsearch.xpack.idp.saml.support.SamlInitiateSingleSignOnException; import org.opensaml.saml.saml2.core.Response; import org.opensaml.saml.saml2.core.StatusCode; @@ -80,47 +82,51 @@ protected void doExecute( false, ActionListener.wrap(sp -> { if (null == sp) { - final String message = "Service Provider with Entity ID [" - + request.getSpEntityId() - + "] and ACS [" - + request.getAssertionConsumerService() - + "] is not known to this Identity Provider"; - possiblyReplyWithSamlFailure( - authenticationState, - request.getSpEntityId(), - request.getAssertionConsumerService(), - StatusCode.RESPONDER, - new IllegalArgumentException(message), - listener + writeFailureResponse( + listener, + buildSamlInitiateSingleSignOnException( + authenticationState, + request.getSpEntityId(), + request.getAssertionConsumerService(), + StatusCode.RESPONDER, + RestStatus.BAD_REQUEST, + "Service Provider with Entity ID [{}] and ACS [{}] is not known to this Identity Provider", + request.getSpEntityId(), + request.getAssertionConsumerService() + ) + ); return; } final SecondaryAuthentication secondaryAuthentication = SecondaryAuthentication.readFromContext(securityContext); if (secondaryAuthentication == null) { - possiblyReplyWithSamlFailure( - authenticationState, - request.getSpEntityId(), - request.getAssertionConsumerService(), - StatusCode.REQUESTER, - new ElasticsearchSecurityException("Request is missing secondary authentication", RestStatus.FORBIDDEN), - listener + writeFailureResponse( + listener, + buildSamlInitiateSingleSignOnException( + authenticationState, + request.getSpEntityId(), + request.getAssertionConsumerService(), + StatusCode.REQUESTER, + RestStatus.FORBIDDEN, + "Request is missing secondary authentication" + ) ); return; } buildUserFromAuthentication(secondaryAuthentication, sp, ActionListener.wrap(user -> { if (user == null) { - possiblyReplyWithSamlFailure( - authenticationState, - request.getSpEntityId(), - request.getAssertionConsumerService(), - StatusCode.REQUESTER, - new ElasticsearchSecurityException( - "User [{}] is not permitted to access service [{}]", + writeFailureResponse( + listener, + buildSamlInitiateSingleSignOnException( + authenticationState, + request.getSpEntityId(), + request.getAssertionConsumerService(), + StatusCode.REQUESTER, RestStatus.FORBIDDEN, + "User [{}] is not permitted to access service [{}]", 
secondaryAuthentication.getUser().principal(), sp.getEntityId() - ), - listener + ) ); return; } @@ -144,23 +150,25 @@ protected void doExecute( listener.onFailure(e); } }, - e -> possiblyReplyWithSamlFailure( + e -> writeFailureResponse( + listener, + buildResponderSamlInitiateSingleSignOnException( + authenticationState, + request.getSpEntityId(), + request.getAssertionConsumerService(), + e + ) + ) + )); + }, + e -> writeFailureResponse( + listener, + buildResponderSamlInitiateSingleSignOnException( authenticationState, request.getSpEntityId(), request.getAssertionConsumerService(), - StatusCode.RESPONDER, - e, - listener + e ) - )); - }, - e -> possiblyReplyWithSamlFailure( - authenticationState, - request.getSpEntityId(), - request.getAssertionConsumerService(), - StatusCode.RESPONDER, - e, - listener ) ) ); @@ -194,15 +202,25 @@ private void buildUserFromAuthentication( }); } - private void possiblyReplyWithSamlFailure( - SamlAuthenticationState authenticationState, - String spEntityId, - String acsUrl, - String statusCode, - Exception e, - ActionListener listener + private void writeFailureResponse( + final ActionListener listener, + final SamlInitiateSingleSignOnException ex + ) { + logger.debug("Failed to generate a successful SAML response: ", ex); + listener.onFailure(ex); + } + + private SamlInitiateSingleSignOnException buildSamlInitiateSingleSignOnException( + final SamlAuthenticationState authenticationState, + final String spEntityId, + final String acsUrl, + final String statusCode, + final RestStatus restStatus, + final String messageFormatStr, + final Object... args ) { - logger.debug("Failed to generate a successful SAML response: ", e); + final SamlInitiateSingleSignOnException ex; + String exceptionMessage = LoggerMessageFormat.format(messageFormatStr, args); if (authenticationState != null) { final FailedAuthenticationResponseMessageBuilder builder = new FailedAuthenticationResponseMessageBuilder( samlFactory, @@ -210,11 +228,34 @@ private void possiblyReplyWithSamlFailure( identityProvider ).setInResponseTo(authenticationState.getAuthnRequestId()).setAcsUrl(acsUrl).setPrimaryStatusCode(statusCode); final Response response = builder.build(); - listener.onResponse( - new SamlInitiateSingleSignOnResponse(spEntityId, acsUrl, samlFactory.getXmlContent(response), statusCode, e.getMessage()) + ex = new SamlInitiateSingleSignOnException( + exceptionMessage, + restStatus, + new SamlInitiateSingleSignOnResponse(spEntityId, acsUrl, samlFactory.getXmlContent(response), statusCode, exceptionMessage) ); } else { - listener.onFailure(e); + ex = new SamlInitiateSingleSignOnException(exceptionMessage, restStatus); } + return ex; + } + + private SamlInitiateSingleSignOnException buildResponderSamlInitiateSingleSignOnException( + final SamlAuthenticationState authenticationState, + final String spEntityId, + final String acsUrl, + final Exception cause + ) { + final String exceptionMessage = cause.getMessage(); + final RestStatus restStatus = ExceptionsHelper.status(cause); + final SamlInitiateSingleSignOnException ex = buildSamlInitiateSingleSignOnException( + authenticationState, + spEntityId, + acsUrl, + StatusCode.RESPONDER, + restStatus, + exceptionMessage + ); + ex.initCause(cause); + return ex; } } diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/rest/action/RestSamlInitiateSingleSignOnAction.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/rest/action/RestSamlInitiateSingleSignOnAction.java index 
509c1e06ec45e..3e4d57860fdae 100644 --- a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/rest/action/RestSamlInitiateSingleSignOnAction.java +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/rest/action/RestSamlInitiateSingleSignOnAction.java @@ -68,13 +68,7 @@ protected RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClien @Override public RestResponse buildResponse(SamlInitiateSingleSignOnResponse response, XContentBuilder builder) throws Exception { builder.startObject(); - builder.field("post_url", response.getPostUrl()); - builder.field("saml_response", response.getSamlResponse()); - builder.field("saml_status", response.getSamlStatus()); - builder.field("error", response.getError()); - builder.startObject("service_provider"); - builder.field("entity_id", response.getEntityId()); - builder.endObject(); + response.toXContent(builder); builder.endObject(); return new RestResponse(RestStatus.OK, builder); } diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndex.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndex.java index 2291061af3e98..202b52e0974d8 100644 --- a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndex.java +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndex.java @@ -215,7 +215,7 @@ public void installIndexTemplate(ActionListener listener) { } private boolean isTemplateUpToDate(ClusterState state) { - return TemplateUtils.checkTemplateExistsAndIsUpToDate(TEMPLATE_NAME, TEMPLATE_META_VERSION_KEY, state, logger, null); + return TemplateUtils.checkTemplateExistsAndIsUpToDate(TEMPLATE_NAME, TEMPLATE_META_VERSION_KEY, state, logger); } public void deleteDocument(DocumentVersion version, WriteRequest.RefreshPolicy refreshPolicy, ActionListener listener) { diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/support/SamlInitiateSingleSignOnException.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/support/SamlInitiateSingleSignOnException.java new file mode 100644 index 0000000000000..ba983a84b5199 --- /dev/null +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/support/SamlInitiateSingleSignOnException.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.idp.saml.support; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.idp.action.SamlInitiateSingleSignOnResponse; + +import java.io.IOException; + +public class SamlInitiateSingleSignOnException extends ElasticsearchSecurityException { + + private SamlInitiateSingleSignOnResponse samlInitiateSingleSignOnResponse; + + public SamlInitiateSingleSignOnException( + String msg, + RestStatus status, + SamlInitiateSingleSignOnResponse samlInitiateSingleSignOnResponse + ) { + super(msg, status); + this.samlInitiateSingleSignOnResponse = samlInitiateSingleSignOnResponse; + } + + public SamlInitiateSingleSignOnException(String msg, RestStatus status) { + super(msg, status); + } + + @Override + protected void metadataToXContent(XContentBuilder builder, Params params) throws IOException { + if (this.samlInitiateSingleSignOnResponse != null) { + builder.startObject("saml_initiate_single_sign_on_response"); + this.samlInitiateSingleSignOnResponse.toXContent(builder); + builder.endObject(); + } + } + + public SamlInitiateSingleSignOnResponse getSamlInitiateSingleSignOnResponse() { + return samlInitiateSingleSignOnResponse; + } +} diff --git a/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/action/TransportSamlInitiateSingleSignOnActionTests.java b/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/action/TransportSamlInitiateSingleSignOnActionTests.java index 9436a4e1c39a9..3eb9096efce8d 100644 --- a/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/action/TransportSamlInitiateSingleSignOnActionTests.java +++ b/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/action/TransportSamlInitiateSingleSignOnActionTests.java @@ -33,6 +33,7 @@ import org.elasticsearch.xpack.idp.saml.sp.WildcardServiceProviderResolver; import org.elasticsearch.xpack.idp.saml.support.SamlAuthenticationState; import org.elasticsearch.xpack.idp.saml.support.SamlFactory; +import org.elasticsearch.xpack.idp.saml.support.SamlInitiateSingleSignOnException; import org.elasticsearch.xpack.idp.saml.test.IdpSamlTestCase; import org.mockito.Mockito; import org.opensaml.saml.saml2.core.StatusCode; @@ -112,7 +113,9 @@ public void testGetResponseWithoutSecondaryAuthenticationInSpInitiatedFlow() thr final TransportSamlInitiateSingleSignOnAction action = setupTransportAction(false); action.doExecute(mock(Task.class), request, future); - final SamlInitiateSingleSignOnResponse response = future.get(); + final SamlInitiateSingleSignOnException ex = (SamlInitiateSingleSignOnException) expectThrows(Exception.class, future::get) + .getCause(); + final SamlInitiateSingleSignOnResponse response = ex.getSamlInitiateSingleSignOnResponse(); assertThat(response.getError(), equalTo("Request is missing secondary authentication")); assertThat(response.getSamlStatus(), equalTo(StatusCode.REQUESTER)); assertThat(response.getPostUrl(), equalTo("https://sp.some.org/saml/acs")); diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java index 3ca8c7302d6dd..4f1efbbca387c 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java +++ 
b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java @@ -490,6 +490,111 @@ public void testDownsampleTwice() throws Exception { } } + public void testDownsampleTwiceSameInterval() throws Exception { + // Create the ILM policy + Request request = new Request("PUT", "_ilm/policy/" + policy); + request.setJsonEntity(""" + { + "policy": { + "phases": { + "warm": { + "actions": { + "downsample": { + "fixed_interval" : "5m" + } + } + }, + "cold": { + "min_age": "365d", + "actions": {} + } + } + } + } + """); + assertOK(client().performRequest(request)); + + // Create a template + Request createIndexTemplateRequest = new Request("POST", "/_index_template/" + dataStream); + createIndexTemplateRequest.setJsonEntity( + Strings.format(TEMPLATE, dataStream, "2006-01-08T23:40:53.384Z", "2021-01-08T23:40:53.384Z", policy) + ); + assertOK(client().performRequest(createIndexTemplateRequest)); + + index(client(), dataStream, true, null, "@timestamp", "2020-01-01T05:10:00Z", "volume", 11.0, "metricset", randomAlphaOfLength(5)); + + String firstBackingIndex = getBackingIndices(client(), dataStream).get(0); + logger.info("--> firstBackingIndex: {}", firstBackingIndex); + assertBusy( + () -> assertThat( + "index must wait in the " + CheckNotDataStreamWriteIndexStep.NAME + " until it is not the write index anymore", + explainIndex(client(), firstBackingIndex).get("step"), + is(CheckNotDataStreamWriteIndexStep.NAME) + ), + 30, + TimeUnit.SECONDS + ); + + // before we rollover, update template to not contain time boundaries anymore (rollover is blocked otherwise due to index time + // boundaries overlapping after rollover) + Request updateIndexTemplateRequest = new Request("POST", "/_index_template/" + dataStream); + updateIndexTemplateRequest.setJsonEntity(Strings.format(TEMPLATE_NO_TIME_BOUNDARIES, dataStream, policy)); + assertOK(client().performRequest(updateIndexTemplateRequest)); + + // Manual rollover the original index such that it's not the write index in the data stream anymore + rolloverMaxOneDocCondition(client(), dataStream); + + String downsampleIndexName = "downsample-5m-" + firstBackingIndex; + // wait for the downsample index to get to the end of the warm phase + assertBusy(() -> { + assertThat(indexExists(downsampleIndexName), is(true)); + assertThat(indexExists(firstBackingIndex), is(false)); + + assertThat(explainIndex(client(), downsampleIndexName).get("step"), is(PhaseCompleteStep.NAME)); + assertThat(explainIndex(client(), downsampleIndexName).get("phase"), is("warm")); + + Map settings = getOnlyIndexSettings(client(), downsampleIndexName); + assertEquals(firstBackingIndex, settings.get(IndexMetadata.INDEX_DOWNSAMPLE_ORIGIN_NAME.getKey())); + assertEquals(firstBackingIndex, settings.get(IndexMetadata.INDEX_DOWNSAMPLE_SOURCE_NAME.getKey())); + assertEquals(DownsampleTaskStatus.SUCCESS.toString(), settings.get(IndexMetadata.INDEX_DOWNSAMPLE_STATUS.getKey())); + assertEquals(policy, settings.get(LifecycleSettings.LIFECYCLE_NAME_SETTING.getKey())); + }, 60, TimeUnit.SECONDS); + + // update the policy to now contain the downsample action in cold, whilst not existing in warm anymore (this will have our already + // downsampled index attempt to go through the downsample action again when in cold) + + Request updatePolicyRequest = new Request("PUT", "_ilm/policy/" + policy); + updatePolicyRequest.setJsonEntity(""" + { + "policy": { + "phases": { + "warm": { + "actions": { + } + }, + "cold": { + "min_age": "0ms", + "actions": { + 
"downsample": { + "fixed_interval" : "5m" + } + } + } + } + } + } + """); + assertOK(client().performRequest(updatePolicyRequest)); + + // the downsample index (already part of the data stream as we created it in the warm phase previously) should continue to exist and + // reach the cold/complete/complete step + assertBusy(() -> { + assertThat(indexExists(downsampleIndexName), is(true)); + assertThat(explainIndex(client(), downsampleIndexName).get("step"), is(PhaseCompleteStep.NAME)); + assertThat(explainIndex(client(), downsampleIndexName).get("phase"), is("cold")); + }, 60, TimeUnit.SECONDS); + } + /** * Gets the generated rollup index name for a given index by looking at newly created indices that match the rollup index name pattern * diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java index 42ad64b9c60a3..d708b72d0b7df 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java @@ -9,13 +9,18 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.inference.EmptyTaskSettings; +import org.elasticsearch.inference.InferenceResults; import org.elasticsearch.inference.SecretSettings; import org.elasticsearch.inference.ServiceSettings; import org.elasticsearch.inference.TaskSettings; +import org.elasticsearch.xpack.inference.results.TextEmbeddingResults; import org.elasticsearch.xpack.inference.services.elser.ElserMlNodeServiceSettings; import org.elasticsearch.xpack.inference.services.elser.ElserMlNodeTaskSettings; import org.elasticsearch.xpack.inference.services.huggingface.elser.HuggingFaceElserSecretSettings; import org.elasticsearch.xpack.inference.services.huggingface.elser.HuggingFaceElserServiceSettings; +import org.elasticsearch.xpack.inference.services.openai.OpenAiServiceSettings; +import org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsTaskSettings; +import org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettings; import java.util.ArrayList; import java.util.List; @@ -27,8 +32,14 @@ private InferenceNamedWriteablesProvider() {} public static List getNamedWriteables() { List namedWriteables = new ArrayList<>(); - // Empty default settings - namedWriteables.add(new NamedWriteableRegistry.Entry(EmptyTaskSettings.class, EmptyTaskSettings.NAME, EmptyTaskSettings::new)); + // Inference Results + namedWriteables.add(new NamedWriteableRegistry.Entry(InferenceResults.class, TextEmbeddingResults.NAME, TextEmbeddingResults::new)); + + // Empty default task settings + namedWriteables.add(new NamedWriteableRegistry.Entry(TaskSettings.class, EmptyTaskSettings.NAME, EmptyTaskSettings::new)); + + // Default secret settings + namedWriteables.add(new NamedWriteableRegistry.Entry(SecretSettings.class, DefaultSecretSettings.NAME, DefaultSecretSettings::new)); // ELSER config namedWriteables.add( @@ -50,6 +61,14 @@ public static List getNamedWriteables() { new NamedWriteableRegistry.Entry(SecretSettings.class, HuggingFaceElserSecretSettings.NAME, HuggingFaceElserSecretSettings::new) ); + // OpenAI + namedWriteables.add( + new NamedWriteableRegistry.Entry(ServiceSettings.class, OpenAiServiceSettings.NAME, OpenAiServiceSettings::new) + ); + namedWriteables.add( + 
new NamedWriteableRegistry.Entry(TaskSettings.class, OpenAiEmbeddingsTaskSettings.NAME, OpenAiEmbeddingsTaskSettings::new) + ); + return namedWriteables; } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java index 2817276631f95..476f19a286d53 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java @@ -51,6 +51,7 @@ import org.elasticsearch.xpack.inference.services.ServiceComponents; import org.elasticsearch.xpack.inference.services.elser.ElserMlNodeService; import org.elasticsearch.xpack.inference.services.huggingface.elser.HuggingFaceElserService; +import org.elasticsearch.xpack.inference.services.openai.OpenAiService; import java.util.Collection; import java.util.List; @@ -183,7 +184,11 @@ public String getFeatureDescription() { @Override public List getInferenceServiceFactories() { - return List.of(ElserMlNodeService::new, context -> new HuggingFaceElserService(httpFactory, serviceComponents)); + return List.of( + ElserMlNodeService::new, + context -> new HuggingFaceElserService(httpFactory, serviceComponents), + context -> new OpenAiService(httpFactory, serviceComponents) + ); } @Override diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/ActionUtils.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/ActionUtils.java new file mode 100644 index 0000000000000..f130258e5f8ec --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/ActionUtils.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.action; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceResults; +import org.elasticsearch.rest.RestStatus; + +import java.util.List; + +public class ActionUtils { + + public static ActionListener> wrapFailuresInElasticsearchException( + String errorMessage, + ActionListener> listener + ) { + return ActionListener.wrap(listener::onResponse, e -> { + var unwrappedException = ExceptionsHelper.unwrapCause(e); + + if (unwrappedException instanceof ElasticsearchException esException) { + listener.onFailure(esException); + } else { + listener.onFailure(createInternalServerError(unwrappedException, errorMessage)); + } + }); + } + + public static ElasticsearchStatusException createInternalServerError(Throwable e, String message) { + return new ElasticsearchStatusException(message, RestStatus.INTERNAL_SERVER_ERROR, e); + } + + private ActionUtils() {} +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceElserAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceElserAction.java index 3da9f92e0dece..3d8518e3fa46e 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceElserAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceElserAction.java @@ -8,11 +8,8 @@ package org.elasticsearch.xpack.inference.external.action.huggingface; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchStatusException; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.inference.InferenceResults; -import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.external.http.sender.Sender; import org.elasticsearch.xpack.inference.external.huggingface.HuggingFaceAccount; @@ -25,48 +22,32 @@ import java.util.List; import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.inference.external.action.ActionUtils.createInternalServerError; +import static org.elasticsearch.xpack.inference.external.action.ActionUtils.wrapFailuresInElasticsearchException; public class HuggingFaceElserAction implements ExecutableAction { private final HuggingFaceAccount account; private final HuggingFaceClient client; + private final String errorMessage; public HuggingFaceElserAction(Sender sender, HuggingFaceElserModel model, ServiceComponents serviceComponents) { this.client = new HuggingFaceClient(sender, serviceComponents); this.account = new HuggingFaceAccount(model.getServiceSettings().uri(), model.getSecretSettings().apiKey()); + this.errorMessage = format("Failed to send ELSER Hugging Face request to [%s]", model.getServiceSettings().uri().toString()); } @Override public void execute(List input, ActionListener> listener) { try { HuggingFaceElserRequest request = new HuggingFaceElserRequest(account, new HuggingFaceElserRequestEntity(input)); + ActionListener> wrappedListener = wrapFailuresInElasticsearchException(errorMessage, listener); - ActionListener> 
wrapFailuresInElasticsearchExceptionListener = ActionListener.wrap( - listener::onResponse, - e -> { - var unwrappedException = ExceptionsHelper.unwrapCause(e); - - if (unwrappedException instanceof ElasticsearchException esException) { - listener.onFailure(esException); - } else { - listener.onFailure(createInternalServerError(unwrappedException)); - } - } - ); - - client.send(request, wrapFailuresInElasticsearchExceptionListener); + client.send(request, wrappedListener); } catch (ElasticsearchException e) { listener.onFailure(e); } catch (Exception e) { - listener.onFailure(createInternalServerError(e)); + listener.onFailure(createInternalServerError(e, errorMessage)); } } - - private ElasticsearchStatusException createInternalServerError(Throwable e) { - return new ElasticsearchStatusException( - format("Failed to send ELSER Hugging Face request to [%s]", account.url()), - RestStatus.INTERNAL_SERVER_ERROR, - e - ); - } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreator.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreator.java new file mode 100644 index 0000000000000..6c423760d0b35 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreator.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.action.openai; + +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.services.ServiceComponents; +import org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsModel; + +import java.util.Map; +import java.util.Objects; + +/** + * Provides a way to construct an {@link ExecutableAction} using the visitor pattern based on the openai model type. + */ +public class OpenAiActionCreator implements OpenAiActionVisitor { + private final Sender sender; + private final ServiceComponents serviceComponents; + + public OpenAiActionCreator(Sender sender, ServiceComponents serviceComponents) { + this.sender = Objects.requireNonNull(sender); + this.serviceComponents = Objects.requireNonNull(serviceComponents); + } + + @Override + public ExecutableAction create(OpenAiEmbeddingsModel model, Map taskSettings) { + var overriddenModel = model.overrideWith(taskSettings); + + return new OpenAiEmbeddingsAction(sender, overriddenModel, serviceComponents); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionVisitor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionVisitor.java new file mode 100644 index 0000000000000..52d9f2e2132a7 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionVisitor.java @@ -0,0 +1,17 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */
+
+package org.elasticsearch.xpack.inference.external.action.openai;
+
+import org.elasticsearch.xpack.inference.external.action.ExecutableAction;
+import org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsModel;
+
+import java.util.Map;
+
+public interface OpenAiActionVisitor {
+    ExecutableAction create(OpenAiEmbeddingsModel model, Map taskSettings);
+}
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiEmbeddingsAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiEmbeddingsAction.java
new file mode 100644
index 0000000000000..295c01ace0b77
--- /dev/null
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiEmbeddingsAction.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.inference.external.action.openai;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.core.Nullable;
+import org.elasticsearch.inference.InferenceResults;
+import org.elasticsearch.xpack.inference.external.action.ExecutableAction;
+import org.elasticsearch.xpack.inference.external.http.sender.Sender;
+import org.elasticsearch.xpack.inference.external.openai.OpenAiAccount;
+import org.elasticsearch.xpack.inference.external.openai.OpenAiClient;
+import org.elasticsearch.xpack.inference.external.request.openai.OpenAiEmbeddingsRequest;
+import org.elasticsearch.xpack.inference.external.request.openai.OpenAiEmbeddingsRequestEntity;
+import org.elasticsearch.xpack.inference.services.ServiceComponents;
+import org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsModel;
+
+import java.net.URI;
+import java.util.List;
+import java.util.Objects;
+
+import static org.elasticsearch.core.Strings.format;
+import static org.elasticsearch.xpack.inference.external.action.ActionUtils.createInternalServerError;
+import static org.elasticsearch.xpack.inference.external.action.ActionUtils.wrapFailuresInElasticsearchException;
+
+public class OpenAiEmbeddingsAction implements ExecutableAction {
+
+    private final OpenAiAccount account;
+    private final OpenAiClient client;
+    private final OpenAiEmbeddingsModel model;
+    private final String errorMessage;
+
+    public OpenAiEmbeddingsAction(Sender sender, OpenAiEmbeddingsModel model, ServiceComponents serviceComponents) {
+        this.model = Objects.requireNonNull(model);
+        this.account = new OpenAiAccount(
+            this.model.getServiceSettings().uri(),
+            this.model.getServiceSettings().organizationId(),
+            this.model.getSecretSettings().apiKey()
+        );
+        this.client = new OpenAiClient(Objects.requireNonNull(sender), Objects.requireNonNull(serviceComponents));
+        this.errorMessage = getErrorMessage(this.model.getServiceSettings().uri());
+    }
+
+    private static String getErrorMessage(@Nullable URI uri) {
+        if (uri != null) {
+            return format("Failed to send OpenAI embeddings request to [%s]", uri.toString());
+        }
+
+        return "Failed to send OpenAI embeddings request";
+    }
+
+    @Override
+    public void execute(List input, ActionListener> listener) {
+        try {
+            OpenAiEmbeddingsRequest request = new OpenAiEmbeddingsRequest(
+                account,
+                new
OpenAiEmbeddingsRequestEntity(input, model.getTaskSettings().model(), model.getTaskSettings().user()) + ); + ActionListener> wrappedListener = wrapFailuresInElasticsearchException(errorMessage, listener); + + client.send(request, wrappedListener); + } catch (ElasticsearchException e) { + listener.onFailure(e); + } catch (Exception e) { + listener.onFailure(createInternalServerError(e, errorMessage)); + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderFactory.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderFactory.java index c94d82e234c0c..acc7a0b3f6077 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderFactory.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderFactory.java @@ -52,7 +52,7 @@ public HttpRequestSenderFactory( this.settings = Objects.requireNonNull(settings); } - public HttpRequestSender createSender(String serviceName) { + public Sender createSender(String serviceName) { return new HttpRequestSender(serviceName, threadPool, httpClientManager, clusterService, settings); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiAccount.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiAccount.java new file mode 100644 index 0000000000000..a89032277ff8d --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiAccount.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.openai; + +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.core.Nullable; + +import java.net.URI; +import java.util.Objects; + +public record OpenAiAccount(@Nullable URI url, @Nullable String organizationId, SecureString apiKey) { + + public OpenAiAccount { + Objects.requireNonNull(apiKey); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiClient.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiClient.java new file mode 100644 index 0000000000000..a355c871782d6 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiClient.java @@ -0,0 +1,53 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */
+
+package org.elasticsearch.xpack.inference.external.openai;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.inference.InferenceResults;
+import org.elasticsearch.xpack.inference.external.http.retry.AlwaysRetryingResponseHandler;
+import org.elasticsearch.xpack.inference.external.http.retry.ResponseHandler;
+import org.elasticsearch.xpack.inference.external.http.retry.RetrySettings;
+import org.elasticsearch.xpack.inference.external.http.retry.RetryingHttpSender;
+import org.elasticsearch.xpack.inference.external.http.sender.Sender;
+import org.elasticsearch.xpack.inference.external.request.openai.OpenAiEmbeddingsRequest;
+import org.elasticsearch.xpack.inference.external.response.openai.OpenAiEmbeddingsResponseEntity;
+import org.elasticsearch.xpack.inference.services.ServiceComponents;
+
+import java.io.IOException;
+import java.util.List;
+
+public class OpenAiClient {
+    private static final Logger logger = LogManager.getLogger(OpenAiClient.class);
+    private static final ResponseHandler EMBEDDINGS_HANDLER = createEmbeddingsHandler();
+
+    private final RetryingHttpSender sender;
+
+    public OpenAiClient(Sender sender, ServiceComponents serviceComponents) {
+        this.sender = new RetryingHttpSender(
+            sender,
+            serviceComponents.throttlerManager(),
+            logger,
+            new RetrySettings(serviceComponents.settings()),
+            serviceComponents.threadPool()
+        );
+    }
+
+    public void send(OpenAiEmbeddingsRequest request, ActionListener> listener) throws IOException {
+        sender.send(request.createRequest(), EMBEDDINGS_HANDLER, listener);
+    }
+
+    private static ResponseHandler createEmbeddingsHandler() {
+        return new AlwaysRetryingResponseHandler(
+            "openai text embedding",
+            // TODO this is a hack to get the response to fit within List and will be addressed in a follow up PR
+            result -> List.of(OpenAiEmbeddingsResponseEntity.fromResponse(result))
+        );
+    }
+}
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/RequestUtils.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/RequestUtils.java
new file mode 100644
index 0000000000000..355db7288dacc
--- /dev/null
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/RequestUtils.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */ + +package org.elasticsearch.xpack.inference.external.request; + +import org.apache.http.Header; +import org.apache.http.HttpHeaders; +import org.apache.http.message.BasicHeader; +import org.elasticsearch.common.settings.SecureString; + +public class RequestUtils { + + public static Header createAuthBearerHeader(SecureString apiKey) { + return new BasicHeader(HttpHeaders.AUTHORIZATION, "Bearer " + apiKey.toString()); + } + + private RequestUtils() {} +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/huggingface/HuggingFaceElserRequest.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/huggingface/HuggingFaceElserRequest.java index f896bba4ae063..563b0036bdb09 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/huggingface/HuggingFaceElserRequest.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/huggingface/HuggingFaceElserRequest.java @@ -7,12 +7,10 @@ package org.elasticsearch.xpack.inference.external.request.huggingface; -import org.apache.http.Header; import org.apache.http.HttpHeaders; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpRequestBase; import org.apache.http.entity.ByteArrayEntity; -import org.apache.http.message.BasicHeader; import org.elasticsearch.common.Strings; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.inference.external.huggingface.HuggingFaceAccount; @@ -21,6 +19,8 @@ import java.nio.charset.StandardCharsets; import java.util.Objects; +import static org.elasticsearch.xpack.inference.external.request.RequestUtils.createAuthBearerHeader; + public class HuggingFaceElserRequest implements Request { private final HuggingFaceAccount account; @@ -37,12 +37,8 @@ public HttpRequestBase createRequest() { ByteArrayEntity byteEntity = new ByteArrayEntity(Strings.toString(entity).getBytes(StandardCharsets.UTF_8)); httpPost.setEntity(byteEntity); httpPost.setHeader(HttpHeaders.CONTENT_TYPE, XContentType.JSON.mediaTypeWithoutParameters()); - httpPost.setHeader(apiKeyHeader()); + httpPost.setHeader(createAuthBearerHeader(account.apiKey())); return httpPost; } - - private Header apiKeyHeader() { - return new BasicHeader(HttpHeaders.AUTHORIZATION, "Bearer " + account.apiKey().toString()); - } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequest.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequest.java new file mode 100644 index 0000000000000..d195563227d65 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequest.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.openai; + +import org.apache.http.HttpHeaders; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpRequestBase; +import org.apache.http.client.utils.URIBuilder; +import org.apache.http.entity.ByteArrayEntity; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.external.openai.OpenAiAccount; +import org.elasticsearch.xpack.inference.external.request.Request; + +import java.net.URI; +import java.net.URISyntaxException; +import java.nio.charset.StandardCharsets; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.external.request.RequestUtils.createAuthBearerHeader; +import static org.elasticsearch.xpack.inference.external.request.openai.OpenAiUtils.createOrgHeader; + +public class OpenAiEmbeddingsRequest implements Request { + + private final OpenAiAccount account; + private final OpenAiEmbeddingsRequestEntity entity; + + public OpenAiEmbeddingsRequest(OpenAiAccount account, OpenAiEmbeddingsRequestEntity entity) { + this.account = Objects.requireNonNull(account); + this.entity = Objects.requireNonNull(entity); + } + + public HttpRequestBase createRequest() { + try { + URI uriForRequest = account.url() == null ? buildDefaultUri() : account.url(); + + HttpPost httpPost = new HttpPost(uriForRequest); + + ByteArrayEntity byteEntity = new ByteArrayEntity(Strings.toString(entity).getBytes(StandardCharsets.UTF_8)); + httpPost.setEntity(byteEntity); + + httpPost.setHeader(HttpHeaders.CONTENT_TYPE, XContentType.JSON.mediaType()); + httpPost.setHeader(createAuthBearerHeader(account.apiKey())); + + var org = account.organizationId(); + if (org != null) { + httpPost.setHeader(createOrgHeader(org)); + } + + return httpPost; + } catch (URISyntaxException e) { + throw new ElasticsearchStatusException("Failed to construct OpenAI URL", RestStatus.INTERNAL_SERVER_ERROR, e); + } + } + + // default for testing + static URI buildDefaultUri() throws URISyntaxException { + return new URIBuilder().setScheme("https") + .setHost(OpenAiUtils.HOST) + .setPathSegments(OpenAiUtils.VERSION_1, OpenAiUtils.EMBEDDINGS_PATH) + .build(); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequestEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequestEntity.java new file mode 100644 index 0000000000000..38c61e5590fa3 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequestEntity.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.openai; + +import org.elasticsearch.core.Nullable; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +public record OpenAiEmbeddingsRequestEntity(List input, String model, @Nullable String user) implements ToXContentObject { + + private static final String INPUT_FIELD = "input"; + private static final String MODEL_FIELD = "model"; + private static final String USER_FIELD = "user"; + + public OpenAiEmbeddingsRequestEntity { + Objects.requireNonNull(input); + Objects.requireNonNull(model); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(INPUT_FIELD, input); + builder.field(MODEL_FIELD, model); + + if (user != null) { + builder.field(USER_FIELD, user); + } + + builder.endObject(); + return builder; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiUtils.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiUtils.java new file mode 100644 index 0000000000000..a6479b3ecde25 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiUtils.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.request.openai; + +import org.apache.http.Header; +import org.apache.http.message.BasicHeader; + +public class OpenAiUtils { + public static final String HOST = "api.openai.com"; + public static final String VERSION_1 = "v1"; + public static final String EMBEDDINGS_PATH = "embeddings"; + public static final String ORGANIZATION_HEADER = "OpenAI-Organization"; + + public static Header createOrgHeader(String org) { + return new BasicHeader(ORGANIZATION_HEADER, org); + } + + private OpenAiUtils() {} +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/openai/OpenAiEmbeddingsResponseEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/openai/OpenAiEmbeddingsResponseEntity.java new file mode 100644 index 0000000000000..60b568678987d --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/openai/OpenAiEmbeddingsResponseEntity.java @@ -0,0 +1,131 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
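Taken together, OpenAiEmbeddingsRequest, OpenAiEmbeddingsRequestEntity and OpenAiUtils issue a POST to https://api.openai.com/v1/embeddings (unless a test url is configured) with the bearer and optional OpenAI-Organization headers. A sketch of the body the entity serializes, with invented values:

import java.util.List;

import org.elasticsearch.common.Strings;
import org.elasticsearch.xpack.inference.external.request.openai.OpenAiEmbeddingsRequestEntity;

class EmbeddingsBodySketch {
    public static void main(String[] args) {
        var entity = new OpenAiEmbeddingsRequestEntity(List.of("hello"), "text-embedding-ada-002", "user-123");
        // Prints: {"input":["hello"],"model":"text-embedding-ada-002","user":"user-123"}
        // The "user" field is omitted entirely when null.
        System.out.println(Strings.toString(entity));
    }
}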
+ */ + +package org.elasticsearch.xpack.inference.external.response.openai; + +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentParserUtils; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.external.http.HttpResult; +import org.elasticsearch.xpack.inference.results.TextEmbeddingResults; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.core.Strings.format; + +public class OpenAiEmbeddingsResponseEntity { + + /** + * Parses the OpenAI json response. + * For a request like: + * + *
+     *     
+     *        {
+     *            "input": ["hello this is my name", "I wish I was there!"]
+     *        }
+     *     
+     * 
+ * + * The response would look like: + * + *
+     * 
+     * {
+     *  "object": "list",
+     *  "data": [
+     *      {
+     *          "object": "embedding",
+     *          "embedding": [
+     *              -0.009327292,
+     *              .... (1536 floats total for ada-002)
+     *              -0.0028842222,
+     *          ],
+     *          "index": 0
+     *      },
+     *      {
+     *          "object": "embedding",
+     *          "embedding": [ ... ],
+     *          "index": 1
+     *      }
+     *  ],
+     *  "model": "text-embedding-ada-002",
+     *  "usage": {
+     *      "prompt_tokens": 8,
+     *      "total_tokens": 8
+     *  }
+     * }
+     * 
+     * 
+ */ + public static TextEmbeddingResults fromResponse(HttpResult response) throws IOException { + var parserConfig = XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE); + + try (XContentParser jsonParser = XContentFactory.xContent(XContentType.JSON).createParser(parserConfig, response.body())) { + if (jsonParser.currentToken() == null) { + jsonParser.nextToken(); + } + + XContentParser.Token token = jsonParser.currentToken(); + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, token, jsonParser); + + positionParserAtTokenAfterField(jsonParser, "data"); + + List embeddingList = XContentParserUtils.parseList( + jsonParser, + OpenAiEmbeddingsResponseEntity::parseEmbeddingObject + ); + + return new TextEmbeddingResults(embeddingList); + } + } + + /** + * Iterates over the tokens until it finds a field name token with the text matching the field requested. + * + * @throws IllegalStateException if the field cannot be found + */ + private static void positionParserAtTokenAfterField(XContentParser parser, String field) throws IOException { + XContentParser.Token token; + + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME && parser.currentName().equals(field)) { + parser.nextToken(); + return; + } + } + + throw new IllegalStateException(format("Failed to find required field [%s] in OpenAI embeddings response", field)); + } + + private static TextEmbeddingResults.Embedding parseEmbeddingObject(XContentParser parser) throws IOException { + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser); + + positionParserAtTokenAfterField(parser, "embedding"); + + List embeddingValues = XContentParserUtils.parseList(parser, OpenAiEmbeddingsResponseEntity::parseEmbeddingList); + + // the parser is currently sitting at an ARRAY_END so go to the next token + parser.nextToken(); + // if there are additional fields within this object, lets skip them, so we can begin parsing the next embedding array + parser.skipChildren(); + + return new TextEmbeddingResults.Embedding(embeddingValues); + } + + private static float parseEmbeddingList(XContentParser parser) throws IOException { + XContentParser.Token token = parser.currentToken(); + XContentParserUtils.ensureExpectedToken(XContentParser.Token.VALUE_NUMBER, token, parser); + return parser.floatValue(); + } + + private OpenAiEmbeddingsResponseEntity() {} +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/results/TextEmbeddingResults.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/results/TextEmbeddingResults.java new file mode 100644 index 0000000000000..7afaaebe9e856 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/results/TextEmbeddingResults.java @@ -0,0 +1,131 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
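For orientation, a sketch of the value fromResponse(...) yields for a payload like the one documented above; here a TextEmbeddingResults (defined next) is constructed directly rather than parsed, and the float values are invented:

import java.util.List;

import org.elasticsearch.xpack.inference.results.TextEmbeddingResults;

class ParsedEmbeddingsSketch {
    public static void main(String[] args) {
        var results = new TextEmbeddingResults(
            List.of(
                new TextEmbeddingResults.Embedding(List.of(-0.009327292F, -0.0028842222F)),
                new TextEmbeddingResults.Embedding(List.of(0.1F, 0.2F))
            )
        );
        // Renders as {text_embedding=[{embedding=[...]}, {embedding=[...]}]}
        System.out.println(results.asMap());
    }
}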
+ */ + +package org.elasticsearch.xpack.inference.results; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.inference.InferenceResults; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * Writes a text embedding result in the follow json format + * { + * "text_embedding": [ + * { + * "embedding": [ + * 0.1 + * ] + * }, + * { + * "embedding": [ + * 0.2 + * ] + * } + * ] + * } + */ +public record TextEmbeddingResults(List embeddings) implements InferenceResults { + public static final String NAME = "text_embedding_results"; + public static final String TEXT_EMBEDDING = TaskType.TEXT_EMBEDDING.toString(); + + public TextEmbeddingResults(StreamInput in) throws IOException { + this(in.readCollectionAsList(Embedding::new)); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startArray(TEXT_EMBEDDING); + for (Embedding embedding : embeddings) { + embedding.toXContent(builder, params); + } + builder.endArray(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeCollection(embeddings); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public String getResultsField() { + return TEXT_EMBEDDING; + } + + @Override + public Map asMap() { + Map map = new LinkedHashMap<>(); + map.put(getResultsField(), embeddings.stream().map(Embedding::asMap).collect(Collectors.toList())); + + return map; + } + + @Override + public Map asMap(String outputField) { + Map map = new LinkedHashMap<>(); + map.put(outputField, embeddings.stream().map(Embedding::asMap).collect(Collectors.toList())); + + return map; + } + + @Override + public Object predictedValue() { + throw new UnsupportedOperationException("[" + NAME + "] does not support a single predicted value"); + } + + public record Embedding(List values) implements Writeable, ToXContentObject { + public static final String EMBEDDING = "embedding"; + + public Embedding(StreamInput in) throws IOException { + this(in.readCollectionAsImmutableList(StreamInput::readFloat)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeCollection(values, StreamOutput::writeFloat); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + builder.startArray(EMBEDDING); + for (Float value : values) { + builder.value(value); + } + builder.endArray(); + + builder.endObject(); + return builder; + } + + @Override + public String toString() { + return Strings.toString(this); + } + + public Map asMap() { + return Map.of(EMBEDDING, values); + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/MapParsingUtils.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/MapParsingUtils.java index 0849e8fa53cf5..20bea7f1347b3 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/MapParsingUtils.java +++ 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/MapParsingUtils.java @@ -8,10 +8,17 @@ package org.elasticsearch.xpack.inference.services; import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.core.Strings; import org.elasticsearch.rest.RestStatus; +import java.net.URI; +import java.net.URISyntaxException; import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.core.Strings.format; public class MapParsingUtils { /** @@ -54,8 +61,13 @@ public static Map removeFromMapOrThrowIfNull(Map return value; } + @SuppressWarnings("unchecked") + public static Map removeFromMap(Map sourceMap, String fieldName) { + return (Map) sourceMap.remove(fieldName); + } + public static void throwIfNotEmptyMap(Map settingsMap, String serviceName) { - if (settingsMap.isEmpty() == false) { + if (settingsMap != null && settingsMap.isEmpty() == false) { throw MapParsingUtils.unknownSettingsError(settingsMap, serviceName); } } @@ -74,11 +86,86 @@ public static String missingSettingErrorMsg(String settingName, String scope) { return Strings.format("[%s] does not contain the required setting [%s]", scope, settingName); } - public static String invalidUrlErrorMsg(String url, String settingName) { - return Strings.format("Invalid url [%s] received in setting [%s]", url, settingName); + public static String invalidUrlErrorMsg(String url, String settingName, String settingScope) { + return Strings.format("[%s] Invalid url [%s] received for field [%s]", settingScope, url, settingName); } public static String mustBeNonEmptyString(String settingName, String scope) { return Strings.format("[%s] Invalid value empty string. 
[%s] must be a non-empty string", scope, settingName); } + + // TODO improve URI validation logic + public static URI convertToUri(String url, String settingName, String settingScope, ValidationException validationException) { + try { + return createUri(url); + } catch (IllegalArgumentException ignored) { + validationException.addValidationError(MapParsingUtils.invalidUrlErrorMsg(url, settingName, settingScope)); + return null; + } + } + + public static URI createUri(String url) throws IllegalArgumentException { + Objects.requireNonNull(url); + + try { + return new URI(url); + } catch (URISyntaxException e) { + throw new IllegalArgumentException(format("unable to parse url [%s]", url), e); + } + } + + public static SecureString extractRequiredSecureString( + Map map, + String settingName, + String scope, + ValidationException validationException + ) { + String requiredField = extractRequiredString(map, settingName, scope, validationException); + + if (validationException.validationErrors().isEmpty() == false) { + return null; + } + + return new SecureString(Objects.requireNonNull(requiredField).toCharArray()); + } + + public static String extractRequiredString( + Map map, + String settingName, + String scope, + ValidationException validationException + ) { + String requiredField = MapParsingUtils.removeAsType(map, settingName, String.class); + + if (requiredField == null) { + validationException.addValidationError(MapParsingUtils.missingSettingErrorMsg(settingName, scope)); + } else if (requiredField.isEmpty()) { + validationException.addValidationError(MapParsingUtils.mustBeNonEmptyString(settingName, scope)); + } + + if (validationException.validationErrors().isEmpty() == false) { + return null; + } + + return requiredField; + } + + public static String extractOptionalString( + Map map, + String settingName, + String scope, + ValidationException validationException + ) { + String optionalField = MapParsingUtils.removeAsType(map, settingName, String.class); + + if (optionalField != null && optionalField.isEmpty()) { + validationException.addValidationError(MapParsingUtils.mustBeNonEmptyString(settingName, scope)); + } + + if (validationException.validationErrors().isEmpty() == false) { + return null; + } + + return optionalField; + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserSecretSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserSecretSettings.java index f80de0067437b..f2df48366f786 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserSecretSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserSecretSettings.java @@ -16,12 +16,13 @@ import org.elasticsearch.inference.ModelSecrets; import org.elasticsearch.inference.SecretSettings; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.inference.services.MapParsingUtils; import java.io.IOException; import java.util.Map; import java.util.Objects; +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.extractRequiredSecureString; + public record HuggingFaceElserSecretSettings(SecureString apiKey) implements SecretSettings { public static final String NAME = "hugging_face_elser_secret_settings"; @@ -29,21 +30,12 @@ public record HuggingFaceElserSecretSettings(SecureString 
apiKey) implements Sec public static HuggingFaceElserSecretSettings fromMap(Map map) { ValidationException validationException = new ValidationException(); - - String apiToken = MapParsingUtils.removeAsType(map, API_KEY, String.class); - - if (apiToken == null) { - validationException.addValidationError(MapParsingUtils.missingSettingErrorMsg(API_KEY, ModelSecrets.SECRET_SETTINGS)); - } else if (apiToken.isEmpty()) { - validationException.addValidationError(MapParsingUtils.mustBeNonEmptyString(API_KEY, ModelSecrets.SECRET_SETTINGS)); - } + SecureString secureApiToken = extractRequiredSecureString(map, API_KEY, ModelSecrets.SECRET_SETTINGS, validationException); if (validationException.validationErrors().isEmpty() == false) { throw validationException; } - SecureString secureApiToken = new SecureString(Objects.requireNonNull(apiToken).toCharArray()); - return new HuggingFaceElserSecretSettings(secureApiToken); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java index 9e4407945d775..66168cd13d58c 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java @@ -42,9 +42,6 @@ public class HuggingFaceElserService implements InferenceService { private final SetOnce factory; private final SetOnce serviceComponents; private final AtomicReference sender = new AtomicReference<>(); - // This is initialized once which assumes that the settings will not change. 
To change the service, it - // should be deleted and then added again - private final AtomicReference action = new AtomicReference<>(); public HuggingFaceElserService(SetOnce factory, SetOnce serviceComponents) { this.factory = Objects.requireNonNull(factory); @@ -107,24 +104,23 @@ public void infer( return; } - try { - init(model); - } catch (Exception e) { - listener.onFailure(new ElasticsearchException("Failed to initialize service", e)); + if (model instanceof HuggingFaceElserModel == false) { + listener.onFailure(new ElasticsearchException("The internal model was invalid")); return; } - action.get().execute(input, listener); + init(); + + HuggingFaceElserModel huggingFaceElserModel = (HuggingFaceElserModel) model; + HuggingFaceElserAction action = new HuggingFaceElserAction(sender.get(), huggingFaceElserModel, serviceComponents.get()); + + action.execute(input, listener); } @Override public void start(Model model, ActionListener listener) { - try { - init(model); - listener.onResponse(true); - } catch (Exception e) { - listener.onFailure(new ElasticsearchException("Failed to start service", e)); - } + init(); + listener.onResponse(true); } @Override @@ -132,21 +128,9 @@ public void close() throws IOException { IOUtils.closeWhileHandlingException(sender.get()); } - private void init(Model model) { - if (model instanceof HuggingFaceElserModel == false) { - throw new IllegalArgumentException("The internal model was invalid"); - } - + private void init() { sender.updateAndGet(current -> Objects.requireNonNullElseGet(current, () -> factory.get().createSender(name()))); sender.get().start(); - - HuggingFaceElserModel huggingFaceElserModel = (HuggingFaceElserModel) model; - action.updateAndGet( - current -> Objects.requireNonNullElseGet( - current, - () -> new HuggingFaceElserAction(sender.get(), huggingFaceElserModel, serviceComponents.get()) - ) - ); } @Override diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserServiceSettings.java index 13f66562f6f83..4b8213909f66b 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserServiceSettings.java @@ -7,8 +7,6 @@ package org.elasticsearch.xpack.inference.services.huggingface.elser; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ValidationException; @@ -17,59 +15,36 @@ import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ServiceSettings; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.inference.services.MapParsingUtils; import java.io.IOException; import java.net.URI; -import java.net.URISyntaxException; import java.util.Map; import java.util.Objects; -import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.convertToUri; +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.createUri; +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.extractRequiredString; public record 
HuggingFaceElserServiceSettings(URI uri) implements ServiceSettings { public static final String NAME = "hugging_face_elser_service_settings"; - private static final Logger logger = LogManager.getLogger(HuggingFaceElserServiceSettings.class); static final String URL = "url"; public static HuggingFaceElserServiceSettings fromMap(Map map) { ValidationException validationException = new ValidationException(); - String parsedUrl = MapParsingUtils.removeAsType(map, URL, String.class); - URI uri = convertToUri(parsedUrl, validationException); - + String parsedUrl = extractRequiredString(map, URL, ModelConfigurations.SERVICE_SETTINGS, validationException); if (validationException.validationErrors().isEmpty() == false) { throw validationException; } - return new HuggingFaceElserServiceSettings(uri); - } - - private static URI convertToUri(String url, ValidationException validationException) { - if (url == null) { - validationException.addValidationError(MapParsingUtils.missingSettingErrorMsg(URL, ModelConfigurations.SERVICE_SETTINGS)); - return null; - } + URI uri = convertToUri(parsedUrl, URL, ModelConfigurations.SERVICE_SETTINGS, validationException); - try { - return createUri(url); - } catch (IllegalArgumentException ignored) { - validationException.addValidationError(MapParsingUtils.invalidUrlErrorMsg(url, ModelConfigurations.SERVICE_SETTINGS)); - return null; + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; } - } - - // TODO move this to a common location and potentially improve parsing errors - private static URI createUri(String url) throws IllegalArgumentException { - Objects.requireNonNull(url); - try { - return new URI(url); - } catch (URISyntaxException e) { - logger.info(format("Invalid URL received [%s]", url), e); - throw new IllegalArgumentException(format("unable to parse url [%s]", url), e); - } + return new HuggingFaceElserServiceSettings(uri); } public HuggingFaceElserServiceSettings { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiModel.java new file mode 100644 index 0000000000000..97823e3bc9079 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiModel.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
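The refactored settings classes all follow the same pattern with the new MapParsingUtils helpers: extract fields against a shared ValidationException, then throw once if anything was reported. A condensed sketch; the field and scope strings below are illustrative, not constants from the change:

import java.util.HashMap;
import java.util.Map;

import org.elasticsearch.common.ValidationException;
import org.elasticsearch.common.settings.SecureString;

import static org.elasticsearch.xpack.inference.services.MapParsingUtils.extractOptionalString;
import static org.elasticsearch.xpack.inference.services.MapParsingUtils.extractRequiredSecureString;

class SettingsParsingSketch {
    public static void main(String[] args) {
        Map<String, Object> secrets = new HashMap<>();
        secrets.put("api_key", "my-key");

        ValidationException validationException = new ValidationException();

        // Each helper removes the field from the map and records a validation error
        // (returning null) when a required field is missing or a value is an empty string.
        SecureString apiKey = extractRequiredSecureString(secrets, "api_key", "secret_settings", validationException);
        String organization = extractOptionalString(secrets, "organization_id", "secret_settings", validationException);

        if (validationException.validationErrors().isEmpty() == false) {
            throw validationException;
        }

        System.out.println(apiKey + " / " + organization);
    }
}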
+ */ + +package org.elasticsearch.xpack.inference.services.openai; + +import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.external.action.openai.OpenAiActionVisitor; + +import java.util.Map; + +public abstract class OpenAiModel extends Model { + + public OpenAiModel(ModelConfigurations configurations, ModelSecrets secrets) { + super(configurations, secrets); + } + + public abstract ExecutableAction accept(OpenAiActionVisitor creator, Map taskSettings); +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java new file mode 100644 index 0000000000000..85abba70a3d03 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java @@ -0,0 +1,171 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.openai; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.core.IOUtils; +import org.elasticsearch.inference.InferenceResults; +import org.elasticsearch.inference.InferenceService; +import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.inference.external.action.openai.OpenAiActionCreator; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.services.ServiceComponents; +import org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsModel; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; + +import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.removeFromMapOrThrowIfNull; +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.throwIfNotEmptyMap; + +public class OpenAiService implements InferenceService { + public static final String NAME = "openai"; + + private final SetOnce factory; + private final SetOnce serviceComponents; + private final AtomicReference sender = new AtomicReference<>(); + + public OpenAiService(SetOnce factory, SetOnce serviceComponents) { + this.factory = Objects.requireNonNull(factory); + this.serviceComponents = Objects.requireNonNull(serviceComponents); + } + + @Override + public String name() { + return NAME; + } + + @Override + public OpenAiModel parseRequestConfig( + String modelId, + TaskType taskType, + Map config, + Set platformArchitectures + ) { + Map 
serviceSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.SERVICE_SETTINGS); + Map taskSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.TASK_SETTINGS); + + OpenAiModel model = createModel( + modelId, + taskType, + serviceSettingsMap, + taskSettingsMap, + serviceSettingsMap, + TaskType.unsupportedTaskTypeErrorMsg(taskType, NAME) + ); + + throwIfNotEmptyMap(config, NAME); + throwIfNotEmptyMap(serviceSettingsMap, NAME); + throwIfNotEmptyMap(taskSettingsMap, NAME); + + return model; + } + + private OpenAiModel createModel( + String modelId, + TaskType taskType, + Map serviceSettings, + Map taskSettings, + Map secretSettings, + String failureMessage + ) { + return switch (taskType) { + case TEXT_EMBEDDING -> new OpenAiEmbeddingsModel(modelId, taskType, NAME, serviceSettings, taskSettings, secretSettings); + default -> throw new ElasticsearchStatusException(failureMessage, RestStatus.BAD_REQUEST); + }; + } + + @Override + public OpenAiModel parsePersistedConfig(String modelId, TaskType taskType, Map config, Map secrets) { + Map serviceSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.SERVICE_SETTINGS); + Map taskSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.TASK_SETTINGS); + Map secretSettingsMap = removeFromMapOrThrowIfNull(secrets, ModelSecrets.SECRET_SETTINGS); + + OpenAiModel model = createModel( + modelId, + taskType, + serviceSettingsMap, + taskSettingsMap, + secretSettingsMap, + format("Failed to parse stored model [%s] for [%s] service, please delete and add the service again", modelId, NAME) + ); + + throwIfNotEmptyMap(config, NAME); + throwIfNotEmptyMap(secrets, NAME); + throwIfNotEmptyMap(serviceSettingsMap, NAME); + throwIfNotEmptyMap(taskSettingsMap, NAME); + throwIfNotEmptyMap(secretSettingsMap, NAME); + + return model; + } + + @Override + public void infer( + Model model, + List input, + Map taskSettings, + ActionListener> listener + ) { + init(); + + if (model instanceof OpenAiModel == false) { + listener.onFailure( + new ElasticsearchStatusException( + format( + "The internal model was invalid, please delete the service [%s] with id [%s] and add it again.", + model.getConfigurations().getService(), + model.getConfigurations().getModelId() + ), + RestStatus.INTERNAL_SERVER_ERROR + ) + ); + return; + } + + OpenAiModel openAiModel = (OpenAiModel) model; + var actionCreator = new OpenAiActionCreator(sender.get(), serviceComponents.get()); + + var action = openAiModel.accept(actionCreator, taskSettings); + action.execute(input, listener); + } + + @Override + public void start(Model model, ActionListener listener) { + init(); + listener.onResponse(true); + } + + @Override + public void close() throws IOException { + IOUtils.closeWhileHandlingException(sender.get()); + } + + private void init() { + sender.updateAndGet(current -> Objects.requireNonNullElseGet(current, () -> factory.get().createSender(name()))); + sender.get().start(); + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.ML_INFERENCE_OPENAI_ADDED; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceSettings.java new file mode 100644 index 0000000000000..adb947b01691e --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceSettings.java @@ -0,0 
+1,112 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.openai; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ServiceSettings; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.net.URI; +import java.util.Map; + +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.convertToUri; +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.createUri; +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.extractOptionalString; + +/** + * Defines the base settings for interacting with OpenAI. + * @param uri an optional uri to override the openai url. This should only be used for testing. + */ +public record OpenAiServiceSettings(@Nullable URI uri, @Nullable String organizationId) implements ServiceSettings { + + public static final String NAME = "openai_service_settings"; + + public static final String URL = "url"; + public static final String ORGANIZATION = "organization_id"; + + public static OpenAiServiceSettings fromMap(Map map) { + ValidationException validationException = new ValidationException(); + + String url = extractOptionalString(map, URL, ModelConfigurations.SERVICE_SETTINGS, validationException); + String organizationId = extractOptionalString(map, ORGANIZATION, ModelConfigurations.SERVICE_SETTINGS, validationException); + + // Throw if any of the settings were empty strings + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + // the url is optional and only for testing + if (url == null) { + return new OpenAiServiceSettings((URI) null, organizationId); + } + + URI uri = convertToUri(url, URL, ModelConfigurations.SERVICE_SETTINGS, validationException); + + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return new OpenAiServiceSettings(uri, organizationId); + } + + public OpenAiServiceSettings(@Nullable String url, @Nullable String organizationId) { + this(createOptionalUri(url), organizationId); + } + + private static URI createOptionalUri(String url) { + if (url == null) { + return null; + } + + return createUri(url); + } + + public OpenAiServiceSettings(StreamInput in) throws IOException { + this(in.readOptionalString(), in.readOptionalString()); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + if (uri != null) { + builder.field(URL, uri.toString()); + } + + if (organizationId != null) { + builder.field(ORGANIZATION, organizationId); + } + + builder.endObject(); + return builder; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.ML_INFERENCE_OPENAI_ADDED; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + var uriToWrite = uri != 
null ? uri.toString() : null; + out.writeOptionalString(uriToWrite); + out.writeOptionalString(organizationId); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsModel.java new file mode 100644 index 0000000000000..210b84d8ca31e --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsModel.java @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.openai.embeddings; + +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.external.action.openai.OpenAiActionVisitor; +import org.elasticsearch.xpack.inference.services.openai.OpenAiModel; +import org.elasticsearch.xpack.inference.services.openai.OpenAiServiceSettings; +import org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettings; + +import java.util.Map; + +public class OpenAiEmbeddingsModel extends OpenAiModel { + public OpenAiEmbeddingsModel( + String modelId, + TaskType taskType, + String service, + Map serviceSettings, + Map taskSettings, + Map secrets + ) { + this( + modelId, + taskType, + service, + OpenAiServiceSettings.fromMap(serviceSettings), + OpenAiEmbeddingsTaskSettings.fromMap(taskSettings), + DefaultSecretSettings.fromMap(secrets) + ); + } + + // Should only be used directly for testing + OpenAiEmbeddingsModel( + String modelId, + TaskType taskType, + String service, + OpenAiServiceSettings serviceSettings, + OpenAiEmbeddingsTaskSettings taskSettings, + DefaultSecretSettings secrets + ) { + super(new ModelConfigurations(modelId, taskType, service, serviceSettings, taskSettings), new ModelSecrets(secrets)); + } + + private OpenAiEmbeddingsModel(OpenAiEmbeddingsModel originalModel, OpenAiEmbeddingsTaskSettings taskSettings) { + super( + new ModelConfigurations( + originalModel.getConfigurations().getModelId(), + originalModel.getConfigurations().getTaskType(), + originalModel.getConfigurations().getService(), + originalModel.getServiceSettings(), + taskSettings + ), + new ModelSecrets(originalModel.getSecretSettings()) + ); + } + + @Override + public OpenAiServiceSettings getServiceSettings() { + return (OpenAiServiceSettings) super.getServiceSettings(); + } + + @Override + public OpenAiEmbeddingsTaskSettings getTaskSettings() { + return (OpenAiEmbeddingsTaskSettings) super.getTaskSettings(); + } + + @Override + public DefaultSecretSettings getSecretSettings() { + return (DefaultSecretSettings) super.getSecretSettings(); + } + + @Override + public ExecutableAction accept(OpenAiActionVisitor creator, Map taskSettings) { + return creator.create(this, taskSettings); + } + + public OpenAiEmbeddingsModel overrideWith(Map taskSettings) { + var requestTaskSettings = OpenAiEmbeddingsRequestTaskSettings.fromMap(taskSettings); + + return new OpenAiEmbeddingsModel(this, getTaskSettings().overrideWith(requestTaskSettings)); + } +} diff --git 
a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsRequestTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsRequestTaskSettings.java new file mode 100644 index 0000000000000..4933717192266 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsRequestTaskSettings.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.openai.embeddings; + +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.inference.ModelConfigurations; + +import java.util.Map; + +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.extractOptionalString; +import static org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsTaskSettings.MODEL; +import static org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsTaskSettings.USER; + +/** + * This class handles extracting OpenAI task settings from a request. The difference between this class and + * {@link OpenAiEmbeddingsTaskSettings} is that this class considers all fields as optional. It will not throw an error if a field + * is missing. This allows overriding persistent task settings. + * @param model the name of the model to use with this request + * @param user a unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse + */ +public record OpenAiEmbeddingsRequestTaskSettings(String model, String user) { + public static final OpenAiEmbeddingsRequestTaskSettings EMPTY_SETTINGS = new OpenAiEmbeddingsRequestTaskSettings(null, null); + + /** + * Extracts the task settings from a map. All settings are considered optional and the absence of a setting + * does not throw an error. + * @param map the settings received from a request + * @return a {@link OpenAiEmbeddingsRequestTaskSettings} + */ + public static OpenAiEmbeddingsRequestTaskSettings fromMap(Map map) { + if (map.isEmpty()) { + return OpenAiEmbeddingsRequestTaskSettings.EMPTY_SETTINGS; + } + + ValidationException validationException = new ValidationException(); + + String model = extractOptionalString(map, MODEL, ModelConfigurations.TASK_SETTINGS, validationException); + String user = extractOptionalString(map, USER, ModelConfigurations.TASK_SETTINGS, validationException); + + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return new OpenAiEmbeddingsRequestTaskSettings(model, user); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettings.java new file mode 100644 index 0000000000000..05781c03f9cb0 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettings.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
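OpenAiEmbeddingsRequestTaskSettings feeds the overrideWith(...) merge on the persisted task settings defined just below: request values win when present, otherwise the stored values are kept. A small sketch with invented values:

import org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsRequestTaskSettings;
import org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsTaskSettings;

class TaskSettingsOverrideSketch {
    public static void main(String[] args) {
        var persisted = new OpenAiEmbeddingsTaskSettings("text-embedding-ada-002", "stored_user");
        var perRequest = new OpenAiEmbeddingsRequestTaskSettings(null, "request_user");

        // model stays "text-embedding-ada-002"; user becomes "request_user"
        OpenAiEmbeddingsTaskSettings effective = persisted.overrideWith(perRequest);
        System.out.println(effective.model() + " / " + effective.user());
    }
}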
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.openai.embeddings; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.TaskSettings; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.extractOptionalString; +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.extractRequiredString; + +/** + * Defines the task settings for the openai service. + * + * @param model the id of the model to use in the requests to openai + * @param user an optional unique identifier representing the end-user, which can help OpenAI to monitor and detect abuse + * see the openai docs for more details + */ +public record OpenAiEmbeddingsTaskSettings(String model, @Nullable String user) implements TaskSettings { + + public static final String NAME = "openai_embeddings_task_settings"; + public static final String MODEL = "model"; + public static final String USER = "user"; + + public static OpenAiEmbeddingsTaskSettings fromMap(Map map) { + ValidationException validationException = new ValidationException(); + + String model = extractRequiredString(map, MODEL, ModelConfigurations.TASK_SETTINGS, validationException); + String user = extractOptionalString(map, USER, ModelConfigurations.TASK_SETTINGS, validationException); + + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return new OpenAiEmbeddingsTaskSettings(model, user); + } + + public OpenAiEmbeddingsTaskSettings { + Objects.requireNonNull(model); + } + + public OpenAiEmbeddingsTaskSettings(StreamInput in) throws IOException { + this(in.readString(), in.readOptionalString()); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(MODEL, model); + if (user != null) { + builder.field(USER, user); + } + builder.endObject(); + return builder; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.ML_INFERENCE_OPENAI_ADDED; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(model); + out.writeOptionalString(user); + } + + public OpenAiEmbeddingsTaskSettings overrideWith(OpenAiEmbeddingsRequestTaskSettings requestSettings) { + var modelToUse = requestSettings.model() == null ? model : requestSettings.model(); + var userToUse = requestSettings.user() == null ? 
user : requestSettings.user(); + + return new OpenAiEmbeddingsTaskSettings(modelToUse, userToUse); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/settings/DefaultSecretSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/settings/DefaultSecretSettings.java new file mode 100644 index 0000000000000..3ad29d56a88be --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/settings/DefaultSecretSettings.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.settings; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SecretSettings; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.extractRequiredSecureString; + +/** + * Contains secret settings that are common to all services. + * @param apiKey the key used to authenticate with the 3rd party service + */ +public record DefaultSecretSettings(SecureString apiKey) implements SecretSettings { + public static final String NAME = "default_secret_settings"; + + static final String API_KEY = "api_key"; + + public static DefaultSecretSettings fromMap(Map map) { + ValidationException validationException = new ValidationException(); + SecureString secureApiToken = extractRequiredSecureString(map, API_KEY, ModelSecrets.SECRET_SETTINGS, validationException); + + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return new DefaultSecretSettings(secureApiToken); + } + + public DefaultSecretSettings { + Objects.requireNonNull(apiKey); + } + + public DefaultSecretSettings(StreamInput in) throws IOException { + this(in.readSecureString()); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(API_KEY, apiKey.toString()); + builder.endObject(); + return builder; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.ML_INFERENCE_OPENAI_ADDED; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeSecureString(apiKey); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreatorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreatorTests.java new file mode 100644 index 0000000000000..7bdc7d406a309 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreatorTests.java @@ -0,0 +1,125 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.action.openai; + +import org.apache.http.HttpHeaders; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceResults; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.http.MockResponse; +import org.elasticsearch.test.http.MockWebServer; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.external.http.HttpClientManager; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.logging.ThrottlerManager; +import org.elasticsearch.xpack.inference.results.TextEmbeddingResults; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; +import static org.elasticsearch.xpack.inference.external.http.Utils.getUrl; +import static org.elasticsearch.xpack.inference.external.http.Utils.inferenceUtilityPool; +import static org.elasticsearch.xpack.inference.external.http.Utils.mockClusterServiceEmpty; +import static org.elasticsearch.xpack.inference.external.request.openai.OpenAiUtils.ORGANIZATION_HEADER; +import static org.elasticsearch.xpack.inference.services.ServiceComponentsTests.createWithEmptySettings; +import static org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsModelTests.createModel; +import static org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsRequestTaskSettingsTests.getRequestTaskSettingsMap; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; + +public class OpenAiActionCreatorTests extends ESTestCase { + private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); + private final MockWebServer webServer = new MockWebServer(); + private ThreadPool threadPool; + private HttpClientManager clientManager; + + @Before + public void init() throws Exception { + webServer.start(); + threadPool = createThreadPool(inferenceUtilityPool()); + clientManager = HttpClientManager.create(Settings.EMPTY, threadPool, mockClusterServiceEmpty(), mock(ThrottlerManager.class)); + } + + @After + public void shutdown() throws IOException { + clientManager.close(); + terminate(threadPool); + webServer.close(); + } + + public void testCreate_OpenAiEmbeddingsModel() throws IOException { + var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + + try (var sender = senderFactory.createSender("test_service")) { + sender.start(); + + String responseJson = """ + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + 0.0123, + -0.0123 + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + 
+ var model = createModel(getUrl(webServer), "org", "secret", "model", "user"); + var actionCreator = new OpenAiActionCreator(sender, createWithEmptySettings(threadPool)); + var overriddenTaskSettings = getRequestTaskSettingsMap(null, "overridden_user"); + var action = actionCreator.create(model, overriddenTaskSettings); + + PlainActionFuture> listener = new PlainActionFuture<>(); + action.execute(List.of("abc"), listener); + + InferenceResults result = listener.actionGet(TIMEOUT).get(0); + + assertThat( + result.asMap(), + is( + Map.of( + TextEmbeddingResults.TEXT_EMBEDDING, + List.of(Map.of(TextEmbeddingResults.Embedding.EMBEDDING, List.of(0.0123F, -0.0123F))) + ) + ) + ); + assertThat(webServer.requests(), hasSize(1)); + assertNull(webServer.requests().get(0).getUri().getQuery()); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer secret")); + assertThat(webServer.requests().get(0).getHeader(ORGANIZATION_HEADER), equalTo("org")); + + var requestMap = entityAsMap(webServer.requests().get(0).getBody()); + assertThat(requestMap.size(), is(3)); + assertThat(requestMap.get("input"), is(List.of("abc"))); + assertThat(requestMap.get("model"), is("model")); + assertThat(requestMap.get("user"), is("overridden_user")); + } + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiEmbeddingsActionTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiEmbeddingsActionTests.java new file mode 100644 index 0000000000000..d1bcadf8e30c8 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiEmbeddingsActionTests.java @@ -0,0 +1,238 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.action.openai; + +import org.apache.http.HttpHeaders; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceResults; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.http.MockResponse; +import org.elasticsearch.test.http.MockWebServer; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.external.http.HttpClientManager; +import org.elasticsearch.xpack.inference.external.http.HttpResult; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.logging.ThrottlerManager; +import org.elasticsearch.xpack.inference.results.TextEmbeddingResults; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; +import static org.elasticsearch.xpack.inference.external.http.Utils.getUrl; +import static org.elasticsearch.xpack.inference.external.http.Utils.inferenceUtilityPool; +import static org.elasticsearch.xpack.inference.external.http.Utils.mockClusterServiceEmpty; +import static org.elasticsearch.xpack.inference.external.request.openai.OpenAiUtils.ORGANIZATION_HEADER; +import static org.elasticsearch.xpack.inference.services.ServiceComponentsTests.createWithEmptySettings; +import static org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsModelTests.createModel; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; + +public class OpenAiEmbeddingsActionTests extends ESTestCase { + private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); + private final MockWebServer webServer = new MockWebServer(); + private ThreadPool threadPool; + private HttpClientManager clientManager; + + @Before + public void init() throws Exception { + webServer.start(); + threadPool = createThreadPool(inferenceUtilityPool()); + clientManager = HttpClientManager.create(Settings.EMPTY, threadPool, mockClusterServiceEmpty(), mock(ThrottlerManager.class)); + } + + @After + public void shutdown() throws IOException { + clientManager.close(); + terminate(threadPool); + webServer.close(); + } + + public void testExecute_ReturnsSuccessfulResponse() throws IOException { + var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + + try (var sender = senderFactory.createSender("test_service")) { + sender.start(); + + String responseJson = """ + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + 0.0123, + -0.0123 + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + 
"total_tokens": 8 + } + } + """; + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + + var action = createAction(getUrl(webServer), "org", "secret", "model", "user", sender); + + PlainActionFuture> listener = new PlainActionFuture<>(); + action.execute(List.of("abc"), listener); + + InferenceResults result = listener.actionGet(TIMEOUT).get(0); + + assertThat( + result.asMap(), + is( + Map.of( + TextEmbeddingResults.TEXT_EMBEDDING, + List.of(Map.of(TextEmbeddingResults.Embedding.EMBEDDING, List.of(0.0123F, -0.0123F))) + ) + ) + ); + assertThat(webServer.requests(), hasSize(1)); + assertNull(webServer.requests().get(0).getUri().getQuery()); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer secret")); + assertThat(webServer.requests().get(0).getHeader(ORGANIZATION_HEADER), equalTo("org")); + + var requestMap = entityAsMap(webServer.requests().get(0).getBody()); + assertThat(requestMap.size(), is(3)); + assertThat(requestMap.get("input"), is(List.of("abc"))); + assertThat(requestMap.get("model"), is("model")); + assertThat(requestMap.get("user"), is("user")); + } + } + + public void testExecute_ThrowsURISyntaxException_ForInvalidUrl() throws IOException { + try (var sender = mock(Sender.class)) { + var thrownException = expectThrows( + IllegalArgumentException.class, + () -> createAction("^^", "org", "secret", "model", "user", sender) + ); + assertThat(thrownException.getMessage(), is("unable to parse url [^^]")); + } + } + + public void testExecute_ThrowsElasticsearchException() { + var sender = mock(Sender.class); + doThrow(new ElasticsearchException("failed")).when(sender).send(any(), any()); + + var action = createAction(getUrl(webServer), "org", "secret", "model", "user", sender); + + PlainActionFuture> listener = new PlainActionFuture<>(); + action.execute(List.of("abc"), listener); + + var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + + assertThat(thrownException.getMessage(), is("failed")); + } + + public void testExecute_ThrowsElasticsearchException_WhenSenderOnFailureIsCalled() { + var sender = mock(Sender.class); + + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + listener.onFailure(new IllegalStateException("failed")); + + return Void.TYPE; + }).when(sender).send(any(), any()); + + var action = createAction(getUrl(webServer), "org", "secret", "model", "user", sender); + + PlainActionFuture> listener = new PlainActionFuture<>(); + action.execute(List.of("abc"), listener); + + var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + + assertThat(thrownException.getMessage(), is(format("Failed to send OpenAI embeddings request to [%s]", getUrl(webServer)))); + } + + public void testExecute_ThrowsElasticsearchException_WhenSenderOnFailureIsCalled_WhenUrlIsNull() { + var sender = mock(Sender.class); + + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + listener.onFailure(new IllegalStateException("failed")); + + return Void.TYPE; + }).when(sender).send(any(), any()); + + var action = createAction(null, "org", "secret", "model", "user", sender); + + PlainActionFuture> listener = new PlainActionFuture<>(); + 
action.execute(List.of("abc"), listener); + + var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + + assertThat(thrownException.getMessage(), is("Failed to send OpenAI embeddings request")); + } + + public void testExecute_ThrowsException() { + var sender = mock(Sender.class); + doThrow(new IllegalArgumentException("failed")).when(sender).send(any(), any()); + + var action = createAction(getUrl(webServer), "org", "secret", "model", "user", sender); + + PlainActionFuture<List<InferenceResults>> listener = new PlainActionFuture<>(); + action.execute(List.of("abc"), listener); + + var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + + assertThat(thrownException.getMessage(), is(format("Failed to send OpenAI embeddings request to [%s]", getUrl(webServer)))); + } + + public void testExecute_ThrowsExceptionWithNullUrl() { + var sender = mock(Sender.class); + doThrow(new IllegalArgumentException("failed")).when(sender).send(any(), any()); + + var action = createAction(null, "org", "secret", "model", "user", sender); + + PlainActionFuture<List<InferenceResults>> listener = new PlainActionFuture<>(); + action.execute(List.of("abc"), listener); + + var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + + assertThat(thrownException.getMessage(), is("Failed to send OpenAI embeddings request")); + } + + private OpenAiEmbeddingsAction createAction( + String url, + String org, + String apiKey, + String modelName, + @Nullable String user, + Sender sender + ) { + var model = createModel(url, org, apiKey, modelName, user); + + return new OpenAiEmbeddingsAction(sender, model, createWithEmptySettings(threadPool)); + } + +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderFactoryTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderFactoryTests.java index 82c41794695fd..af4ac7cd59977 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderFactoryTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderFactoryTests.java @@ -39,6 +39,7 @@ import static org.elasticsearch.xpack.inference.external.http.Utils.mockClusterServiceEmpty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.any; @@ -117,7 +118,10 @@ public void testHttpRequestSender_Throws_WhenATimeoutOccurs() throws Exception { var senderFactory = new HttpRequestSenderFactory(threadPool, mockManager, mockClusterServiceEmpty(), Settings.EMPTY); try (var sender = senderFactory.createSender("test_service")) { - sender.setMaxRequestTimeout(TimeValue.timeValueNanos(1)); + assertThat(sender, instanceOf(HttpRequestSenderFactory.HttpRequestSender.class)); + // hack to get around the sender interface so we can set the timeout directly + var httpSender = (HttpRequestSenderFactory.HttpRequestSender) sender; + httpSender.setMaxRequestTimeout(TimeValue.timeValueNanos(1)); sender.start(); PlainActionFuture<HttpResult> listener = new PlainActionFuture<>(); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/openai/OpenAiClientTests.java
b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/openai/OpenAiClientTests.java new file mode 100644 index 0000000000000..534941dbe05fb --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/openai/OpenAiClientTests.java @@ -0,0 +1,316 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.openai; + +import org.apache.http.HttpHeaders; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceResults; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.http.MockResponse; +import org.elasticsearch.test.http.MockWebServer; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.external.http.HttpClientManager; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.results.TextEmbeddingResults; +import org.elasticsearch.xpack.inference.services.ServiceComponents; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; +import static org.elasticsearch.xpack.inference.external.http.Utils.getUrl; +import static org.elasticsearch.xpack.inference.external.http.Utils.inferenceUtilityPool; +import static org.elasticsearch.xpack.inference.external.http.Utils.mockClusterServiceEmpty; +import static org.elasticsearch.xpack.inference.external.http.retry.RetrySettingsTests.buildSettingsWithRetryFields; +import static org.elasticsearch.xpack.inference.external.request.openai.OpenAiEmbeddingsRequestTests.createRequest; +import static org.elasticsearch.xpack.inference.external.request.openai.OpenAiUtils.ORGANIZATION_HEADER; +import static org.elasticsearch.xpack.inference.logging.ThrottlerManagerTests.mockThrottlerManager; +import static org.elasticsearch.xpack.inference.services.ServiceComponentsTests.createWithEmptySettings; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; + +public class OpenAiClientTests extends ESTestCase { + private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); + private final MockWebServer webServer = new MockWebServer(); + private ThreadPool threadPool; + private HttpClientManager clientManager; + + @Before + public void init() throws Exception { + webServer.start(); + threadPool = createThreadPool(inferenceUtilityPool()); + clientManager = HttpClientManager.create(Settings.EMPTY, threadPool, mockClusterServiceEmpty(), mockThrottlerManager()); + } + + @After + public void shutdown() throws 
IOException { + clientManager.close(); + terminate(threadPool); + webServer.close(); + } + + public void testSend_SuccessfulResponse() throws IOException, URISyntaxException { + var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + + try (var sender = senderFactory.createSender("test_service")) { + sender.start(); + + String responseJson = """ + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + 0.0123, + -0.0123 + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + + OpenAiClient openAiClient = new OpenAiClient(sender, createWithEmptySettings(threadPool)); + + PlainActionFuture> listener = new PlainActionFuture<>(); + openAiClient.send(createRequest(getUrl(webServer), "org", "secret", "abc", "model", "user"), listener); + + InferenceResults result = listener.actionGet(TIMEOUT).get(0); + + assertThat( + result.asMap(), + is( + Map.of( + TextEmbeddingResults.TEXT_EMBEDDING, + List.of(Map.of(TextEmbeddingResults.Embedding.EMBEDDING, List.of(0.0123F, -0.0123F))) + ) + ) + ); + + assertThat(webServer.requests(), hasSize(1)); + assertNull(webServer.requests().get(0).getUri().getQuery()); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer secret")); + assertThat(webServer.requests().get(0).getHeader(ORGANIZATION_HEADER), equalTo("org")); + + var requestMap = entityAsMap(webServer.requests().get(0).getBody()); + assertThat(requestMap.size(), is(3)); + assertThat(requestMap.get("input"), is(List.of("abc"))); + assertThat(requestMap.get("model"), is("model")); + assertThat(requestMap.get("user"), is("user")); + } + } + + public void testSend_SuccessfulResponse_WithoutUser() throws IOException, URISyntaxException { + var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + + try (var sender = senderFactory.createSender("test_service")) { + sender.start(); + + String responseJson = """ + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + 0.0123, + -0.0123 + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + + OpenAiClient openAiClient = new OpenAiClient(sender, createWithEmptySettings(threadPool)); + + PlainActionFuture> listener = new PlainActionFuture<>(); + openAiClient.send(createRequest(getUrl(webServer), "org", "secret", "abc", "model", null), listener); + + InferenceResults result = listener.actionGet(TIMEOUT).get(0); + + assertThat( + result.asMap(), + is( + Map.of( + TextEmbeddingResults.TEXT_EMBEDDING, + List.of(Map.of(TextEmbeddingResults.Embedding.EMBEDDING, List.of(0.0123F, -0.0123F))) + ) + ) + ); + + assertThat(webServer.requests(), hasSize(1)); + assertNull(webServer.requests().get(0).getUri().getQuery()); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer secret")); + assertThat(webServer.requests().get(0).getHeader(ORGANIZATION_HEADER), 
equalTo("org")); + + var requestMap = entityAsMap(webServer.requests().get(0).getBody()); + assertThat(requestMap.size(), is(2)); + assertThat(requestMap.get("input"), is(List.of("abc"))); + assertThat(requestMap.get("model"), is("model")); + } + } + + public void testSend_SuccessfulResponse_WithoutOrganization() throws IOException, URISyntaxException { + var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + + try (var sender = senderFactory.createSender("test_service")) { + sender.start(); + + String responseJson = """ + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + 0.0123, + -0.0123 + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + + OpenAiClient openAiClient = new OpenAiClient(sender, createWithEmptySettings(threadPool)); + + PlainActionFuture> listener = new PlainActionFuture<>(); + openAiClient.send(createRequest(getUrl(webServer), null, "secret", "abc", "model", null), listener); + + InferenceResults result = listener.actionGet(TIMEOUT).get(0); + + assertThat( + result.asMap(), + is( + Map.of( + TextEmbeddingResults.TEXT_EMBEDDING, + List.of(Map.of(TextEmbeddingResults.Embedding.EMBEDDING, List.of(0.0123F, -0.0123F))) + ) + ) + ); + + assertThat(webServer.requests(), hasSize(1)); + assertNull(webServer.requests().get(0).getUri().getQuery()); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer secret")); + assertNull(webServer.requests().get(0).getHeader(ORGANIZATION_HEADER)); + + var requestMap = entityAsMap(webServer.requests().get(0).getBody()); + assertThat(requestMap.size(), is(2)); + assertThat(requestMap.get("input"), is(List.of("abc"))); + assertThat(requestMap.get("model"), is("model")); + } + } + + public void testSend_FailsFromInvalidResponseFormat() throws IOException, URISyntaxException { + var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + + try (var sender = senderFactory.createSender("test_service")) { + sender.start(); + + String responseJson = """ + { + "object": "list", + "data_does_not_exist": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + 0.0123, + -0.0123 + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + + OpenAiClient openAiClient = new OpenAiClient( + sender, + new ServiceComponents( + threadPool, + mockThrottlerManager(), + // timeout as zero for no retries + buildSettingsWithRetryFields(TimeValue.timeValueMillis(1), TimeValue.timeValueMinutes(1), TimeValue.timeValueSeconds(0)) + ) + ); + + PlainActionFuture> listener = new PlainActionFuture<>(); + openAiClient.send(createRequest(getUrl(webServer), "org", "secret", "abc", "model", "user"), listener); + + var thrownException = expectThrows(IllegalStateException.class, () -> listener.actionGet(TIMEOUT)); + assertThat(thrownException.getMessage(), is(format("Failed to find required field [data] in OpenAI embeddings response"))); + + assertThat(webServer.requests(), hasSize(1)); + assertNull(webServer.requests().get(0).getUri().getQuery()); 
+ assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer secret")); + assertThat(webServer.requests().get(0).getHeader(ORGANIZATION_HEADER), equalTo("org")); + + var requestMap = entityAsMap(webServer.requests().get(0).getBody()); + assertThat(requestMap.size(), is(3)); + assertThat(requestMap.get("input"), is(List.of("abc"))); + assertThat(requestMap.get("model"), is("model")); + assertThat(requestMap.get("user"), is("user")); + } + } + + public void testSend_ThrowsException() throws URISyntaxException, IOException { + var sender = mock(Sender.class); + doThrow(new ElasticsearchException("failed")).when(sender).send(any(), any()); + + OpenAiClient openAiClient = new OpenAiClient(sender, createWithEmptySettings(threadPool)); + PlainActionFuture> listener = new PlainActionFuture<>(); + openAiClient.send(createRequest(getUrl(webServer), "org", "secret", "abc", "model", "user"), listener); + + var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + assertThat(thrownException.getMessage(), is("failed")); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/RequestUtilsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/RequestUtilsTests.java new file mode 100644 index 0000000000000..b6690373ec097 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/RequestUtilsTests.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.request; + +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.test.ESTestCase; + +import static org.elasticsearch.xpack.inference.external.request.RequestUtils.createAuthBearerHeader; +import static org.hamcrest.Matchers.is; + +public class RequestUtilsTests extends ESTestCase { + public void testCreateAuthBearerHeader() { + var header = createAuthBearerHeader(new SecureString("abc".toCharArray())); + + assertThat(header.getName(), is("Authorization")); + assertThat(header.getValue(), is("Bearer abc")); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequestEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequestEntityTests.java new file mode 100644 index 0000000000000..cedfd04192c0d --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequestEntityTests.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.openai; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.List; + +import static org.hamcrest.CoreMatchers.is; + +public class OpenAiEmbeddingsRequestEntityTests extends ESTestCase { + + public void testXContent_WritesUserWhenDefined() throws IOException { + var entity = new OpenAiEmbeddingsRequestEntity(List.of("abc"), "model", "user"); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + entity.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, is(""" + {"input":["abc"],"model":"model","user":"user"}""")); + } + + public void testXContent_DoesNotWriteUserWhenItIsNull() throws IOException { + var entity = new OpenAiEmbeddingsRequestEntity(List.of("abc"), "model", null); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + entity.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, is(""" + {"input":["abc"],"model":"model"}""")); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequestTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequestTests.java new file mode 100644 index 0000000000000..146601da86dbd --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequestTests.java @@ -0,0 +1,102 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.openai; + +import org.apache.http.HttpHeaders; +import org.apache.http.client.methods.HttpPost; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.external.openai.OpenAiAccount; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.List; + +import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; +import static org.elasticsearch.xpack.inference.external.request.openai.OpenAiEmbeddingsRequest.buildDefaultUri; +import static org.elasticsearch.xpack.inference.external.request.openai.OpenAiUtils.ORGANIZATION_HEADER; +import static org.hamcrest.Matchers.aMapWithSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; + +public class OpenAiEmbeddingsRequestTests extends ESTestCase { + public void testCreateRequest_WithUrlOrganizationUserDefined() throws URISyntaxException, IOException { + var request = createRequest("www.google.com", "org", "secret", "abc", "model", "user"); + var httpRequest = request.createRequest(); + + assertThat(httpRequest, instanceOf(HttpPost.class)); + var httpPost = (HttpPost) httpRequest; + + assertThat(httpPost.getURI().toString(), is("www.google.com")); + assertThat(httpPost.getLastHeader(HttpHeaders.CONTENT_TYPE).getValue(), is(XContentType.JSON.mediaType())); + assertThat(httpPost.getLastHeader(HttpHeaders.AUTHORIZATION).getValue(), is("Bearer secret")); + assertThat(httpPost.getLastHeader(ORGANIZATION_HEADER).getValue(), is("org")); + + var requestMap = entityAsMap(httpPost.getEntity().getContent()); + assertThat(requestMap, aMapWithSize(3)); + assertThat(requestMap.get("input"), is(List.of("abc"))); + assertThat(requestMap.get("model"), is("model")); + assertThat(requestMap.get("user"), is("user")); + } + + public void testCreateRequest_WithDefaultUrl() throws URISyntaxException, IOException { + var request = createRequest(null, "org", "secret", "abc", "model", "user"); + var httpRequest = request.createRequest(); + + assertThat(httpRequest, instanceOf(HttpPost.class)); + var httpPost = (HttpPost) httpRequest; + + assertThat(httpPost.getURI().toString(), is(buildDefaultUri().toString())); + assertThat(httpPost.getLastHeader(HttpHeaders.CONTENT_TYPE).getValue(), is(XContentType.JSON.mediaType())); + assertThat(httpPost.getLastHeader(HttpHeaders.AUTHORIZATION).getValue(), is("Bearer secret")); + assertThat(httpPost.getLastHeader(ORGANIZATION_HEADER).getValue(), is("org")); + + var requestMap = entityAsMap(httpPost.getEntity().getContent()); + assertThat(requestMap, aMapWithSize(3)); + assertThat(requestMap.get("input"), is(List.of("abc"))); + assertThat(requestMap.get("model"), is("model")); + assertThat(requestMap.get("user"), is("user")); + } + + public void testCreateRequest_WithDefaultUrlAndWithoutUserOrganization() throws URISyntaxException, IOException { + var request = createRequest(null, null, "secret", "abc", "model", null); + var httpRequest = request.createRequest(); + + assertThat(httpRequest, instanceOf(HttpPost.class)); + var httpPost = (HttpPost) httpRequest; + + assertThat(httpPost.getURI().toString(), is(buildDefaultUri().toString())); + assertThat(httpPost.getLastHeader(HttpHeaders.CONTENT_TYPE).getValue(), is(XContentType.JSON.mediaType())); + 
assertThat(httpPost.getLastHeader(HttpHeaders.AUTHORIZATION).getValue(), is("Bearer secret")); + assertNull(httpPost.getLastHeader(ORGANIZATION_HEADER)); + + var requestMap = entityAsMap(httpPost.getEntity().getContent()); + assertThat(requestMap, aMapWithSize(2)); + assertThat(requestMap.get("input"), is(List.of("abc"))); + assertThat(requestMap.get("model"), is("model")); + } + + public static OpenAiEmbeddingsRequest createRequest( + @Nullable String url, + @Nullable String org, + String apiKey, + String input, + String model, + @Nullable String user + ) throws URISyntaxException { + var uri = url == null ? null : new URI(url); + + var account = new OpenAiAccount(uri, org, new SecureString(apiKey.toCharArray())); + var entity = new OpenAiEmbeddingsRequestEntity(List.of(input), model, user); + + return new OpenAiEmbeddingsRequest(account, entity); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/huggingface/HuggingFaceElserResponseEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/huggingface/HuggingFaceElserResponseEntityTests.java index 716c4520c7ee4..9f7f878632d92 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/huggingface/HuggingFaceElserResponseEntityTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/huggingface/HuggingFaceElserResponseEntityTests.java @@ -134,7 +134,7 @@ public void testFails_ValueString() { ); } - public void testFails_ValueInt() throws IOException { + public void testFromResponse_CreatesResultsWithValueInt() throws IOException { String responseJson = """ [ { @@ -155,7 +155,7 @@ public void testFails_ValueInt() throws IOException { assertFalse(parsedResults.isTruncated()); } - public void testFails_ValueLong() throws IOException { + public void testFromResponse_CreatesResultsWithValueLong() throws IOException { String responseJson = """ [ { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/openai/OpenAiEmbeddingsResponseEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/openai/OpenAiEmbeddingsResponseEntityTests.java new file mode 100644 index 0000000000000..a3ec162b05ec8 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/openai/OpenAiEmbeddingsResponseEntityTests.java @@ -0,0 +1,353 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.response.openai; + +import org.apache.http.HttpResponse; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.external.http.HttpResult; +import org.elasticsearch.xpack.inference.results.TextEmbeddingResults; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.List; + +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; + +public class OpenAiEmbeddingsResponseEntityTests extends ESTestCase { + public void testFromResponse_CreatesResultsForASingleItem() throws IOException { + String responseJson = """ + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + 0.014539449, + -0.015288644 + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + + TextEmbeddingResults parsedResults = OpenAiEmbeddingsResponseEntity.fromResponse( + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ); + + assertThat(parsedResults.embeddings(), is(List.of(new TextEmbeddingResults.Embedding(List.of(0.014539449F, -0.015288644F))))); + } + + public void testFromResponse_CreatesResultsForMultipleItems() throws IOException { + String responseJson = """ + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + 0.014539449, + -0.015288644 + ] + }, + { + "object": "embedding", + "index": 1, + "embedding": [ + 0.0123, + -0.0123 + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + + TextEmbeddingResults parsedResults = OpenAiEmbeddingsResponseEntity.fromResponse( + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ); + + assertThat( + parsedResults.embeddings(), + is( + List.of( + new TextEmbeddingResults.Embedding(List.of(0.014539449F, -0.015288644F)), + new TextEmbeddingResults.Embedding(List.of(0.0123F, -0.0123F)) + ) + ) + ); + } + + public void testFromResponse_FailsWhenDataFieldIsNotPresent() { + String responseJson = """ + { + "object": "list", + "not_data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + 0.014539449, + -0.015288644 + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + + var thrownException = expectThrows( + IllegalStateException.class, + () -> OpenAiEmbeddingsResponseEntity.fromResponse( + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ) + ); + + assertThat(thrownException.getMessage(), is("Failed to find required field [data] in OpenAI embeddings response")); + } + + public void testFromResponse_FailsWhenDataFieldNotAnArray() { + String responseJson = """ + { + "object": "list", + "data": { + "test": { + "object": "embedding", + "index": 0, + "embedding": [ + 0.014539449, + -0.015288644 + ] + } + }, + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + + var thrownException = expectThrows( + ParsingException.class, + () -> OpenAiEmbeddingsResponseEntity.fromResponse( + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ) + ); + + assertThat( + thrownException.getMessage(), + is("Failed to parse object: expecting token of type [START_ARRAY] but found [START_OBJECT]") + ); + } + 
+ public void testFromResponse_FailsWhenEmbeddingsDoesNotExist() { + String responseJson = """ + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embeddingzzz": [ + 0.014539449, + -0.015288644 + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + + var thrownException = expectThrows( + IllegalStateException.class, + () -> OpenAiEmbeddingsResponseEntity.fromResponse( + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ) + ); + + assertThat(thrownException.getMessage(), is("Failed to find required field [embedding] in OpenAI embeddings response")); + } + + public void testFromResponse_FailsWhenEmbeddingValueIsAString() { + String responseJson = """ + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + "abc" + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + + var thrownException = expectThrows( + ParsingException.class, + () -> OpenAiEmbeddingsResponseEntity.fromResponse( + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ) + ); + + assertThat( + thrownException.getMessage(), + is("Failed to parse object: expecting token of type [VALUE_NUMBER] but found [VALUE_STRING]") + ); + } + + public void testFromResponse_SucceedsWhenEmbeddingValueIsInt() throws IOException { + String responseJson = """ + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + 1 + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + + TextEmbeddingResults parsedResults = OpenAiEmbeddingsResponseEntity.fromResponse( + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ); + + assertThat(parsedResults.embeddings(), is(List.of(new TextEmbeddingResults.Embedding(List.of(1.0F))))); + } + + public void testFromResponse_SucceedsWhenEmbeddingValueIsLong() throws IOException { + String responseJson = """ + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + 40294967295 + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + + TextEmbeddingResults parsedResults = OpenAiEmbeddingsResponseEntity.fromResponse( + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ); + + assertThat(parsedResults.embeddings(), is(List.of(new TextEmbeddingResults.Embedding(List.of(4.0294965E10F))))); + } + + public void testFromResponse_FailsWhenEmbeddingValueIsAnObject() { + String responseJson = """ + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + {} + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + + var thrownException = expectThrows( + ParsingException.class, + () -> OpenAiEmbeddingsResponseEntity.fromResponse( + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ) + ); + + assertThat( + thrownException.getMessage(), + is("Failed to parse object: expecting token of type [VALUE_NUMBER] but found [START_OBJECT]") + ); + } + + public void testFromResponse_FailsWhenIsMissingFinalClosingBracket() { + String responseJson = """ + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, 
+ "embedding": [ + {} + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + """; + + var thrownException = expectThrows( + ParsingException.class, + () -> OpenAiEmbeddingsResponseEntity.fromResponse( + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ) + ); + + assertThat( + thrownException.getMessage(), + is("Failed to parse object: expecting token of type [VALUE_NUMBER] but found [START_OBJECT]") + ); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/results/TextEmbeddingResultsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/results/TextEmbeddingResultsTests.java new file mode 100644 index 0000000000000..0f0165875e41c --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/results/TextEmbeddingResultsTests.java @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.results; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.ToXContentFragment; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.is; + +public class TextEmbeddingResultsTests extends ESTestCase { + public void testToXContent_CreatesTheRightFormatForASingleEmbedding() throws IOException { + var entity = new TextEmbeddingResults(List.of(new TextEmbeddingResults.Embedding(List.of(0.1F)))); + + assertThat( + entity.asMap(), + is(Map.of(TextEmbeddingResults.TEXT_EMBEDDING, List.of(Map.of(TextEmbeddingResults.Embedding.EMBEDDING, List.of(0.1F))))) + ); + + String xContentResult = toJsonString(entity); + assertThat(xContentResult, is(""" + { + "text_embedding" : [ + { + "embedding" : [ + 0.1 + ] + } + ] + }""")); + } + + public void testToXContent_CreatesTheRightFormatForMultipleEmbeddings() throws IOException { + var entity = new TextEmbeddingResults( + List.of(new TextEmbeddingResults.Embedding(List.of(0.1F)), new TextEmbeddingResults.Embedding(List.of(0.2F))) + + ); + + assertThat( + entity.asMap(), + is( + Map.of( + TextEmbeddingResults.TEXT_EMBEDDING, + List.of( + Map.of(TextEmbeddingResults.Embedding.EMBEDDING, List.of(0.1F)), + Map.of(TextEmbeddingResults.Embedding.EMBEDDING, List.of(0.2F)) + ) + ) + ) + ); + + String xContentResult = toJsonString(entity); + assertThat(xContentResult, is(""" + { + "text_embedding" : [ + { + "embedding" : [ + 0.1 + ] + }, + { + "embedding" : [ + 0.2 + ] + } + ] + }""")); + } + + private static String toJsonString(ToXContentFragment entity) throws IOException { + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON).prettyPrint(); + builder.startObject(); + entity.toXContent(builder, null); + builder.endObject(); + + return Strings.toString(builder); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/MapParsingUtilsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/MapParsingUtilsTests.java index 
7b693b2ef4c0f..9ff23ea38541d 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/MapParsingUtilsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/MapParsingUtilsTests.java @@ -8,14 +8,21 @@ package org.elasticsearch.xpack.inference.services; import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.common.ValidationException; import org.elasticsearch.test.ESTestCase; import java.util.HashMap; import java.util.Map; +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.convertToUri; +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.createUri; +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.extractOptionalString; +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.extractRequiredSecureString; +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.extractRequiredString; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; public class MapParsingUtilsTests extends ESTestCase { @@ -88,4 +95,148 @@ public void testRemoveAsTypeMissingReturnsNull() { assertNull(MapParsingUtils.removeAsType(new HashMap<>(), "missing", Integer.class)); assertThat(map.entrySet(), hasSize(3)); } + + public void testConvertToUri_CreatesUri() { + var validation = new ValidationException(); + var uri = convertToUri("www.elastic.co", "name", "scope", validation); + + assertNotNull(uri); + assertTrue(validation.validationErrors().isEmpty()); + assertThat(uri.toString(), is("www.elastic.co")); + } + + public void testConvertToUri_ThrowsNullPointerException_WhenPassedNull() { + var validation = new ValidationException(); + expectThrows(NullPointerException.class, () -> convertToUri(null, "name", "scope", validation)); + + assertTrue(validation.validationErrors().isEmpty()); + } + + public void testConvertToUri_AddsValidationError_WhenUrlIsInvalid() { + var validation = new ValidationException(); + var uri = convertToUri("^^", "name", "scope", validation); + + assertNull(uri); + assertThat(validation.validationErrors().size(), is(1)); + assertThat(validation.validationErrors().get(0), is("[scope] Invalid url [^^] received for field [name]")); + } + + public void testCreateUri_CreatesUri() { + var uri = createUri("www.elastic.co"); + + assertNotNull(uri); + assertThat(uri.toString(), is("www.elastic.co")); + } + + public void testCreateUri_ThrowsException_WithInvalidUrl() { + var exception = expectThrows(IllegalArgumentException.class, () -> createUri("^^")); + + assertThat(exception.getMessage(), is("unable to parse url [^^]")); + } + + public void testCreateUri_ThrowsException_WithNullUrl() { + expectThrows(NullPointerException.class, () -> createUri(null)); + } + + public void testExtractRequiredSecureString_CreatesSecureString() { + var validation = new ValidationException(); + Map<String, Object> map = modifiableMap(Map.of("key", "value")); + var secureString = extractRequiredSecureString(map, "key", "scope", validation); + + assertTrue(validation.validationErrors().isEmpty()); + assertNotNull(secureString); + assertThat(secureString.toString(), is("value")); + assertTrue(map.isEmpty()); + } + + public void testExtractRequiredSecureString_AddsException_WhenFieldDoesNotExist() { + var validation = new ValidationException(); + Map<String, Object> map = modifiableMap(Map.of("key", "value")); + var secureString =
extractRequiredSecureString(map, "abc", "scope", validation); + + assertNull(secureString); + assertFalse(validation.validationErrors().isEmpty()); + assertThat(map.size(), is(1)); + assertThat(validation.validationErrors().get(0), is("[scope] does not contain the required setting [abc]")); + } + + public void testExtractRequiredSecureString_AddsException_WhenFieldIsEmpty() { + var validation = new ValidationException(); + Map<String, Object> map = modifiableMap(Map.of("key", "")); + var createdString = extractOptionalString(map, "key", "scope", validation); + + assertNull(createdString); + assertFalse(validation.validationErrors().isEmpty()); + assertTrue(map.isEmpty()); + assertThat(validation.validationErrors().get(0), is("[scope] Invalid value empty string. [key] must be a non-empty string")); + } + + public void testExtractRequiredString_CreatesString() { + var validation = new ValidationException(); + Map<String, Object> map = modifiableMap(Map.of("key", "value")); + var createdString = extractRequiredString(map, "key", "scope", validation); + + assertTrue(validation.validationErrors().isEmpty()); + assertNotNull(createdString); + assertThat(createdString, is("value")); + assertTrue(map.isEmpty()); + } + + public void testExtractRequiredString_AddsException_WhenFieldDoesNotExist() { + var validation = new ValidationException(); + Map<String, Object> map = modifiableMap(Map.of("key", "value")); + var createdString = extractRequiredSecureString(map, "abc", "scope", validation); + + assertNull(createdString); + assertFalse(validation.validationErrors().isEmpty()); + assertThat(map.size(), is(1)); + assertThat(validation.validationErrors().get(0), is("[scope] does not contain the required setting [abc]")); + } + + public void testExtractRequiredString_AddsException_WhenFieldIsEmpty() { + var validation = new ValidationException(); + Map<String, Object> map = modifiableMap(Map.of("key", "")); + var createdString = extractOptionalString(map, "key", "scope", validation); + + assertNull(createdString); + assertFalse(validation.validationErrors().isEmpty()); + assertTrue(map.isEmpty()); + assertThat(validation.validationErrors().get(0), is("[scope] Invalid value empty string. [key] must be a non-empty string")); + } + + public void testExtractOptionalString_CreatesString() { + var validation = new ValidationException(); + Map<String, Object> map = modifiableMap(Map.of("key", "value")); + var createdString = extractOptionalString(map, "key", "scope", validation); + + assertTrue(validation.validationErrors().isEmpty()); + assertNotNull(createdString); + assertThat(createdString, is("value")); + assertTrue(map.isEmpty()); + } + + public void testExtractOptionalString_DoesNotAddException_WhenFieldDoesNotExist() { + var validation = new ValidationException(); + Map<String, Object> map = modifiableMap(Map.of("key", "value")); + var createdString = extractOptionalString(map, "abc", "scope", validation); + + assertNull(createdString); + assertTrue(validation.validationErrors().isEmpty()); + assertThat(map.size(), is(1)); + } + + public void testExtractOptionalString_AddsException_WhenFieldIsEmpty() { + var validation = new ValidationException(); + Map<String, Object> map = modifiableMap(Map.of("key", "")); + var createdString = extractOptionalString(map, "key", "scope", validation); + + assertNull(createdString); + assertFalse(validation.validationErrors().isEmpty()); + assertTrue(map.isEmpty()); + assertThat(validation.validationErrors().get(0), is("[scope] Invalid value empty string.
[key] must be a non-empty string")); + } + + private static Map<String, Object> modifiableMap(Map<String, Object> aMap) { + return new HashMap<>(aMap); + } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ServiceComponentsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ServiceComponentsTests.java new file mode 100644 index 0000000000000..8ce615ecbb060 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ServiceComponentsTests.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; + +import static org.elasticsearch.xpack.inference.logging.ThrottlerManagerTests.mockThrottlerManager; + +public class ServiceComponentsTests extends ESTestCase { + public static ServiceComponents createWithEmptySettings(ThreadPool threadPool) { + return new ServiceComponents(threadPool, mockThrottlerManager(), Settings.EMPTY); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserServiceSettingsTests.java index 021904d7c2b67..525f701323511 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserServiceSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserServiceSettingsTests.java @@ -32,13 +32,33 @@ public void testFromMap() { assertThat(new HuggingFaceElserServiceSettings(url), is(serviceSettings)); } + public void testFromMap_EmptyUrl_ThrowsError() { + var thrownException = expectThrows( + ValidationException.class, + () -> HuggingFaceElserServiceSettings.fromMap(new HashMap<>(Map.of(HuggingFaceElserServiceSettings.URL, ""))) + ); + + assertThat( + thrownException.getMessage(), + containsString( + Strings.format( + "Validation Failed: 1: [service_settings] Invalid value empty string.
[%s] must be a non-empty string;", + HuggingFaceElserServiceSettings.URL + ) + ) + ); + } + public void testFromMap_MissingUrl_ThrowsError() { var thrownException = expectThrows(ValidationException.class, () -> HuggingFaceElserServiceSettings.fromMap(new HashMap<>())); assertThat( thrownException.getMessage(), containsString( - Strings.format("[service_settings] does not contain the required setting [%s]", HuggingFaceElserServiceSettings.URL) + Strings.format( + "Validation Failed: 1: [service_settings] does not contain the required setting [%s];", + HuggingFaceElserServiceSettings.URL + ) ) ); } @@ -52,7 +72,13 @@ public void testFromMap_InvalidUrl_ThrowsError() { assertThat( thrownException.getMessage(), - containsString(Strings.format("Invalid url [%s] received in setting [service_settings]", url)) + is( + Strings.format( + "Validation Failed: 1: [service_settings] Invalid url [%s] received for field [%s];", + url, + HuggingFaceElserServiceSettings.URL + ) + ) ); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceSettingsTests.java new file mode 100644 index 0000000000000..9fbcc3bec7a60 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceSettingsTests.java @@ -0,0 +1,141 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.openai; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; + +public class OpenAiServiceSettingsTests extends AbstractWireSerializingTestCase<OpenAiServiceSettings> { + + public static OpenAiServiceSettings createRandomWithNonNullUrl() { + return new OpenAiServiceSettings(randomAlphaOfLength(15), randomAlphaOfLength(15)); + } + + /** + * The created settings can have a url set to null. + */ + public static OpenAiServiceSettings createRandom() { + var url = randomBoolean() ? randomAlphaOfLength(15) : null; + var organizationId = randomBoolean() ?
randomAlphaOfLength(15) : null; + return new OpenAiServiceSettings(url, organizationId); + } + + public void testFromMap() { + var url = "https://www.abc.com"; + var org = "organization"; + var serviceSettings = OpenAiServiceSettings.fromMap( + new HashMap<>(Map.of(OpenAiServiceSettings.URL, url, OpenAiServiceSettings.ORGANIZATION, org)) + ); + + assertThat(serviceSettings, is(new OpenAiServiceSettings(url, org))); + } + + public void testFromMap_MissingUrl_DoesNotThrowException() { + var serviceSettings = OpenAiServiceSettings.fromMap(new HashMap<>(Map.of(OpenAiServiceSettings.ORGANIZATION, "org"))); + assertNull(serviceSettings.uri()); + assertThat(serviceSettings.organizationId(), is("org")); + } + + public void testFromMap_EmptyUrl_ThrowsError() { + var thrownException = expectThrows( + ValidationException.class, + () -> OpenAiServiceSettings.fromMap(new HashMap<>(Map.of(OpenAiServiceSettings.URL, ""))) + ); + + assertThat( + thrownException.getMessage(), + containsString( + Strings.format( + "Validation Failed: 1: [service_settings] Invalid value empty string. [%s] must be a non-empty string;", + OpenAiServiceSettings.URL + ) + ) + ); + } + + public void testFromMap_MissingOrganization_DoesNotThrowException() { + var serviceSettings = OpenAiServiceSettings.fromMap(new HashMap<>()); + assertNull(serviceSettings.uri()); + assertNull(serviceSettings.organizationId()); + } + + public void testFromMap_EmptyOrganization_ThrowsError() { + var thrownException = expectThrows( + ValidationException.class, + () -> OpenAiServiceSettings.fromMap(new HashMap<>(Map.of(OpenAiServiceSettings.ORGANIZATION, ""))) + ); + + assertThat( + thrownException.getMessage(), + containsString( + Strings.format( + "Validation Failed: 1: [service_settings] Invalid value empty string. [%s] must be a non-empty string;", + OpenAiServiceSettings.ORGANIZATION + ) + ) + ); + } + + public void testFromMap_InvalidUrl_ThrowsError() { + var url = "https://www.abc^.com"; + var thrownException = expectThrows( + ValidationException.class, + () -> OpenAiServiceSettings.fromMap(new HashMap<>(Map.of(OpenAiServiceSettings.URL, url))) + ); + + assertThat( + thrownException.getMessage(), + is( + Strings.format( + "Validation Failed: 1: [service_settings] Invalid url [%s] received for field [%s];", + url, + OpenAiServiceSettings.URL + ) + ) + ); + } + + @Override + protected Writeable.Reader<OpenAiServiceSettings> instanceReader() { + return OpenAiServiceSettings::new; + } + + @Override + protected OpenAiServiceSettings createTestInstance() { + return createRandomWithNonNullUrl(); + } + + @Override + protected OpenAiServiceSettings mutateInstance(OpenAiServiceSettings instance) throws IOException { + return createRandomWithNonNullUrl(); + } + + public static Map<String, Object> getServiceSettingsMap(@Nullable String url, @Nullable String org) { + + var map = new HashMap<String, Object>(); + + if (url != null) { + map.put(OpenAiServiceSettings.URL, url); + } + + if (org != null) { + map.put(OpenAiServiceSettings.ORGANIZATION, org); + } + return map; + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java new file mode 100644 index 0000000000000..dae9a1a01c78f --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java @@ -0,0 +1,638 @@ +/* + * Copyright Elasticsearch B.V.
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.openai; + +import org.apache.http.HttpHeaders; +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceResults; +import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.http.MockResponse; +import org.elasticsearch.test.http.MockWebServer; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.external.http.HttpClientManager; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.logging.ThrottlerManager; +import org.elasticsearch.xpack.inference.results.TextEmbeddingResults; +import org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsModel; +import org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsModelTests; +import org.hamcrest.Matchers; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; +import static org.elasticsearch.xpack.inference.external.http.Utils.getUrl; +import static org.elasticsearch.xpack.inference.external.http.Utils.inferenceUtilityPool; +import static org.elasticsearch.xpack.inference.external.http.Utils.mockClusterServiceEmpty; +import static org.elasticsearch.xpack.inference.external.request.openai.OpenAiUtils.ORGANIZATION_HEADER; +import static org.elasticsearch.xpack.inference.services.ServiceComponentsTests.createWithEmptySettings; +import static org.elasticsearch.xpack.inference.services.openai.OpenAiServiceSettingsTests.getServiceSettingsMap; +import static org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsTaskSettingsTests.getTaskSettingsMap; +import static org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettingsTests.getSecretSettingsMap; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +public class OpenAiServiceTests extends ESTestCase { + private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); + private final MockWebServer webServer = new MockWebServer(); + private ThreadPool threadPool; + private HttpClientManager clientManager; + + @Before + public void init() throws Exception { 
+ webServer.start(); + threadPool = createThreadPool(inferenceUtilityPool()); + clientManager = HttpClientManager.create(Settings.EMPTY, threadPool, mockClusterServiceEmpty(), mock(ThrottlerManager.class)); + } + + @After + public void shutdown() throws IOException { + clientManager.close(); + terminate(threadPool); + webServer.close(); + } + + public void testParseRequestConfig_CreatesAnOpenAiEmbeddingsModel() throws IOException { + try ( + var service = new OpenAiService( + new SetOnce<>(mock(HttpRequestSenderFactory.class)), + new SetOnce<>(createWithEmptySettings(threadPool)) + ) + ) { + var model = service.parseRequestConfig( + "id", + TaskType.TEXT_EMBEDDING, + getRequestConfigMap( + getServiceSettingsMap("url", "org"), + getTaskSettingsMap("model", "user"), + getSecretSettingsMap("secret") + ), + Set.of() + ); + + assertThat(model, instanceOf(OpenAiEmbeddingsModel.class)); + + var embeddingsModel = (OpenAiEmbeddingsModel) model; + assertThat(embeddingsModel.getServiceSettings().uri().toString(), is("url")); + assertThat(embeddingsModel.getServiceSettings().organizationId(), is("org")); + assertThat(embeddingsModel.getTaskSettings().model(), is("model")); + assertThat(embeddingsModel.getTaskSettings().user(), is("user")); + assertThat(embeddingsModel.getSecretSettings().apiKey().toString(), is("secret")); + } + } + + public void testParseRequestConfig_ThrowsUnsupportedModelType() throws IOException { + try ( + var service = new OpenAiService( + new SetOnce<>(mock(HttpRequestSenderFactory.class)), + new SetOnce<>(createWithEmptySettings(threadPool)) + ) + ) { + var thrownException = expectThrows( + ElasticsearchStatusException.class, + () -> service.parseRequestConfig( + "id", + TaskType.SPARSE_EMBEDDING, + getRequestConfigMap( + getServiceSettingsMap("url", "org"), + getTaskSettingsMap("model", "user"), + getSecretSettingsMap("secret") + ), + Set.of() + ) + ); + + assertThat(thrownException.getMessage(), is("The [openai] service does not support task type [sparse_embedding]")); + } + } + + public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInConfig() throws IOException { + try ( + var service = new OpenAiService( + new SetOnce<>(mock(HttpRequestSenderFactory.class)), + new SetOnce<>(createWithEmptySettings(threadPool)) + ) + ) { + var config = getRequestConfigMap( + getServiceSettingsMap("url", "org"), + getTaskSettingsMap("model", "user"), + getSecretSettingsMap("secret") + ); + config.put("extra_key", "value"); + + var thrownException = expectThrows( + ElasticsearchStatusException.class, + () -> service.parseRequestConfig("id", TaskType.TEXT_EMBEDDING, config, Set.of()) + ); + + assertThat( + thrownException.getMessage(), + is("Model configuration contains settings [{extra_key=value}] unknown to the [openai] service") + ); + } + } + + public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInServiceSettingsMap() throws IOException { + try ( + var service = new OpenAiService( + new SetOnce<>(mock(HttpRequestSenderFactory.class)), + new SetOnce<>(createWithEmptySettings(threadPool)) + ) + ) { + var serviceSettings = getServiceSettingsMap("url", "org"); + serviceSettings.put("extra_key", "value"); + + var config = getRequestConfigMap(serviceSettings, getTaskSettingsMap("model", "user"), getSecretSettingsMap("secret")); + + var thrownException = expectThrows( + ElasticsearchStatusException.class, + () -> service.parseRequestConfig("id", TaskType.TEXT_EMBEDDING, config, Set.of()) + ); + + assertThat( + thrownException.getMessage(), + is("Model configuration contains 
settings [{extra_key=value}] unknown to the [openai] service") + ); + } + } + + public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInTaskSettingsMap() throws IOException { + try ( + var service = new OpenAiService( + new SetOnce<>(mock(HttpRequestSenderFactory.class)), + new SetOnce<>(createWithEmptySettings(threadPool)) + ) + ) { + var taskSettingsMap = getTaskSettingsMap("model", "user"); + taskSettingsMap.put("extra_key", "value"); + + var config = getRequestConfigMap(getServiceSettingsMap("url", "org"), taskSettingsMap, getSecretSettingsMap("secret")); + + var thrownException = expectThrows( + ElasticsearchStatusException.class, + () -> service.parseRequestConfig("id", TaskType.TEXT_EMBEDDING, config, Set.of()) + ); + + assertThat( + thrownException.getMessage(), + is("Model configuration contains settings [{extra_key=value}] unknown to the [openai] service") + ); + } + } + + public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInSecretSettingsMap() throws IOException { + try ( + var service = new OpenAiService( + new SetOnce<>(mock(HttpRequestSenderFactory.class)), + new SetOnce<>(createWithEmptySettings(threadPool)) + ) + ) { + var secretSettingsMap = getSecretSettingsMap("secret"); + secretSettingsMap.put("extra_key", "value"); + + var config = getRequestConfigMap(getServiceSettingsMap("url", "org"), getTaskSettingsMap("model", "user"), secretSettingsMap); + + var thrownException = expectThrows( + ElasticsearchStatusException.class, + () -> service.parseRequestConfig("id", TaskType.TEXT_EMBEDDING, config, Set.of()) + ); + + assertThat( + thrownException.getMessage(), + is("Model configuration contains settings [{extra_key=value}] unknown to the [openai] service") + ); + } + } + + public void testParseRequestConfig_CreatesAnOpenAiEmbeddingsModelWithoutUserUrlOrganization() throws IOException { + try ( + var service = new OpenAiService( + new SetOnce<>(mock(HttpRequestSenderFactory.class)), + new SetOnce<>(createWithEmptySettings(threadPool)) + ) + ) { + var model = service.parseRequestConfig( + "id", + TaskType.TEXT_EMBEDDING, + getRequestConfigMap(getServiceSettingsMap(null, null), getTaskSettingsMap("model", null), getSecretSettingsMap("secret")), + Set.of() + ); + + assertThat(model, instanceOf(OpenAiEmbeddingsModel.class)); + + var embeddingsModel = (OpenAiEmbeddingsModel) model; + assertNull(embeddingsModel.getServiceSettings().uri()); + assertNull(embeddingsModel.getServiceSettings().organizationId()); + assertThat(embeddingsModel.getTaskSettings().model(), is("model")); + assertNull(embeddingsModel.getTaskSettings().user()); + assertThat(embeddingsModel.getSecretSettings().apiKey().toString(), is("secret")); + } + } + + public void testParsePersistedConfig_CreatesAnOpenAiEmbeddingsModel() throws IOException { + try ( + var service = new OpenAiService( + new SetOnce<>(mock(HttpRequestSenderFactory.class)), + new SetOnce<>(createWithEmptySettings(threadPool)) + ) + ) { + var persistedConfig = getPersistedConfigMap( + getServiceSettingsMap("url", "org"), + getTaskSettingsMap("model", "user"), + getSecretSettingsMap("secret") + ); + + var model = service.parsePersistedConfig("id", TaskType.TEXT_EMBEDDING, persistedConfig.config(), persistedConfig.secrets()); + + assertThat(model, instanceOf(OpenAiEmbeddingsModel.class)); + + var embeddingsModel = (OpenAiEmbeddingsModel) model; + assertThat(embeddingsModel.getServiceSettings().uri().toString(), is("url")); + assertThat(embeddingsModel.getServiceSettings().organizationId(), is("org")); + 
assertThat(embeddingsModel.getTaskSettings().model(), is("model")); + assertThat(embeddingsModel.getTaskSettings().user(), is("user")); + assertThat(embeddingsModel.getSecretSettings().apiKey().toString(), is("secret")); + } + } + + public void testParsePersistedConfig_ThrowsErrorTryingToParseInvalidModel() throws IOException { + try ( + var service = new OpenAiService( + new SetOnce<>(mock(HttpRequestSenderFactory.class)), + new SetOnce<>(createWithEmptySettings(threadPool)) + ) + ) { + var persistedConfig = getPersistedConfigMap( + getServiceSettingsMap("url", "org"), + getTaskSettingsMap("model", "user"), + getSecretSettingsMap("secret") + ); + + var thrownException = expectThrows( + ElasticsearchStatusException.class, + () -> service.parsePersistedConfig("id", TaskType.SPARSE_EMBEDDING, persistedConfig.config(), persistedConfig.secrets()) + ); + + assertThat( + thrownException.getMessage(), + is("Failed to parse stored model [id] for [openai] service, please delete and add the service again") + ); + } + } + + public void testParsePersistedConfig_CreatesAnOpenAiEmbeddingsModelWithoutUserUrlOrganization() throws IOException { + try ( + var service = new OpenAiService( + new SetOnce<>(mock(HttpRequestSenderFactory.class)), + new SetOnce<>(createWithEmptySettings(threadPool)) + ) + ) { + var persistedConfig = getPersistedConfigMap( + getServiceSettingsMap(null, null), + getTaskSettingsMap("model", null), + getSecretSettingsMap("secret") + ); + + var model = service.parsePersistedConfig("id", TaskType.TEXT_EMBEDDING, persistedConfig.config(), persistedConfig.secrets()); + + assertThat(model, instanceOf(OpenAiEmbeddingsModel.class)); + + var embeddingsModel = (OpenAiEmbeddingsModel) model; + assertNull(embeddingsModel.getServiceSettings().uri()); + assertNull(embeddingsModel.getServiceSettings().organizationId()); + assertThat(embeddingsModel.getTaskSettings().model(), is("model")); + assertNull(embeddingsModel.getTaskSettings().user()); + assertThat(embeddingsModel.getSecretSettings().apiKey().toString(), is("secret")); + } + } + + public void testParsePersistedConfig_ThrowsWhenAnExtraKeyExistsInConfig() throws IOException { + try ( + var service = new OpenAiService( + new SetOnce<>(mock(HttpRequestSenderFactory.class)), + new SetOnce<>(createWithEmptySettings(threadPool)) + ) + ) { + var persistedConfig = getPersistedConfigMap( + getServiceSettingsMap("url", "org"), + getTaskSettingsMap("model", "user"), + getSecretSettingsMap("secret") + ); + persistedConfig.config().put("extra_key", "value"); + + var thrownException = expectThrows( + ElasticsearchStatusException.class, + () -> service.parsePersistedConfig("id", TaskType.TEXT_EMBEDDING, persistedConfig.config(), persistedConfig.secrets()) + ); + + assertThat( + thrownException.getMessage(), + is("Model configuration contains settings [{extra_key=value}] unknown to the [openai] service") + ); + } + } + + public void testParsePersistedConfig_ThrowsWhenAnExtraKeyExistsInSecretsSettings() throws IOException { + try ( + var service = new OpenAiService( + new SetOnce<>(mock(HttpRequestSenderFactory.class)), + new SetOnce<>(createWithEmptySettings(threadPool)) + ) + ) { + var secretSettingsMap = getSecretSettingsMap("secret"); + secretSettingsMap.put("extra_key", "value"); + + var persistedConfig = getPersistedConfigMap( + getServiceSettingsMap("url", "org"), + getTaskSettingsMap("model", "user"), + secretSettingsMap + ); + + var thrownException = expectThrows( + ElasticsearchStatusException.class, + () -> service.parsePersistedConfig("id", 
TaskType.TEXT_EMBEDDING, persistedConfig.config(), persistedConfig.secrets()) + ); + + assertThat( + thrownException.getMessage(), + is("Model configuration contains settings [{extra_key=value}] unknown to the [openai] service") + ); + } + } + + public void testParsePersistedConfig_ThrowsWhenAnExtraKeyExistsInSecrets() throws IOException { + try ( + var service = new OpenAiService( + new SetOnce<>(mock(HttpRequestSenderFactory.class)), + new SetOnce<>(createWithEmptySettings(threadPool)) + ) + ) { + var persistedConfig = getPersistedConfigMap( + getServiceSettingsMap("url", "org"), + getTaskSettingsMap("model", "user"), + getSecretSettingsMap("secret") + ); + persistedConfig.secrets.put("extra_key", "value"); + + var thrownException = expectThrows( + ElasticsearchStatusException.class, + () -> service.parsePersistedConfig("id", TaskType.TEXT_EMBEDDING, persistedConfig.config(), persistedConfig.secrets()) + ); + + assertThat( + thrownException.getMessage(), + is("Model configuration contains settings [{extra_key=value}] unknown to the [openai] service") + ); + } + } + + public void testParsePersistedConfig_ThrowsWhenAnExtraKeyExistsInServiceSettings() throws IOException { + try ( + var service = new OpenAiService( + new SetOnce<>(mock(HttpRequestSenderFactory.class)), + new SetOnce<>(createWithEmptySettings(threadPool)) + ) + ) { + var serviceSettingsMap = getServiceSettingsMap("url", "org"); + serviceSettingsMap.put("extra_key", "value"); + + var persistedConfig = getPersistedConfigMap( + serviceSettingsMap, + getTaskSettingsMap("model", "user"), + getSecretSettingsMap("secret") + ); + + var thrownException = expectThrows( + ElasticsearchStatusException.class, + () -> service.parsePersistedConfig("id", TaskType.TEXT_EMBEDDING, persistedConfig.config(), persistedConfig.secrets()) + ); + + assertThat( + thrownException.getMessage(), + is("Model configuration contains settings [{extra_key=value}] unknown to the [openai] service") + ); + } + } + + public void testParsePersistedConfig_ThrowsWhenAnExtraKeyExistsInTaskSettings() throws IOException { + try ( + var service = new OpenAiService( + new SetOnce<>(mock(HttpRequestSenderFactory.class)), + new SetOnce<>(createWithEmptySettings(threadPool)) + ) + ) { + var taskSettingsMap = getTaskSettingsMap("model", "user"); + taskSettingsMap.put("extra_key", "value"); + + var persistedConfig = getPersistedConfigMap( + getServiceSettingsMap("url", "org"), + taskSettingsMap, + getSecretSettingsMap("secret") + ); + + var thrownException = expectThrows( + ElasticsearchStatusException.class, + () -> service.parsePersistedConfig("id", TaskType.TEXT_EMBEDDING, persistedConfig.config(), persistedConfig.secrets()) + ); + + assertThat( + thrownException.getMessage(), + is("Model configuration contains settings [{extra_key=value}] unknown to the [openai] service") + ); + } + } + + public void testStart_InitializesTheSender() throws IOException { + var sender = mock(Sender.class); + + var factory = mock(HttpRequestSenderFactory.class); + when(factory.createSender(anyString())).thenReturn(sender); + + try (var service = new OpenAiService(new SetOnce<>(factory), new SetOnce<>(createWithEmptySettings(threadPool)))) { + PlainActionFuture listener = new PlainActionFuture<>(); + service.start(mock(Model.class), listener); + + listener.actionGet(TIMEOUT); + verify(sender, times(1)).start(); + verify(factory, times(1)).createSender(anyString()); + } + + verify(sender, times(1)).close(); + verifyNoMoreInteractions(factory); + verifyNoMoreInteractions(sender); + } + + public 
void testStart_CallingStartTwiceKeepsSameSenderReference() throws IOException { + var sender = mock(Sender.class); + + var factory = mock(HttpRequestSenderFactory.class); + when(factory.createSender(anyString())).thenReturn(sender); + + try (var service = new OpenAiService(new SetOnce<>(factory), new SetOnce<>(createWithEmptySettings(threadPool)))) { + PlainActionFuture listener = new PlainActionFuture<>(); + service.start(mock(Model.class), listener); + listener.actionGet(TIMEOUT); + + service.start(mock(Model.class), listener); + listener.actionGet(TIMEOUT); + + verify(factory, times(1)).createSender(anyString()); + verify(sender, times(2)).start(); + } + + verify(sender, times(1)).close(); + verifyNoMoreInteractions(factory); + verifyNoMoreInteractions(sender); + } + + public void testInfer_ThrowsErrorWhenModelIsNotOpenAiModel() throws IOException { + var sender = mock(Sender.class); + + var factory = mock(HttpRequestSenderFactory.class); + when(factory.createSender(anyString())).thenReturn(sender); + + var mockModel = getInvalidModel("model_id", "service_name"); + + try (var service = new OpenAiService(new SetOnce<>(factory), new SetOnce<>(createWithEmptySettings(threadPool)))) { + PlainActionFuture> listener = new PlainActionFuture<>(); + service.infer(mockModel, List.of(""), new HashMap<>(), listener); + + var thrownException = expectThrows(ElasticsearchStatusException.class, () -> listener.actionGet(TIMEOUT)); + assertThat( + thrownException.getMessage(), + is("The internal model was invalid, please delete the service [service_name] with id [model_id] and add it again.") + ); + + verify(factory, times(1)).createSender(anyString()); + verify(sender, times(1)).start(); + } + + verify(sender, times(1)).close(); + verifyNoMoreInteractions(factory); + verifyNoMoreInteractions(sender); + } + + public void testInfer_SendsRequest() throws IOException { + var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + + try (var service = new OpenAiService(new SetOnce<>(senderFactory), new SetOnce<>(createWithEmptySettings(threadPool)))) { + + String responseJson = """ + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + 0.0123, + -0.0123 + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + + var model = OpenAiEmbeddingsModelTests.createModel(getUrl(webServer), "org", "secret", "model", "user"); + PlainActionFuture> listener = new PlainActionFuture<>(); + service.infer(model, List.of("abc"), new HashMap<>(), listener); + + InferenceResults result = listener.actionGet(TIMEOUT).get(0); + + assertThat( + result.asMap(), + Matchers.is( + Map.of( + TextEmbeddingResults.TEXT_EMBEDDING, + List.of(Map.of(TextEmbeddingResults.Embedding.EMBEDDING, List.of(0.0123F, -0.0123F))) + ) + ) + ); + assertThat(webServer.requests(), hasSize(1)); + assertNull(webServer.requests().get(0).getUri().getQuery()); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer secret")); + assertThat(webServer.requests().get(0).getHeader(ORGANIZATION_HEADER), equalTo("org")); + + var requestMap = entityAsMap(webServer.requests().get(0).getBody()); + assertThat(requestMap.size(), Matchers.is(3)); + 
assertThat(requestMap.get("input"), Matchers.is(List.of("abc"))); + assertThat(requestMap.get("model"), Matchers.is("model")); + assertThat(requestMap.get("user"), Matchers.is("user")); + } + } + + private static Model getInvalidModel(String modelId, String serviceName) { + var mockConfigs = mock(ModelConfigurations.class); + when(mockConfigs.getModelId()).thenReturn(modelId); + when(mockConfigs.getService()).thenReturn(serviceName); + + var mockModel = mock(Model.class); + when(mockModel.getConfigurations()).thenReturn(mockConfigs); + + return mockModel; + } + + private Map<String, Object> getRequestConfigMap( + Map<String, Object> serviceSettings, + Map<String, Object> taskSettings, + Map<String, Object> secretSettings + ) { + var builtServiceSettings = new HashMap<>(); + builtServiceSettings.putAll(serviceSettings); + builtServiceSettings.putAll(secretSettings); + + return new HashMap<>( + Map.of(ModelConfigurations.SERVICE_SETTINGS, builtServiceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings) + ); + } + + private PersistedConfig getPersistedConfigMap( + Map<String, Object> serviceSettings, + Map<String, Object> taskSettings, + Map<String, Object> secretSettings + ) { + + return new PersistedConfig( + new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)), + new HashMap<>(Map.of(ModelSecrets.SECRET_SETTINGS, secretSettings)) + ); + } + + private record PersistedConfig(Map<String, Object> config, Map<String, Object> secrets) {} +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsModelTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsModelTests.java new file mode 100644 index 0000000000000..96ced66723f04 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsModelTests.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.inference.services.openai.embeddings; + +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.services.openai.OpenAiServiceSettings; +import org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettings; + +import static org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsRequestTaskSettingsTests.getRequestTaskSettingsMap; +import static org.hamcrest.Matchers.is; + +public class OpenAiEmbeddingsModelTests extends ESTestCase { + + public void testOverrideWith_OverridesUser() { + var model = createModel("url", "org", "api_key", "model_name", null); + var requestTaskSettingsMap = getRequestTaskSettingsMap(null, "user_override"); + + var overriddenModel = model.overrideWith(requestTaskSettingsMap); + + assertThat(overriddenModel, is(createModel("url", "org", "api_key", "model_name", "user_override"))); + } + + public static OpenAiEmbeddingsModel createModel( + String url, + @Nullable String org, + String apiKey, + String modelName, + @Nullable String user + ) { + return new OpenAiEmbeddingsModel( + "id", + TaskType.TEXT_EMBEDDING, + "service", + new OpenAiServiceSettings(url, org), + new OpenAiEmbeddingsTaskSettings(modelName, user), + new DefaultSecretSettings(new SecureString(apiKey.toCharArray())) + ); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsRequestTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsRequestTaskSettingsTests.java new file mode 100644 index 0000000000000..b76e9f9a6d5c6 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsRequestTaskSettingsTests.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.openai.embeddings; + +import org.elasticsearch.core.Nullable; +import org.elasticsearch.test.ESTestCase; + +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.is; + +public class OpenAiEmbeddingsRequestTaskSettingsTests extends ESTestCase { + public void testFromMap_ReturnsEmptySettings_WhenTheMapIsEmpty() { + var settings = OpenAiEmbeddingsRequestTaskSettings.fromMap(new HashMap<>(Map.of())); + + assertNull(settings.model()); + assertNull(settings.user()); + } + + public void testFromMap_ReturnsEmptySettings_WhenTheMapDoesNotContainTheFields() { + var settings = OpenAiEmbeddingsRequestTaskSettings.fromMap(new HashMap<>(Map.of("key", "model"))); + + assertNull(settings.model()); + assertNull(settings.user()); + } + + public void testFromMap_ReturnsEmptyModel_WhenTheMapDoesNotContainThatField() { + var settings = OpenAiEmbeddingsRequestTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.USER, "user"))); + + assertNull(settings.model()); + assertThat(settings.user(), is("user")); + } + + public void testFromMap_ReturnsEmptyUser_WhenTheMapDoesNotContainThatField() { + var settings = OpenAiEmbeddingsRequestTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.MODEL, "model"))); + + assertNull(settings.user()); + assertThat(settings.model(), is("model")); + } + + public static Map<String, Object> getRequestTaskSettingsMap(@Nullable String model, @Nullable String user) { + var map = new HashMap<String, Object>(); + + if (model != null) { + map.put(OpenAiEmbeddingsTaskSettings.MODEL, model); + } + + if (user != null) { + map.put(OpenAiEmbeddingsTaskSettings.USER, user); + } + + return map; + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettingsTests.java new file mode 100644 index 0000000000000..d33ec12016cad --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettingsTests.java @@ -0,0 +1,128 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.openai.embeddings; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.is; + +public class OpenAiEmbeddingsTaskSettingsTests extends AbstractWireSerializingTestCase<OpenAiEmbeddingsTaskSettings> { + + public static OpenAiEmbeddingsTaskSettings createRandomWithUser() { + return new OpenAiEmbeddingsTaskSettings(randomAlphaOfLength(15), randomAlphaOfLength(15)); + } + + /** + * The created settings can have the user set to null. + */ + public static OpenAiEmbeddingsTaskSettings createRandom() { + var user = randomBoolean() ?
randomAlphaOfLength(15) : null; + return new OpenAiEmbeddingsTaskSettings(randomAlphaOfLength(15), user); + } + + public void testFromMap_MissingModel_ThrowException() { + var thrownException = expectThrows( + ValidationException.class, + () -> OpenAiEmbeddingsTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.USER, "user"))) + ); + + assertThat( + thrownException.getMessage(), + is( + Strings.format( + "Validation Failed: 1: [task_settings] does not contain the required setting [%s];", + OpenAiEmbeddingsTaskSettings.MODEL + ) + ) + ); + } + + public void testFromMap_CreatesWithModelAndUser() { + var taskSettings = OpenAiEmbeddingsTaskSettings.fromMap( + new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.MODEL, "model", OpenAiEmbeddingsTaskSettings.USER, "user")) + ); + + assertThat(taskSettings.model(), is("model")); + assertThat(taskSettings.user(), is("user")); + } + + public void testFromMap_MissingUser_DoesNotThrowException() { + var taskSettings = OpenAiEmbeddingsTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.MODEL, "model"))); + + assertThat(taskSettings.model(), is("model")); + assertNull(taskSettings.user()); + } + + public void testOverrideWith_KeepsOriginalValuesWithOverridesAreNull() { + var taskSettings = OpenAiEmbeddingsTaskSettings.fromMap( + new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.MODEL, "model", OpenAiEmbeddingsTaskSettings.USER, "user")) + ); + + var overriddenTaskSettings = taskSettings.overrideWith(OpenAiEmbeddingsRequestTaskSettings.EMPTY_SETTINGS); + assertThat(overriddenTaskSettings, is(taskSettings)); + } + + public void testOverrideWith_UsesOverriddenSettings() { + var taskSettings = OpenAiEmbeddingsTaskSettings.fromMap( + new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.MODEL, "model", OpenAiEmbeddingsTaskSettings.USER, "user")) + ); + + var requestTaskSettings = OpenAiEmbeddingsRequestTaskSettings.fromMap( + new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.MODEL, "model2", OpenAiEmbeddingsTaskSettings.USER, "user2")) + ); + + var overriddenTaskSettings = taskSettings.overrideWith(requestTaskSettings); + assertThat(overriddenTaskSettings, is(new OpenAiEmbeddingsTaskSettings("model2", "user2"))); + } + + public void testOverrideWith_UsesOnlyNonNullModelSetting() { + var taskSettings = OpenAiEmbeddingsTaskSettings.fromMap( + new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.MODEL, "model", OpenAiEmbeddingsTaskSettings.USER, "user")) + ); + + var requestTaskSettings = OpenAiEmbeddingsRequestTaskSettings.fromMap( + new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.MODEL, "model2")) + ); + + var overriddenTaskSettings = taskSettings.overrideWith(requestTaskSettings); + assertThat(overriddenTaskSettings, is(new OpenAiEmbeddingsTaskSettings("model2", "user"))); + } + + @Override + protected Writeable.Reader<OpenAiEmbeddingsTaskSettings> instanceReader() { + return OpenAiEmbeddingsTaskSettings::new; + } + + @Override + protected OpenAiEmbeddingsTaskSettings createTestInstance() { + return createRandomWithUser(); + } + + @Override + protected OpenAiEmbeddingsTaskSettings mutateInstance(OpenAiEmbeddingsTaskSettings instance) throws IOException { + return createRandomWithUser(); + } + + public static Map<String, Object> getTaskSettingsMap(String model, @Nullable String user) { + var map = new HashMap<String, Object>(Map.of(OpenAiEmbeddingsTaskSettings.MODEL, model)); + + if (user != null) { + map.put(OpenAiEmbeddingsTaskSettings.USER, user); + } + + return map; + } +} diff --git
a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/settings/DefaultSecretSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/settings/DefaultSecretSettingsTests.java new file mode 100644 index 0000000000000..2fd952fbbdda4 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/settings/DefaultSecretSettingsTests.java @@ -0,0 +1,80 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.settings; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; + +public class DefaultSecretSettingsTests extends AbstractWireSerializingTestCase { + + public static DefaultSecretSettings createRandom() { + return new DefaultSecretSettings(new SecureString(randomAlphaOfLength(15).toCharArray())); + } + + public void testFromMap() { + var apiKey = "abc"; + var serviceSettings = DefaultSecretSettings.fromMap(new HashMap<>(Map.of(DefaultSecretSettings.API_KEY, apiKey))); + + assertThat(new DefaultSecretSettings(new SecureString(apiKey.toCharArray())), is(serviceSettings)); + } + + public void testFromMap_MissingApiKey_ThrowsError() { + var thrownException = expectThrows(ValidationException.class, () -> DefaultSecretSettings.fromMap(new HashMap<>())); + + assertThat( + thrownException.getMessage(), + containsString(Strings.format("[secret_settings] does not contain the required setting [%s]", DefaultSecretSettings.API_KEY)) + ); + } + + public void testFromMap_EmptyApiKey_ThrowsError() { + var thrownException = expectThrows( + ValidationException.class, + () -> DefaultSecretSettings.fromMap(new HashMap<>(Map.of(DefaultSecretSettings.API_KEY, ""))) + ); + + assertThat( + thrownException.getMessage(), + containsString( + Strings.format( + "[secret_settings] Invalid value empty string. 
[%s] must be a non-empty string", + DefaultSecretSettings.API_KEY + ) + ) + ); + } + + @Override + protected Writeable.Reader instanceReader() { + return DefaultSecretSettings::new; + } + + @Override + protected DefaultSecretSettings createTestInstance() { + return createRandom(); + } + + @Override + protected DefaultSecretSettings mutateInstance(DefaultSecretSettings instance) throws IOException { + return createRandom(); + } + + public static Map getSecretSettingsMap(String apiKey) { + return new HashMap<>(Map.of(DefaultSecretSettings.API_KEY, apiKey)); + } +} diff --git a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldMapper.java b/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldMapper.java index a7f8a47e42463..d04bb88325cc7 100644 --- a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldMapper.java +++ b/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldMapper.java @@ -281,7 +281,7 @@ public FieldMapper build(MapperBuilderContext context) { name, FIELD_TYPE, new CountedKeywordFieldType( - name, + context.buildFullName(name), true, false, true, diff --git a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregationBuilder.java b/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregationBuilder.java index 23adacd8f65fa..72e3eb4efacf9 100644 --- a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregationBuilder.java +++ b/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregationBuilder.java @@ -30,10 +30,12 @@ import java.util.Map; import java.util.Objects; -class CountedTermsAggregationBuilder extends ValuesSourceAggregationBuilder { +public class CountedTermsAggregationBuilder extends ValuesSourceAggregationBuilder { public static final String NAME = "counted_terms"; - public static final ValuesSourceRegistry.RegistryKey REGISTRY_KEY = - new ValuesSourceRegistry.RegistryKey<>(NAME, CountedTermsAggregatorSupplier.class); + static final ValuesSourceRegistry.RegistryKey REGISTRY_KEY = new ValuesSourceRegistry.RegistryKey<>( + NAME, + CountedTermsAggregatorSupplier.class + ); public static final ParseField REQUIRED_SIZE_FIELD_NAME = new ParseField("size"); @@ -50,7 +52,7 @@ class CountedTermsAggregationBuilder extends ValuesSourceAggregationBuilder { + b.startObject("dotted.field"); + b.field("type", CountedKeywordFieldMapper.CONTENT_TYPE); + b.endObject(); + })); + ParsedDocument doc = mapper.parse(source(b -> b.field("dotted.field", "1234"))); + List fields = doc.rootDoc().getFields("dotted.field"); + assertEquals(1, fields.size()); + } } diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PublishableHttpResource.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PublishableHttpResource.java index d37f4669484a0..e2d4d173af013 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PublishableHttpResource.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PublishableHttpResource.java @@ -27,9 +27,11 @@ import java.util.Collections; import java.util.Map; 
import java.util.Set; -import java.util.stream.Collectors; +import static java.util.stream.Collectors.joining; +import static org.elasticsearch.client.RestClient.IGNORE_RESPONSE_CODES_PARAM; import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.rest.RestStatus.NOT_FOUND; /** * {@code PublishableHttpResource} represents an {@link HttpResource} that is a single file or object that can be checked and @@ -254,7 +256,7 @@ protected void checkForResource( // avoid exists and DNE parameters from being an exception by default final Set expectedResponseCodes = Sets.union(exists, doesNotExist); - request.addParameter("ignore", expectedResponseCodes.stream().map(i -> i.toString()).collect(Collectors.joining(","))); + request.addParameter(IGNORE_RESPONSE_CODES_PARAM, expectedResponseCodes.stream().map(Object::toString).collect(joining(","))); client.performRequestAsync(request, new ResponseListener() { @@ -436,9 +438,9 @@ protected void deleteResource( final Request request = new Request("DELETE", resourceBasePath + "/" + resourceName); addDefaultParameters(request); - if (false == defaultParameters.containsKey("ignore")) { + if (false == defaultParameters.containsKey(IGNORE_RESPONSE_CODES_PARAM)) { // avoid 404 being an exception by default - request.addParameter("ignore", Integer.toString(RestStatus.NOT_FOUND.getStatus())); + request.addParameter(IGNORE_RESPONSE_CODES_PARAM, Integer.toString(NOT_FOUND.getStatus())); } client.performRequestAsync(request, new ResponseListener() { diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/AbstractPublishableHttpResourceTestCase.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/AbstractPublishableHttpResourceTestCase.java index 4878289cae8d6..b72891708e780 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/AbstractPublishableHttpResourceTestCase.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/AbstractPublishableHttpResourceTestCase.java @@ -30,8 +30,9 @@ import java.util.Map; import java.util.Set; import java.util.function.Predicate; -import java.util.stream.Collectors; +import static java.util.stream.Collectors.joining; +import static org.elasticsearch.client.RestClient.IGNORE_RESPONSE_CODES_PARAM; import static org.elasticsearch.xpack.monitoring.exporter.http.AsyncHttpResourceHelper.mockBooleanActionListener; import static org.elasticsearch.xpack.monitoring.exporter.http.AsyncHttpResourceHelper.mockPublishResultActionListener; import static org.elasticsearch.xpack.monitoring.exporter.http.AsyncHttpResourceHelper.whenPerformRequestAsyncWith; @@ -443,7 +444,7 @@ protected Map getParameters( final Set statusCodes = Sets.union(exists, doesNotExist); final Map parametersWithIgnore = new HashMap<>(parameters); - parametersWithIgnore.putIfAbsent("ignore", statusCodes.stream().map(i -> i.toString()).collect(Collectors.joining(","))); + parametersWithIgnore.putIfAbsent(IGNORE_RESPONSE_CODES_PARAM, statusCodes.stream().map(Object::toString).collect(joining(","))); return parametersWithIgnore; } @@ -451,7 +452,7 @@ protected Map getParameters( protected Map deleteParameters(final Map parameters) { final Map parametersWithIgnore = new HashMap<>(parameters); - parametersWithIgnore.putIfAbsent("ignore", "404"); + parametersWithIgnore.putIfAbsent(IGNORE_RESPONSE_CODES_PARAM, Integer.toString(RestStatus.NOT_FOUND.getStatus())); return 
parametersWithIgnore; } diff --git a/x-pack/plugin/profiling/build.gradle b/x-pack/plugin/profiling/build.gradle index 30bcb5a8756dc..8275bfe633c91 100644 --- a/x-pack/plugin/profiling/build.gradle +++ b/x-pack/plugin/profiling/build.gradle @@ -17,6 +17,7 @@ esplugin { dependencies { compileOnly project(path: xpackModule('core')) + compileOnly project(path: xpackModule('mapper-counted-keyword')) testImplementation(testArtifact(project(xpackModule('core')))) testImplementation project(path: xpackModule('mapper-unsigned-long')) testImplementation project(path: xpackModule('mapper-version')) diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java index 1d199c95dc633..82226a4bad0f5 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java @@ -44,10 +44,11 @@ public void testGetStackTracesFromAPMWithMatch() throws Exception { GetStackTracesRequest request = new GetStackTracesRequest(null, query, "apm-test-*", "transaction.profiler_stack_trace_ids"); GetStackTracesResponse response = client().execute(GetStackTracesAction.INSTANCE, request).get(); - assertEquals(39, response.getTotalFrames()); + assertEquals(43, response.getTotalFrames()); assertNotNull(response.getStackTraceEvents()); - assertEquals(1L, (long) response.getStackTraceEvents().get("Ce77w10WeIDow3kd1jowlA")); + assertEquals(3L, (long) response.getStackTraceEvents().get("Ce77w10WeIDow3kd1jowlA")); + assertEquals(2L, (long) response.getStackTraceEvents().get("JvISdnJ47BQ01489cwF9DA")); assertNotNull(response.getStackTraces()); // just do a high-level spot check. 
Decoding is tested in unit-tests diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java index 19bebd05234e6..9ac616af21e7c 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java @@ -24,6 +24,7 @@ import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.ilm.LifecycleSettings; +import org.elasticsearch.xpack.countedkeyword.CountedKeywordMapperPlugin; import org.elasticsearch.xpack.ilm.IndexLifecycle; import org.elasticsearch.xpack.unsignedlong.UnsignedLongMapperPlugin; import org.elasticsearch.xpack.versionfield.VersionFieldPlugin; @@ -45,6 +46,7 @@ protected Collection> nodePlugins() { LocalStateProfilingXPackPlugin.class, IndexLifecycle.class, UnsignedLongMapperPlugin.class, + CountedKeywordMapperPlugin.class, VersionFieldPlugin.class, getTestTransportPlugin() ); diff --git a/x-pack/plugin/profiling/src/internalClusterTest/resources/data/apm-test.ndjson b/x-pack/plugin/profiling/src/internalClusterTest/resources/data/apm-test.ndjson index d68c6b5e4f2b1..d147256d6b90f 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/resources/data/apm-test.ndjson +++ b/x-pack/plugin/profiling/src/internalClusterTest/resources/data/apm-test.ndjson @@ -1,2 +1,2 @@ {"create": {"_index": "apm-test-001"}} -{"@timestamp": "1698624000", "transaction.name": "encodeSha1", "transaction.profiler_stack_trace_ids": "Ce77w10WeIDow3kd1jowlA"} +{"@timestamp": "1698624000", "transaction.name": "encodeSha1", "transaction.profiler_stack_trace_ids": ["Ce77w10WeIDow3kd1jowlA", "JvISdnJ47BQ01489cwF9DA", "JvISdnJ47BQ01489cwF9DA", "Ce77w10WeIDow3kd1jowlA", "Ce77w10WeIDow3kd1jowlA"]} diff --git a/x-pack/plugin/profiling/src/internalClusterTest/resources/indices/apm-test.json b/x-pack/plugin/profiling/src/internalClusterTest/resources/indices/apm-test.json index eba8ed14059a7..e0aeb707ffc76 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/resources/indices/apm-test.json +++ b/x-pack/plugin/profiling/src/internalClusterTest/resources/indices/apm-test.json @@ -13,7 +13,7 @@ "type": "date" }, "transaction.profiler_stack_trace_ids": { - "type": "keyword" + "type": "counted_keyword" }, "transaction.name": { "type": "keyword" diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java index 28adb58593eef..a781f91f30bbe 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java @@ -40,6 +40,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xcontent.ObjectPath; +import org.elasticsearch.xpack.countedkeyword.CountedTermsAggregationBuilder; import java.time.Instant; import java.util.ArrayList; @@ -184,8 +185,7 @@ private void searchGenericEvents(GetStackTracesRequest request, ActionListener SEARCHABLE_SNAPSHOTS_ALLOCATE_ON_ROLLING_RESTART = Setting.boolSetting( 
"xpack.searchable.snapshot.allocate_on_rolling_restart", false, @@ -36,11 +37,6 @@ public class SearchableSnapshotEnableAllocationDecider extends AllocationDecider Setting.Property.Deprecated ); - static { - // TODO xpack.searchable.snapshot.allocate_on_rolling_restart was only temporary, remove it in the next major - assert Version.CURRENT.major == Version.V_7_17_0.major + 1; - } - private volatile EnableAllocationDecider.Allocation enableAllocation; private volatile boolean allocateOnRollingRestart; diff --git a/x-pack/plugin/security/build.gradle b/x-pack/plugin/security/build.gradle index 509d4d5012f52..acb802743586c 100644 --- a/x-pack/plugin/security/build.gradle +++ b/x-pack/plugin/security/build.gradle @@ -57,7 +57,7 @@ dependencies { api "org.opensaml:opensaml-storage-impl:${versions.opensaml}" api "net.shibboleth.utilities:java-support:8.4.0" api "com.google.code.findbugs:jsr305:3.0.2" - api "org.apache.santuario:xmlsec:2.3.2" + api "org.apache.santuario:xmlsec:2.3.4" api "io.dropwizard.metrics:metrics-core:4.1.4" api ( "org.cryptacular:cryptacular:1.2.5") { exclude group: 'org.bouncycastle' diff --git a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java index fc5f5ba616ab8..7a65a03277faf 100644 --- a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java +++ b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java @@ -294,6 +294,7 @@ public class Constants { "cluster:monitor/ccr/follow_info", "cluster:monitor/ccr/follow_stats", "cluster:monitor/ccr/stats", + "cluster:monitor/data_stream/lifecycle/stats", "cluster:monitor/eql/async/status", "cluster:monitor/fetch/health/info", "cluster:monitor/health", @@ -522,6 +523,7 @@ public class Constants { "indices:data/read/xpack/rollup/get/index/caps", "indices:data/read/xpack/rollup/search", "indices:data/read/xpack/termsenum/list", + "indices:data/write/simulate/bulk", "indices:data/write/bulk", "indices:data/write/bulk[s]", "indices:data/write/bulk_shard_operations[s]", diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleDownsamplingSecurityIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleDownsamplingSecurityIT.java index 7cdc91b83afaf..1c696ffb9dd31 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleDownsamplingSecurityIT.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleDownsamplingSecurityIT.java @@ -244,7 +244,7 @@ private Map collectErrorsFromStoreAsMap() { Map indicesAndErrors = new HashMap<>(); for (DataStreamLifecycleService lifecycleService : lifecycleServices) { DataStreamLifecycleErrorStore errorStore = lifecycleService.getErrorStore(); - List allIndices = errorStore.getAllIndices(); + Set allIndices = errorStore.getAllIndices(); for (var index : allIndices) { ErrorEntry error = errorStore.getError(index); if (error != null) { diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleServiceRuntimeSecurityIT.java 
b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleServiceRuntimeSecurityIT.java index fbb2832461a7c..f5349cac99ed7 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleServiceRuntimeSecurityIT.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleServiceRuntimeSecurityIT.java @@ -48,6 +48,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Set; import java.util.concurrent.ExecutionException; import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.backingIndexEqualTo; @@ -168,7 +169,7 @@ private Map collectErrorsFromStoreAsMap() { Map indicesAndErrors = new HashMap<>(); for (DataStreamLifecycleService lifecycleService : lifecycleServices) { DataStreamLifecycleErrorStore errorStore = lifecycleService.getErrorStore(); - List allIndices = errorStore.getAllIndices(); + Set allIndices = errorStore.getAllIndices(); for (var index : allIndices) { ErrorEntry error = errorStore.getError(index); if (error != null) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/privilege/TransportPutPrivilegesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/privilege/TransportPutPrivilegesAction.java index da28d640c1dae..f13e599ef741c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/privilege/TransportPutPrivilegesAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/privilege/TransportPutPrivilegesAction.java @@ -7,9 +7,11 @@ package org.elasticsearch.xpack.security.action.privilege; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; @@ -19,6 +21,8 @@ import org.elasticsearch.xpack.security.authz.store.NativePrivilegeStore; import java.util.Collections; +import java.util.List; +import java.util.Map; /** * Transport action to retrieve one or more application privileges from the security index @@ -45,8 +49,23 @@ protected void doExecute(Task task, final PutPrivilegesRequest request, final Ac this.privilegeStore.putPrivileges( request.getPrivileges(), request.getRefreshPolicy(), - ActionListener.wrap(created -> listener.onResponse(new PutPrivilegesResponse(created)), listener::onFailure) + ActionListener.wrap(result -> listener.onResponse(buildResponse(result)), listener::onFailure) ); } } + + private static PutPrivilegesResponse buildResponse(Map> result) { + final Map> createdPrivilegesByApplicationName = Maps.newHashMapWithExpectedSize(result.size()); + result.forEach((appName, privileges) -> { + List createdPrivileges = privileges.entrySet() + .stream() + .filter(e -> e.getValue() == DocWriteResponse.Result.CREATED) + .map(e -> e.getKey()) + .toList(); + if (createdPrivileges.isEmpty() == false) { + createdPrivilegesByApplicationName.put(appName, createdPrivileges); + } + }); + return new PutPrivilegesResponse(createdPrivilegesByApplicationName); + } } diff --git 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java index 1bb638795615a..2df8edcfcb215 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java @@ -20,6 +20,7 @@ import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.bulk.BulkShardRequest; +import org.elasticsearch.action.bulk.SimulateBulkAction; import org.elasticsearch.action.delete.DeleteAction; import org.elasticsearch.action.get.MultiGetAction; import org.elasticsearch.action.index.IndexAction; @@ -251,6 +252,7 @@ static boolean checkSameUserPermissions(String action, TransportRequest request, private static boolean shouldAuthorizeIndexActionNameOnly(String action, TransportRequest request) { switch (action) { case BulkAction.NAME: + case SimulateBulkAction.NAME: case IndexAction.NAME: case DeleteAction.NAME: case INDEX_SUB_REQUEST_PRIMARY: diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java index e2e1cf1511211..004874f5b63b9 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java @@ -370,7 +370,7 @@ protected void cacheFetchedDescriptors( public void putPrivileges( Collection privileges, WriteRequest.RefreshPolicy refreshPolicy, - ActionListener>> listener + ActionListener>> listener ) { if (privileges.isEmpty()) { listener.onResponse(Map.of()); @@ -416,9 +416,9 @@ private IndexRequest preparePutPrivilege(ApplicationPrivilegeDescriptor privileg } } - private void handleBulkResponse(BulkResponse bulkResponse, ActionListener>> listener) { + private void handleBulkResponse(BulkResponse bulkResponse, ActionListener>> listener) { ElasticsearchException failure = null; - final Map> createdPrivilegesByAppName = new HashMap<>(); + final Map> privilegeResultByAppName = new HashMap<>(); for (var item : bulkResponse.getItems()) { if (item.isFailed()) { if (failure == null) { @@ -427,24 +427,22 @@ private void handleBulkResponse(BulkResponse bulkResponse, ActionListener name = nameFromDocId(item.getId()); - final String appName = name.v1(); - final String privilegeName = name.v2(); - - List createdPrivileges = createdPrivilegesByAppName.get(appName); - if (createdPrivileges == null) { - createdPrivileges = new ArrayList<>(); - createdPrivilegesByAppName.put(appName, createdPrivileges); - } - createdPrivileges.add(privilegeName); + final Tuple name = nameFromDocId(item.getId()); + final String appName = name.v1(); + final String privilegeName = name.v2(); + + var privileges = privilegeResultByAppName.get(appName); + if (privileges == null) { + privileges = new HashMap<>(); + privilegeResultByAppName.put(appName, privileges); } + privileges.put(privilegeName, item.getResponse().getResult()); } } if (failure != null) { listener.onFailure(failure); } else { - clearCaches(listener, createdPrivilegesByAppName.keySet(), createdPrivilegesByAppName); + clearCaches(listener, privilegeResultByAppName.keySet(), 
privilegeResultByAppName); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/TestSecurityClient.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/TestSecurityClient.java index 13c8612487d89..4888c0f4c9721 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/TestSecurityClient.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/TestSecurityClient.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.xcontent.XContentParserUtils; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Tuple; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xcontent.ObjectPath; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -51,6 +52,7 @@ import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.elasticsearch.test.rest.ESRestTestCase.entityAsMap; +import static org.elasticsearch.test.rest.ESRestTestCase.setIgnoredErrorResponseCodes; public class TestSecurityClient { @@ -395,7 +397,7 @@ public TokenInvalidation invalidateTokens(String requestBody) throws IOException final Request request = new Request(HttpDelete.METHOD_NAME, endpoint); // This API returns 404 (with the same body as a 200 response) if there's nothing to delete. // RestClient will throw an exception on 404, but we don't want that, we want to parse the body and return it - request.addParameter("ignore", "404"); + setIgnoredErrorResponseCodes(request, RestStatus.NOT_FOUND); request.setJsonEntity(requestBody); final Map responseBody = entityAsMap(execute(request)); final List> errors = (List>) responseBody.get("error_details"); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java index 0d8c44964b01f..53df6e6157282 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; @@ -46,6 +47,7 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.security.action.privilege.ClearPrivilegesCacheRequest; +import org.elasticsearch.xpack.core.security.action.privilege.ClearPrivilegesCacheResponse; import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilegeDescriptor; import org.elasticsearch.xpack.core.security.test.TestRestrictedIndices; @@ -71,6 +73,7 @@ import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; +import java.util.function.Predicate; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; @@ -79,15 +82,20 @@ import static java.util.Collections.singletonList; import static 
org.elasticsearch.common.util.set.Sets.newHashSet; import static org.elasticsearch.search.SearchService.ALLOW_EXPENSIVE_QUERIES; +import static org.hamcrest.Matchers.aMapWithSize; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.arrayContaining; -import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.arrayContainingInAnyOrder; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.iterableWithSize; import static org.hamcrest.Matchers.not; import static org.mockito.ArgumentMatchers.any; @@ -173,8 +181,7 @@ public void testGetSinglePrivilegeByName() throws Exception { final PlainActionFuture> future = new PlainActionFuture<>(); store.getPrivileges(List.of("myapp"), List.of("admin"), future); assertThat(requests, iterableWithSize(1)); - assertThat(requests.get(0), instanceOf(SearchRequest.class)); - SearchRequest request = (SearchRequest) requests.get(0); + final SearchRequest request = getLastRequest(SearchRequest.class); final String query = Strings.toString(request.source().query()); assertThat(query, containsString(""" {"terms":{"application":["myapp"]""")); @@ -182,27 +189,7 @@ public void testGetSinglePrivilegeByName() throws Exception { {"term":{"type":{"value":"application-privilege\"""")); final SearchHit[] hits = buildHits(sourcePrivileges); - listener.get() - .onResponse( - new SearchResponse( - new SearchResponseSections( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1 - ), - "_scrollId1", - 1, - 1, - 0, - 1, - null, - null - ) - ); + listener.get().onResponse(buildSearchResponse(hits)); assertResult(sourcePrivileges, future); } @@ -211,27 +198,7 @@ public void testGetMissingPrivilege() throws InterruptedException, ExecutionExce final PlainActionFuture> future = new PlainActionFuture<>(); store.getPrivileges(List.of("myapp"), List.of("admin"), future); final SearchHit[] hits = new SearchHit[0]; - listener.get() - .onResponse( - new SearchResponse( - new SearchResponseSections( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1 - ), - "_scrollId1", - 1, - 1, - 0, - 1, - null, - null - ) - ); + listener.get().onResponse(buildSearchResponse(hits)); final Collection applicationPrivilegeDescriptors = future.get(1, TimeUnit.SECONDS); assertThat(applicationPrivilegeDescriptors, empty()); @@ -247,8 +214,7 @@ public void testGetPrivilegesByApplicationName() throws Exception { final PlainActionFuture> future = new PlainActionFuture<>(); store.getPrivileges(Arrays.asList("myapp", "yourapp"), null, future); assertThat(requests, iterableWithSize(1)); - assertThat(requests.get(0), instanceOf(SearchRequest.class)); - SearchRequest request = (SearchRequest) requests.get(0); + final SearchRequest request = getLastRequest(SearchRequest.class); assertThat(request.indices(), arrayContaining(SecuritySystemIndices.SECURITY_MAIN_ALIAS)); final String query = Strings.toString(request.source().query()); @@ -259,27 +225,7 @@ public void 
testGetPrivilegesByApplicationName() throws Exception { {"term":{"type":{"value":"application-privilege\"""")); final SearchHit[] hits = buildHits(sourcePrivileges); - listener.get() - .onResponse( - new SearchResponse( - new SearchResponseSections( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1 - ), - "_scrollId1", - 1, - 1, - 0, - 1, - null, - null - ) - ); + listener.get().onResponse(buildSearchResponse(hits)); assertResult(sourcePrivileges, future); } @@ -321,8 +267,7 @@ public void testGetPrivilegesByWildcardApplicationName() throws Exception { final PlainActionFuture> future = new PlainActionFuture<>(); store.getPrivileges(Arrays.asList("myapp-*", "yourapp"), null, future); assertThat(requests, iterableWithSize(1)); - assertThat(requests.get(0), instanceOf(SearchRequest.class)); - SearchRequest request = (SearchRequest) requests.get(0); + final SearchRequest request = getLastRequest(SearchRequest.class); assertThat(request.indices(), arrayContaining(SecuritySystemIndices.SECURITY_MAIN_ALIAS)); final String query = Strings.toString(request.source().query()); @@ -338,27 +283,7 @@ public void testGetPrivilegesByWildcardApplicationName() throws Exception { } final SearchHit[] hits = buildHits(allowExpensiveQueries ? sourcePrivileges.subList(1, 4) : sourcePrivileges); - listener.get() - .onResponse( - new SearchResponse( - new SearchResponseSections( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1 - ), - "_scrollId1", - 1, - 1, - 0, - 1, - null, - null - ) - ); + listener.get().onResponse(buildSearchResponse(hits)); // The first and last privilege should not be retrieved assertResult(sourcePrivileges.subList(1, 4), future); } @@ -367,8 +292,7 @@ public void testGetPrivilegesByStarApplicationName() throws Exception { final PlainActionFuture> future = new PlainActionFuture<>(); store.getPrivileges(Arrays.asList("*", "anything"), null, future); assertThat(requests, iterableWithSize(1)); - assertThat(requests.get(0), instanceOf(SearchRequest.class)); - SearchRequest request = (SearchRequest) requests.get(0); + final SearchRequest request = getLastRequest(SearchRequest.class); assertThat(request.indices(), arrayContaining(SecuritySystemIndices.SECURITY_MAIN_ALIAS)); final String query = Strings.toString(request.source().query()); @@ -376,27 +300,7 @@ public void testGetPrivilegesByStarApplicationName() throws Exception { assertThat(query, containsString("{\"term\":{\"type\":{\"value\":\"application-privilege\"")); final SearchHit[] hits = new SearchHit[0]; - listener.get() - .onResponse( - new SearchResponse( - new SearchResponseSections( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1 - ), - "_scrollId1", - 1, - 1, - 0, - 1, - null, - null - ) - ); + listener.get().onResponse(buildSearchResponse(hits)); } public void testGetAllPrivileges() throws Exception { @@ -409,8 +313,7 @@ public void testGetAllPrivileges() throws Exception { final PlainActionFuture> future = new PlainActionFuture<>(); store.getPrivileges(null, null, future); assertThat(requests, iterableWithSize(1)); - assertThat(requests.get(0), instanceOf(SearchRequest.class)); - SearchRequest request = (SearchRequest) requests.get(0); + final SearchRequest request = getLastRequest(SearchRequest.class); assertThat(request.indices(), 
arrayContaining(SecuritySystemIndices.SECURITY_MAIN_ALIAS)); final String query = Strings.toString(request.source().query()); @@ -418,27 +321,7 @@ public void testGetAllPrivileges() throws Exception { assertThat(query, not(containsString("{\"terms\""))); final SearchHit[] hits = buildHits(sourcePrivileges); - listener.get() - .onResponse( - new SearchResponse( - new SearchResponseSections( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1 - ), - "_scrollId1", - 1, - 1, - 0, - 1, - null, - null - ) - ); + listener.get().onResponse(buildSearchResponse(hits)); assertResult(sourcePrivileges, future); } @@ -454,27 +337,7 @@ public void testGetPrivilegesCacheByApplicationNames() throws Exception { store.getPrivileges(List.of("myapp", "yourapp"), null, future); final SearchHit[] hits = buildHits(sourcePrivileges); - listener.get() - .onResponse( - new SearchResponse( - new SearchResponseSections( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1 - ), - "_scrollId1", - 1, - 1, - 0, - 1, - null, - null - ) - ); + listener.get().onResponse(buildSearchResponse(hits)); assertEquals(Set.of("myapp"), store.getApplicationNamesCache().get(Set.of("myapp", "yourapp"))); assertEquals(Set.copyOf(sourcePrivileges), store.getDescriptorsCache().get("myapp")); @@ -506,27 +369,7 @@ public void testGetPrivilegesCacheWithApplicationAndPrivilegeName() throws Excep store.getPrivileges(Collections.singletonList("myapp"), singletonList("user"), future); final SearchHit[] hits = buildHits(sourcePrivileges); - listener.get() - .onResponse( - new SearchResponse( - new SearchResponseSections( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1 - ), - "_scrollId1", - 1, - 1, - 0, - 1, - null, - null - ) - ); + listener.get().onResponse(buildSearchResponse(hits)); // Not caching names with no wildcard assertNull(store.getApplicationNamesCache().get(singleton("myapp"))); @@ -545,27 +388,7 @@ public void testGetPrivilegesCacheWithNonExistentApplicationName() throws Except final PlainActionFuture> future = new PlainActionFuture<>(); store.getPrivileges(Collections.singletonList("no-such-app"), null, future); final SearchHit[] hits = buildHits(emptyList()); - listener.get() - .onResponse( - new SearchResponse( - new SearchResponseSections( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1 - ), - "_scrollId1", - 1, - 1, - 0, - 1, - null, - null - ) - ); + listener.get().onResponse(buildSearchResponse(hits)); assertEquals(emptySet(), store.getApplicationNamesCache().get(singleton("no-such-app"))); assertEquals(0, store.getDescriptorsCache().count()); @@ -582,27 +405,7 @@ public void testGetPrivilegesCacheWithDifferentMatchAllApplicationNames() throws final PlainActionFuture> future = new PlainActionFuture<>(); store.getPrivileges(emptyList(), null, future); final SearchHit[] hits = buildHits(emptyList()); - listener.get() - .onResponse( - new SearchResponse( - new SearchResponseSections( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1 - ), - "_scrollId1", - 1, - 1, - 0, - 1, - null, - null - ) - ); + listener.get().onResponse(buildSearchResponse(hits)); assertEquals(emptySet(), store.getApplicationNamesCache().get(singleton("*"))); 
assertEquals(1, store.getApplicationNamesCache().count()); assertResult(emptyList(), future); @@ -629,6 +432,68 @@ public void testGetPrivilegesCacheWithDifferentMatchAllApplicationNames() throws assertResult(emptyList(), future4); } + public void testCacheIsClearedByApplicationNameWhenPrivilegesAreModified() throws Exception { + final PlainActionFuture> getFuture = new PlainActionFuture<>(); + store.getPrivileges(emptyList(), null, getFuture); + final List sourcePrivileges = List.of( + new ApplicationPrivilegeDescriptor("app1", "priv1a", Set.of("action:1a"), Map.of()), + new ApplicationPrivilegeDescriptor("app1", "priv1b", Set.of("action:1b"), Map.of()), + new ApplicationPrivilegeDescriptor("app2", "priv2a", Set.of("action:2a"), Map.of()), + new ApplicationPrivilegeDescriptor("app2", "priv2b", Set.of("action:2b"), Map.of()) + ); + final SearchHit[] hits = buildHits(sourcePrivileges); + listener.get().onResponse(buildSearchResponse(hits)); + assertEquals(Set.of("app1", "app2"), store.getApplicationNamesCache().get(singleton("*"))); + assertResult(sourcePrivileges, getFuture); + + // add a new privilege to app1 + var priv1c = new ApplicationPrivilegeDescriptor("app1", "priv1c", Set.of("action:1c"), Map.of()); + PlainActionFuture>> putFuture = new PlainActionFuture<>(); + store.putPrivileges(List.of(priv1c), WriteRequest.RefreshPolicy.IMMEDIATE, putFuture); + + handleBulkRequest(1, item -> true); + + assertCacheCleared("app1"); + + Map> putResponse = putFuture.get(); + assertThat(putResponse, aMapWithSize(1)); + assertThat(putResponse, hasKey("app1")); + assertThat(putResponse.get("app1"), aMapWithSize(1)); + assertThat(putResponse.get("app1"), hasEntry("priv1c", DocWriteResponse.Result.CREATED)); + + // modify a privilege in app2 + var priv2a = new ApplicationPrivilegeDescriptor("app2", "priv2a", Set.of("action:2*"), Map.of()); + putFuture = new PlainActionFuture<>(); + store.putPrivileges(List.of(priv2a), WriteRequest.RefreshPolicy.IMMEDIATE, putFuture); + + handleBulkRequest(1, item -> false); + assertCacheCleared("app2"); + + putResponse = putFuture.get(); + assertThat(putResponse, aMapWithSize(1)); + assertThat(putResponse, hasKey("app2")); + assertThat(putResponse.get("app2"), aMapWithSize(1)); + assertThat(putResponse.get("app2"), hasEntry("priv2a", DocWriteResponse.Result.UPDATED)); + + // modify a privilege in app1, add a privilege in app2 + var priv1a = new ApplicationPrivilegeDescriptor("app1", "priv1a", Set.of("action:1*"), Map.of()); + var priv2c = new ApplicationPrivilegeDescriptor("app2", "priv2c", Set.of("action:2c"), Map.of()); + putFuture = new PlainActionFuture<>(); + store.putPrivileges(List.of(priv1a, priv2c), WriteRequest.RefreshPolicy.IMMEDIATE, putFuture); + + handleBulkRequest(2, item -> item.id().contains("app2")); + assertCacheCleared("app1", "app2"); + + putResponse = putFuture.get(); + assertThat(putResponse, aMapWithSize(2)); + assertThat(putResponse, hasKey("app1")); + assertThat(putResponse.get("app1"), aMapWithSize(1)); + assertThat(putResponse.get("app1"), hasEntry("priv1a", DocWriteResponse.Result.UPDATED)); + assertThat(putResponse, hasKey("app2")); + assertThat(putResponse.get("app2"), aMapWithSize(1)); + assertThat(putResponse.get("app2"), hasEntry("priv2c", DocWriteResponse.Result.CREATED)); + } + public void testStaleResultsWillNotBeCached() { final List sourcePrivileges = singletonList( new ApplicationPrivilegeDescriptor("myapp", "admin", newHashSet("action:admin/*", "action:login", "data:read/*"), emptyMap()) @@ -640,27 +505,7 @@ public void 
testStaleResultsWillNotBeCached() { // Before the results can be cached, invalidate the cache to simulate stale search results store.getDescriptorsAndApplicationNamesCache().invalidateAll(); final SearchHit[] hits = buildHits(sourcePrivileges); - listener.get() - .onResponse( - new SearchResponse( - new SearchResponseSections( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1 - ), - "_scrollId1", - 1, - 1, - 0, - 1, - null, - null - ) - ); + listener.get().onResponse(buildSearchResponse(hits)); // Nothing should be cached since the results are stale assertEquals(0, store.getApplicationNamesCache().count()); @@ -708,27 +553,7 @@ protected void cacheFetchedDescriptors( final PlainActionFuture> future = new PlainActionFuture<>(); store1.getPrivileges(null, null, future); final SearchHit[] hits = buildHits(sourcePrivileges); - listener.get() - .onResponse( - new SearchResponse( - new SearchResponseSections( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1 - ), - "_scrollId1", - 1, - 1, - 0, - 1, - null, - null - ) - ); + listener.get().onResponse(buildSearchResponse(hits)); // Make sure the caching is about to happen getPrivilegeCountDown.await(5, TimeUnit.SECONDS); @@ -750,7 +575,7 @@ public void testPutPrivileges() throws Exception { new ApplicationPrivilegeDescriptor("app2", "all", newHashSet("*"), emptyMap()) ); - final PlainActionFuture>> putPrivilegeFuture = new PlainActionFuture<>(); + final PlainActionFuture>> putPrivilegeFuture = new PlainActionFuture<>(); store.putPrivileges(putPrivileges, WriteRequest.RefreshPolicy.IMMEDIATE, putPrivilegeFuture); assertThat(requests, iterableWithSize(1)); assertThat(requests, everyItem(instanceOf(BulkRequest.class))); @@ -776,7 +601,7 @@ public void testPutPrivileges() throws Exception { final boolean created = privilege.getName().equals("user") == false; responses[i] = BulkItemResponse.success( i, - created ? 
DocWriteRequest.OpType.CREATE : DocWriteRequest.OpType.UPDATE, + DocWriteRequest.OpType.INDEX, new IndexResponse(new ShardId(SecuritySystemIndices.SECURITY_MAIN_ALIAS, uuid, i), request.id(), 1, 1, 1, created) ); } @@ -789,12 +614,13 @@ public void testPutPrivileges() throws Exception { assertThat(requests.get(0), instanceOf(ClearPrivilegesCacheRequest.class)); listener.get().onResponse(null); - final Map> map = putPrivilegeFuture.actionGet(); + final Map> map = putPrivilegeFuture.actionGet(); assertThat(map.entrySet(), iterableWithSize(2)); - assertThat(map.get("app1"), iterableWithSize(1)); - assertThat(map.get("app2"), iterableWithSize(1)); - assertThat(map.get("app1"), contains("admin")); - assertThat(map.get("app2"), contains("all")); + assertThat(map.get("app1"), aMapWithSize(2)); + assertThat(map.get("app2"), aMapWithSize(1)); + assertThat(map.get("app1"), hasEntry("admin", DocWriteResponse.Result.CREATED)); + assertThat(map.get("app1"), hasEntry("user", DocWriteResponse.Result.UPDATED)); + assertThat(map.get("app2"), hasEntry("all", DocWriteResponse.Result.CREATED)); } public void testRetrieveActionNamePatternsInsteadOfPrivileges() throws Exception { @@ -953,27 +779,7 @@ public void testGetPrivilegesWorkWithoutCache() throws Exception { final PlainActionFuture> future = new PlainActionFuture<>(); store1.getPrivileges(singletonList("myapp"), null, future); final SearchHit[] hits = buildHits(sourcePrivileges); - listener.get() - .onResponse( - new SearchResponse( - new SearchResponseSections( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1 - ), - "_scrollId1", - 1, - 1, - 0, - 1, - null, - null - ) - ); + listener.get().onResponse(buildSearchResponse(hits)); assertResult(sourcePrivileges, future); } @@ -998,6 +804,12 @@ private SecurityIndexManager.State dummyState( ); } + private T getLastRequest(Class requestClass) { + final ActionRequest last = requests.get(requests.size() - 1); + assertThat(last, instanceOf(requestClass)); + return requestClass.cast(last); + } + private SearchHit[] buildHits(List sourcePrivileges) { final SearchHit[] hits = new SearchHit[sourcePrivileges.size()]; for (int i = 0; i < hits.length; i++) { @@ -1008,6 +820,51 @@ private SearchHit[] buildHits(List sourcePrivile return hits; } + private static SearchResponse buildSearchResponse(SearchHit[] hits) { + return new SearchResponse( + new SearchResponseSections( + new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), + null, + null, + false, + false, + null, + 1 + ), + "_scrollId1", + 1, + 1, + 0, + 1, + null, + null + ); + } + + private void handleBulkRequest(int expectedCount, Predicate> isCreated) { + final BulkRequest bulkReq = getLastRequest(BulkRequest.class); + assertThat(bulkReq.requests(), hasSize(expectedCount)); + + final var uuid = UUIDs.randomBase64UUID(random()); + final var items = new BulkItemResponse[expectedCount]; + for (int i = 0; i < expectedCount; i++) { + final DocWriteRequest itemReq = bulkReq.requests().get(i); + items[i] = BulkItemResponse.success( + i, + itemReq.opType(), + new IndexResponse( + new ShardId(SecuritySystemIndices.SECURITY_MAIN_ALIAS, uuid, 0), + itemReq.id(), + 1, + 1, + 1, + isCreated.test(itemReq) + ) + ); + } + listener.get().onResponse(new BulkResponse(items, randomIntBetween(1, 999))); + } + private void assertResult( List sourcePrivileges, PlainActionFuture> future @@ -1017,6 +874,13 @@ private void assertResult( assertThat(new HashSet<>(getPrivileges), 
equalTo(new HashSet<>(sourcePrivileges))); } + private void assertCacheCleared(String... applicationNames) { + final ClearPrivilegesCacheRequest clearCacheReq = getLastRequest(ClearPrivilegesCacheRequest.class); + assertThat(clearCacheReq.applicationNames(), arrayContainingInAnyOrder(applicationNames)); + assertThat(clearCacheReq.clearRolesCache(), is(true)); + listener.get().onResponse(new ClearPrivilegesCacheResponse(clusterService.getClusterName(), List.of(), List.of())); + } + @SuppressWarnings("unchecked") private static Consumer anyConsumer() { return any(Consumer.class); diff --git a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java index 29c7f1b98f4bf..3832bbf488045 100644 --- a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java +++ b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java @@ -187,7 +187,8 @@ static ShutdownShardMigrationStatus shardMigrationStatus( return new ShutdownShardMigrationStatus( SingleNodeShutdownMetadata.Status.COMPLETE, 0, - "no shard relocation is necessary for a node restart" + "no shard relocation is necessary for a node restart", + null ); } @@ -196,7 +197,8 @@ static ShutdownShardMigrationStatus shardMigrationStatus( return new ShutdownShardMigrationStatus( SingleNodeShutdownMetadata.Status.NOT_STARTED, 0, - "node is not currently part of the cluster" + "node is not currently part of the cluster", + null ); } @@ -242,7 +244,7 @@ static ShutdownShardMigrationStatus shardMigrationStatus( // The node is in `DiscoveryNodes`, but not `RoutingNodes` - so there are no shards assigned to it. We're done. if (currentState.getRoutingNodes().node(nodeId) == null) { // We don't know about that node - return new ShutdownShardMigrationStatus(SingleNodeShutdownMetadata.Status.COMPLETE, 0); + return new ShutdownShardMigrationStatus(SingleNodeShutdownMetadata.Status.COMPLETE, 0, 0, 0); } // Check if there are any shards currently on this node, and if there are any relocating shards @@ -256,12 +258,14 @@ static ShutdownShardMigrationStatus shardMigrationStatus( SingleNodeShutdownMetadata.Status shardStatus = totalRemainingShards == 0 ? 
SingleNodeShutdownMetadata.Status.COMPLETE : SingleNodeShutdownMetadata.Status.IN_PROGRESS; - return new ShutdownShardMigrationStatus(shardStatus, totalRemainingShards); + return new ShutdownShardMigrationStatus(shardStatus, startedShards, relocatingShards, initializingShards); } else if (initializingShards > 0 && relocatingShards == 0 && startedShards == 0) { // If there's only initializing shards left, return now with a note that only initializing shards are left return new ShutdownShardMigrationStatus( SingleNodeShutdownMetadata.Status.IN_PROGRESS, - totalRemainingShards, + startedShards, + relocatingShards, + initializingShards, "all remaining shards are currently INITIALIZING and must finish before they can be moved off this node" ); } @@ -314,7 +318,8 @@ static ShutdownShardMigrationStatus shardMigrationStatus( 0, "[" + shardsToIgnoreForFinalStatus.get() - + "] shards cannot be moved away from this node but have at least one copy on another node in the cluster" + + "] shards cannot be moved away from this node but have at least one copy on another node in the cluster", + null ); } else if (unmovableShard.isPresent()) { // We found a shard that can't be moved, so shard relocation is stalled. Blame the unmovable shard. @@ -334,7 +339,12 @@ static ShutdownShardMigrationStatus shardMigrationStatus( decision ); } else { - return new ShutdownShardMigrationStatus(SingleNodeShutdownMetadata.Status.IN_PROGRESS, totalRemainingShards); + return new ShutdownShardMigrationStatus( + SingleNodeShutdownMetadata.Status.IN_PROGRESS, + startedShards, + relocatingShards, + initializingShards + ); } } diff --git a/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/GetShutdownStatusResponseTests.java b/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/GetShutdownStatusResponseTests.java index 5c375152bf6c8..de579deafb44b 100644 --- a/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/GetShutdownStatusResponseTests.java +++ b/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/GetShutdownStatusResponseTests.java @@ -68,7 +68,7 @@ public static SingleNodeShutdownMetadata randomNodeShutdownMetadata() { public static SingleNodeShutdownStatus randomNodeShutdownStatus() { return new SingleNodeShutdownStatus( randomNodeShutdownMetadata(), - new ShutdownShardMigrationStatus(randomStatus(), randomNonNegativeLong()), + new ShutdownShardMigrationStatus(randomStatus(), randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()), new ShutdownPersistentTasksStatus(), new ShutdownPluginsStatus(randomBoolean()) ); diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/s3/build.gradle b/x-pack/plugin/snapshot-repo-test-kit/qa/s3/build.gradle index 36b13ef8b12a7..513589cfbfa06 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/s3/build.gradle +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/s3/build.gradle @@ -69,6 +69,12 @@ testClusters.matching { it.name == "javaRestTest" }.configureEach { println "Using an external service to test " + project.name } setting 'xpack.security.enabled', 'false' + + // Additional tracing related to investigation into https://github.com/elastic/elasticsearch/issues/102294 + setting 'logger.org.elasticsearch.repositories.s3', 'TRACE' + setting 'logger.org.elasticsearch.repositories.blobstore.testkit', 'TRACE' + setting 'logger.com.amazonaws.request', 'DEBUG' + setting 'logger.org.apache.http.wire', 'DEBUG' } tasks.register("s3ThirdPartyTest") { diff --git 
a/x-pack/plugin/snapshot-repo-test-kit/qa/s3/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/S3SnapshotRepoTestKitIT.java b/x-pack/plugin/snapshot-repo-test-kit/qa/s3/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/S3SnapshotRepoTestKitIT.java index a9a034eb9efd2..9e40f7b7aada2 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/s3/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/S3SnapshotRepoTestKitIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/s3/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/S3SnapshotRepoTestKitIT.java @@ -28,4 +28,9 @@ protected Settings repositorySettings() { return Settings.builder().put("client", "repo_test_kit").put("bucket", bucket).put("base_path", basePath).build(); } + + @Override + public void testRepositoryAnalysis() throws Exception { + super.testRepositoryAnalysis(); + } } diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java index a86adaef2c1b1..cad66019a3bbb 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java @@ -422,6 +422,7 @@ private boolean setFirstFailure(Exception e) { } private void fail(Exception e) { + logger.trace(() -> Strings.format("repository analysis in [%s] failed", blobPath), e); if (setFirstFailure(e) == false) { if (innerFailures.tryAcquire()) { final Throwable cause = ExceptionsHelper.unwrapCause(e); @@ -732,6 +733,7 @@ public void run() { if (currentValue <= request.getRegisterOperationCount() || otherAnalysisComplete.get() == false) { // complete at least request.getRegisterOperationCount() steps, but we may as well keep running for longer too + logger.trace("[{}] incrementing uncontended register [{}] from [{}]", blobPath, registerName, currentValue); transportService.sendChildRequest( nodes.get(currentValue < nodes.size() ? 
currentValue : random.nextInt(nodes.size())), UncontendedRegisterAnalyzeAction.NAME, @@ -745,13 +747,14 @@ public void run() { ) ); } else { + logger.trace("[{}] resetting uncontended register [{}] from [{}]", blobPath, registerName, currentValue); transportService.getThreadPool() .executor(ThreadPool.Names.SNAPSHOT) .execute( ActionRunnable.wrap( ActionListener.releaseAfter( ActionListener.wrap( - r -> logger.trace("uncontended register analysis succeeded"), + r -> logger.trace("[{}] uncontended register [{}] analysis succeeded", blobPath, registerName), AsyncAction.this::fail ), requestRefs.acquire() diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/CartesianShapeValues.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/CartesianShapeValues.java index 21b5b7934e42e..1bd7296b2da39 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/CartesianShapeValues.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/CartesianShapeValues.java @@ -67,6 +67,7 @@ public CartesianShapeValue() { super(CoordinateEncoder.CARTESIAN, CartesianPoint::new); } + @SuppressWarnings("this-escape") public CartesianShapeValue(StreamInput in) throws IOException { this(); this.reset(in); diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/GeoShapeValues.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/GeoShapeValues.java index 799ff035da73f..fb32e9e1c4e4f 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/GeoShapeValues.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/GeoShapeValues.java @@ -70,6 +70,7 @@ public GeoShapeValue() { this.tile2DVisitor = new Tile2DVisitor(); } + @SuppressWarnings("this-escape") public GeoShapeValue(StreamInput in) throws IOException { this(); reset(in); diff --git a/x-pack/plugin/stack/src/javaRestTest/java/org/elasticsearch/xpack/stack/EcsDynamicTemplatesIT.java b/x-pack/plugin/stack/src/javaRestTest/java/org/elasticsearch/xpack/stack/EcsDynamicTemplatesIT.java index ea3286e96160c..25cea3b3f6e0a 100644 --- a/x-pack/plugin/stack/src/javaRestTest/java/org/elasticsearch/xpack/stack/EcsDynamicTemplatesIT.java +++ b/x-pack/plugin/stack/src/javaRestTest/java/org/elasticsearch/xpack/stack/EcsDynamicTemplatesIT.java @@ -32,7 +32,6 @@ import java.net.URL; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.List; @@ -77,7 +76,7 @@ private static void prepareEcsDynamicTemplates() throws IOException { "/" + ECS_DYNAMIC_TEMPLATES_FILE, Integer.toString(1), StackTemplateRegistry.TEMPLATE_VERSION_VARIABLE, - Collections.emptyMap() + StackTemplateRegistry.ADDITIONAL_TEMPLATE_VARIABLES ); Map ecsDynamicTemplatesRaw; try ( diff --git a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java index 36da14680c66a..8dc8238b8230b 100644 --- a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java +++ b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java @@ -57,7 +57,7 @@ public class StackTemplateRegistry extends IndexTemplateRegistry { private final FeatureService 
featureService; private volatile boolean stackTemplateEnabled; - private static final Map ADDITIONAL_TEMPLATE_VARIABLES = Map.of("xpack.stack.template.deprecated", "false"); + public static final Map ADDITIONAL_TEMPLATE_VARIABLES = Map.of("xpack.stack.template.deprecated", "false"); // General mappings conventions for any data that ends up in a data stream public static final String DATA_STREAMS_MAPPINGS_COMPONENT_TEMPLATE_NAME = "data-streams@mappings"; diff --git a/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java b/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java index e6388bb6fea5d..c616c1c238171 100644 --- a/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java +++ b/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.transform.TransformField; @@ -618,7 +619,7 @@ protected static void deleteTransform(String transformId) throws IOException { protected static void deleteTransform(String transformId, boolean ignoreNotFound, boolean deleteDestIndex) throws IOException { Request request = new Request("DELETE", getTransformEndpoint() + transformId); if (ignoreNotFound) { - request.addParameter("ignore", "404"); + setIgnoredErrorResponseCodes(request, RestStatus.NOT_FOUND); } if (deleteDestIndex) { request.addParameter(TransformField.DELETE_DEST_INDEX.getPreferredName(), Boolean.TRUE.toString()); diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformContext.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformContext.java index 9ef9823e10a07..7fdabda6189a9 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformContext.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformContext.java @@ -24,7 +24,7 @@ public interface Listener { void failureCountChanged(); - void fail(String failureMessage, ActionListener listener); + void fail(Throwable exception, String failureMessage, ActionListener listener); } private final AtomicReference taskState; @@ -218,8 +218,8 @@ void shutdown() { taskListener.shutdown(); } - void markAsFailed(String failureMessage) { - taskListener.fail(failureMessage, ActionListener.wrap(r -> { + void markAsFailed(Throwable exception, String failureMessage) { + taskListener.fail(exception, failureMessage, ActionListener.wrap(r -> { // Successfully marked as failed, reset counter so that task can be restarted failureCount.set(0); }, e -> {})); diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandler.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandler.java index 4ac6aff416164..7354e588c7dbd 100644 --- 
a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandler.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandler.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.script.ScriptException; @@ -51,28 +52,32 @@ class TransformFailureHandler { /** * Handle a search or indexing failure * - * @param e the exception caught + * @param exception the exception caught * @param settingsConfig The settings */ - void handleIndexerFailure(Exception e, SettingsConfig settingsConfig) { + void handleIndexerFailure(Exception exception, SettingsConfig settingsConfig) { // more detailed reporting in the handlers and below - logger.debug(() -> "[" + transformId + "] transform encountered an exception: ", e); - Throwable unwrappedException = ExceptionsHelper.findSearchExceptionRootCause(e); + logger.atDebug().withThrowable(exception).log("[{}] transform encountered an exception", transformId); + Throwable unwrappedException = ExceptionsHelper.findSearchExceptionRootCause(exception); boolean unattended = Boolean.TRUE.equals(settingsConfig.getUnattended()); - if (unwrappedException instanceof CircuitBreakingException circuitBreakingException) { - handleCircuitBreakingException(circuitBreakingException, unattended); - } else if (unwrappedException instanceof ScriptException scriptException) { - handleScriptException(scriptException, unattended); - } else if (unwrappedException instanceof BulkIndexingException bulkIndexingException) { - handleBulkIndexingException(bulkIndexingException, unattended, getNumFailureRetries(settingsConfig)); - } else if (unwrappedException instanceof ClusterBlockException clusterBlockException) { + if (unwrappedException instanceof CircuitBreakingException e) { + handleCircuitBreakingException(e, unattended); + } else if (unwrappedException instanceof ScriptException e) { + handleScriptException(e, unattended); + } else if (unwrappedException instanceof BulkIndexingException e) { + handleBulkIndexingException(e, unattended, getNumFailureRetries(settingsConfig)); + } else if (unwrappedException instanceof ClusterBlockException e) { // gh#89802 always retry for a cluster block exception, because a cluster block should be temporary. - retry(clusterBlockException, clusterBlockException.getDetailedMessage(), unattended, getNumFailureRetries(settingsConfig)); - } else if (unwrappedException instanceof ElasticsearchException elasticsearchException) { - handleElasticsearchException(elasticsearchException, unattended, getNumFailureRetries(settingsConfig)); - } else if (unwrappedException instanceof IllegalArgumentException illegalArgumentException) { - handleIllegalArgumentException(illegalArgumentException, unattended); + retry(e, e.getDetailedMessage(), unattended, getNumFailureRetries(settingsConfig)); + } else if (unwrappedException instanceof SearchPhaseExecutionException e) { + // The reason of a SearchPhaseExecutionException unfortunately contains a full stack trace. + // Instead of displaying that to the user, get the cause's message instead. + retry(e, e.getCause() != null ? 
e.getCause().getMessage() : null, unattended, getNumFailureRetries(settingsConfig)); + } else if (unwrappedException instanceof ElasticsearchException e) { + handleElasticsearchException(e, unattended, getNumFailureRetries(settingsConfig)); + } else if (unwrappedException instanceof IllegalArgumentException e) { + handleIllegalArgumentException(e, unattended); } else { retry( unwrappedException, @@ -98,6 +103,7 @@ boolean handleStatePersistenceFailure(Exception e, SettingsConfig settingsConfig if (numFailureRetries != -1 && failureCount > numFailureRetries) { fail( + e, "task encountered more than " + numFailureRetries + " failures updating internal state; latest failure: " + e.getMessage() ); return true; @@ -130,7 +136,7 @@ private void handleCircuitBreakingException(CircuitBreakingException circuitBrea if (unattended) { retry(circuitBreakingException, message, true, -1); } else { - fail(message); + fail(circuitBreakingException, message); } } else { String message = TransformMessages.getMessage(TransformMessages.LOG_TRANSFORM_PIVOT_REDUCE_PAGE_SIZE, pageSize, newPageSize); @@ -155,7 +161,7 @@ private void handleScriptException(ScriptException scriptException, boolean unat if (unattended) { retry(scriptException, message, true, -1); } else { - fail(message); + fail(scriptException, message); } } @@ -172,7 +178,7 @@ private void handleBulkIndexingException(BulkIndexingException bulkIndexingExcep TransformMessages.LOG_TRANSFORM_PIVOT_IRRECOVERABLE_BULK_INDEXING_ERROR, bulkIndexingException.getDetailedMessage() ); - fail(message); + fail(bulkIndexingException, message); } else { retry(bulkIndexingException, bulkIndexingException.getDetailedMessage(), unattended, numFailureRetries); } @@ -190,7 +196,7 @@ private void handleBulkIndexingException(BulkIndexingException bulkIndexingExcep private void handleElasticsearchException(ElasticsearchException elasticsearchException, boolean unattended, int numFailureRetries) { if (unattended == false && ExceptionRootCauseFinder.isExceptionIrrecoverable(elasticsearchException)) { String message = "task encountered irrecoverable failure: " + elasticsearchException.getDetailedMessage(); - fail(message); + fail(elasticsearchException, message); } else { retry(elasticsearchException, elasticsearchException.getDetailedMessage(), unattended, numFailureRetries); } @@ -209,7 +215,7 @@ private void handleIllegalArgumentException(IllegalArgumentException illegalArgu retry(illegalArgumentException, illegalArgumentException.getMessage(), true, -1); } else { String message = "task encountered irrecoverable failure: " + illegalArgumentException.getMessage(); - fail(message); + fail(illegalArgumentException, message); } } @@ -226,14 +232,13 @@ private void handleIllegalArgumentException(IllegalArgumentException illegalArgu */ private void retry(Throwable unwrappedException, String message, boolean unattended, int numFailureRetries) { // group failures to decide whether to report it below - final boolean repeatedFailure = context.getLastFailure() == null - ? 
false - : unwrappedException.getClass().equals(context.getLastFailure().getClass()); + final boolean repeatedFailure = context.getLastFailure() != null + && unwrappedException.getClass().equals(context.getLastFailure().getClass()); final int failureCount = context.incrementAndGetFailureCount(unwrappedException); if (unattended == false && numFailureRetries != -1 && failureCount > numFailureRetries) { - fail("task encountered more than " + numFailureRetries + " failures; latest failure: " + message); + fail(unwrappedException, "task encountered more than " + numFailureRetries + " failures; latest failure: " + message); return; } @@ -248,7 +253,9 @@ private void retry(Throwable unwrappedException, String message, boolean unatten numFailureRetries ); - logger.log(unattended ? Level.INFO : Level.WARN, () -> "[" + transformId + "] " + retryMessage, unwrappedException); + logger.atLevel(unattended ? Level.INFO : Level.WARN) + .withThrowable(unwrappedException) + .log("[{}] {}", transformId, retryMessage); auditor.audit(unattended ? INFO : WARNING, transformId, retryMessage); } } @@ -261,9 +268,9 @@ private void retry(Throwable unwrappedException, String message, boolean unatten * * @param failureMessage the reason of the failure */ - private void fail(String failureMessage) { + private void fail(Throwable exception, String failureMessage) { // note: logging and audit is done as part of context.markAsFailed - context.markAsFailed(failureMessage); + context.markAsFailed(exception, failureMessage); } /** diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutor.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutor.java index 6a8a8c8548491..ac690c625124f 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutor.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutor.java @@ -229,7 +229,7 @@ protected void nodeOperation(AllocatedPersistentTask task, @Nullable TransformTa // TODO: do not use the same error message as for loading the last checkpoint String msg = TransformMessages.getMessage(TransformMessages.FAILED_TO_LOAD_TRANSFORM_CHECKPOINT, transformId); logger.error(msg, error); - markAsFailed(buildTask, msg); + markAsFailed(buildTask, error, msg); }); // <5> load last checkpoint @@ -243,7 +243,7 @@ protected void nodeOperation(AllocatedPersistentTask task, @Nullable TransformTa }, error -> { String msg = TransformMessages.getMessage(TransformMessages.FAILED_TO_LOAD_TRANSFORM_CHECKPOINT, transformId); logger.error(msg, error); - markAsFailed(buildTask, msg); + markAsFailed(buildTask, error, msg); }); // <4> Set the previous stats (if they exist), initialize the indexer, start the task (If it is STOPPED) @@ -288,7 +288,7 @@ protected void nodeOperation(AllocatedPersistentTask task, @Nullable TransformTa if (error instanceof ResourceNotFoundException == false) { String msg = TransformMessages.getMessage(TransformMessages.FAILED_TO_LOAD_TRANSFORM_STATE, transformId); logger.error(msg, error); - markAsFailed(buildTask, msg); + markAsFailed(buildTask, error, msg); } else { logger.trace("[{}] No stats found (new transform), starting the task", transformId); startTask(buildTask, indexerBuilder, null, null, startTaskListener); @@ -309,7 +309,7 @@ protected void nodeOperation(AllocatedPersistentTask task, @Nullable TransformTa 
TransformDeprecations.MIN_TRANSFORM_VERSION ); auditor.error(transformId, transformTooOldError); - markAsFailed(buildTask, transformTooOldError); + markAsFailed(buildTask, null, transformTooOldError); return; } @@ -321,6 +321,7 @@ protected void nodeOperation(AllocatedPersistentTask task, @Nullable TransformTa auditor.error(transformId, validationException.getMessage()); markAsFailed( buildTask, + validationException, TransformMessages.getMessage( TransformMessages.TRANSFORM_CONFIGURATION_INVALID, transformId, @@ -330,8 +331,7 @@ protected void nodeOperation(AllocatedPersistentTask task, @Nullable TransformTa } }, error -> { String msg = TransformMessages.getMessage(TransformMessages.FAILED_TO_LOAD_TRANSFORM_CONFIGURATION, transformId); - logger.error(msg, error); - markAsFailed(buildTask, msg); + markAsFailed(buildTask, error, msg); }); // <2> Get the transform config @@ -340,8 +340,7 @@ protected void nodeOperation(AllocatedPersistentTask task, @Nullable TransformTa error -> { Throwable cause = ExceptionsHelper.unwrapCause(error); String msg = "Failed to create internal index mappings"; - logger.error(msg, cause); - markAsFailed(buildTask, msg + "[" + cause + "]"); + markAsFailed(buildTask, error, msg + "[" + cause + "]"); } ); @@ -368,10 +367,11 @@ private static IndexerState currentIndexerState(TransformState previousState) { }; } - private static void markAsFailed(TransformTask task, String reason) { + private static void markAsFailed(TransformTask task, Throwable exception, String reason) { CountDownLatch latch = new CountDownLatch(1); task.fail( + exception, reason, new LatchedActionListener<>( ActionListener.wrap( diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformTask.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformTask.java index 753d61410d5a8..6ab7e7764b187 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformTask.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformTask.java @@ -480,7 +480,7 @@ public void failureCountChanged() { } @Override - public void fail(String reason, ActionListener listener) { + public void fail(Throwable exception, String reason, ActionListener listener) { synchronized (context) { // If we are already flagged as failed, this probably means that a second trigger started firing while we were attempting to // flag the previously triggered indexer as failed. Exit early as we are already flagged as failed. @@ -505,7 +505,7 @@ public void fail(String reason, ActionListener listener) { return; } - logger.error("[{}] transform has failed; experienced: [{}].", transform.getId(), reason); + logger.atError().withThrowable(exception).log("[{}] transform has failed; experienced: [{}].", transform.getId(), reason); auditor.error(transform.getId(), reason); // We should not keep retrying. Either the task will be stopped, or started // If it is started again, it is registered again. 
@@ -517,7 +517,7 @@ public void fail(String reason, ActionListener listener) { // The end user should see that the task is in a failed state, and attempt to stop it again but with force=true context.setTaskStateToFailed(reason); TransformState newState = getState(); - // Even though the indexer information is persisted to an index, we still need TransformTaskState in the clusterstate + // Even though the indexer information is persisted to an index, we still need TransformTaskState in the cluster state // This keeps track of STARTED, FAILED, STOPPED // This is because a FAILED state could occur because we failed to read the config from the internal index, which would imply // that diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandlerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandlerTests.java index 0218f5ae86226..84c8d4e140408 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandlerTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandlerTests.java @@ -50,7 +50,7 @@ public void failureCountChanged() { } @Override - public void fail(String failureMessage, ActionListener listener) { + public void fail(Throwable exception, String failureMessage, ActionListener listener) { failed = true; } diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureHandlingTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureHandlingTests.java index f59aaab33f0f1..d3be18a193415 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureHandlingTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureHandlingTests.java @@ -1068,7 +1068,7 @@ public void shutdown() {} public void failureCountChanged() {} @Override - public void fail(String message, ActionListener listener) { + public void fail(Throwable exception, String message, ActionListener listener) { assertTrue(failIndexerCalled.compareAndSet(false, true)); assertTrue(failureMessage.compareAndSet(null, message)); } diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureOnStatePersistenceTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureOnStatePersistenceTests.java index 33ced92a8fa19..dba954994f9a3 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureOnStatePersistenceTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureOnStatePersistenceTests.java @@ -197,7 +197,7 @@ public void shutdown() {} public void failureCountChanged() {} @Override - public void fail(String failureMessage, ActionListener listener) { + public void fail(Throwable exception, String failureMessage, ActionListener listener) { state.set(TransformTaskState.FAILED); } }; @@ -415,7 +415,7 @@ public void shutdown() {} public void failureCountChanged() {} @Override - public void fail(String failureMessage, ActionListener listener) { + public void fail(Throwable exception, String failureMessage, ActionListener listener) { 
state.set(TransformTaskState.FAILED); } }; diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformTaskTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformTaskTests.java index 277553cd9f4ec..cda258c6daa81 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformTaskTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformTaskTests.java @@ -222,7 +222,7 @@ public void testStopOnFailedTaskWithoutIndexer() { transformTask.init(mock(PersistentTasksService.class), taskManager, "task-id", 42); AtomicBoolean listenerCalled = new AtomicBoolean(false); - transformTask.fail("because", ActionTestUtils.assertNoFailureListener(r -> { listenerCalled.compareAndSet(false, true); })); + transformTask.fail(null, "because", ActionTestUtils.assertNoFailureListener(r -> { listenerCalled.compareAndSet(false, true); })); TransformState state = transformTask.getState(); assertEquals(TransformTaskState.FAILED, state.getTaskState()); diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryActionConditionTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryActionConditionTests.java index 1a6982e62b002..b82622fbd4819 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryActionConditionTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryActionConditionTests.java @@ -34,6 +34,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; import static org.elasticsearch.index.query.QueryBuilders.termQuery; @@ -158,7 +159,6 @@ public void testActionConditionWithHardFailures() throws Exception { } @SuppressWarnings("unchecked") - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/102070") public void testActionConditionWithFailures() throws Exception { final String id = "testActionConditionWithFailures"; final ExecutableCondition[] actionConditionsWithFailure = new ExecutableCondition[] { @@ -172,6 +172,7 @@ public void testActionConditionWithFailures() throws Exception { putAndTriggerWatch(id, input, actionConditionsWithFailure); assertWatchWithMinimumActionsCount(id, ExecutionState.EXECUTED, 1); + AtomicReference searchHitReference = new AtomicReference<>(); // only one action should have failed via condition assertBusy(() -> { // Watcher history is now written asynchronously, so we check this in an assertBusy @@ -179,38 +180,34 @@ public void testActionConditionWithFailures() throws Exception { final SearchResponse response = searchHistory(SearchSourceBuilder.searchSource().query(termQuery("watch_id", id))); try { assertThat(response.getHits().getTotalHits().value, is(oneOf(1L, 2L))); + searchHitReference.set(response.getHits().getAt(0)); } finally { response.decRef(); } }); - final SearchResponse response = searchHistory(SearchSourceBuilder.searchSource().query(termQuery("watch_id", id))); - try { - final SearchHit hit = response.getHits().getAt(0); - final List actions = getActionsFromHit(hit.getSourceAsMap()); + final SearchHit hit = searchHitReference.get(); + final List actions = getActionsFromHit(hit.getSourceAsMap()); - for (int i = 0; i < 
actionConditionsWithFailure.length; ++i) { - final Map action = (Map) actions.get(i); - final Map condition = (Map) action.get("condition"); - final Map logging = (Map) action.get("logging"); + for (int i = 0; i < actionConditionsWithFailure.length; ++i) { + final Map action = (Map) actions.get(i); + final Map condition = (Map) action.get("condition"); + final Map logging = (Map) action.get("logging"); - assertThat(action.get("id"), is("action" + i)); - assertThat(condition.get("type"), is(actionConditionsWithFailure[i].type())); + assertThat(action.get("id"), is("action" + i)); + assertThat(condition.get("type"), is(actionConditionsWithFailure[i].type())); - if (i == failedIndex) { - assertThat(action.get("status"), is("condition_failed")); - assertThat(condition.get("met"), is(false)); - assertThat(action.get("reason"), is("condition not met. skipping")); - assertThat(logging, nullValue()); - } else { - assertThat(action.get("status"), is("success")); - assertThat(condition.get("met"), is(true)); - assertThat(action.get("reason"), nullValue()); - assertThat(logging.get("logged_text"), is(Integer.toString(i))); - } + if (i == failedIndex) { + assertThat(action.get("status"), is("condition_failed")); + assertThat(condition.get("met"), is(false)); + assertThat(action.get("reason"), is("condition not met. skipping")); + assertThat(logging, nullValue()); + } else { + assertThat(action.get("status"), is("success")); + assertThat(condition.get("met"), is(true)); + assertThat(action.get("reason"), nullValue()); + assertThat(logging.get("logged_text"), is(Integer.toString(i))); } - } finally { - response.decRef(); } } @@ -235,6 +232,7 @@ public void testActionCondition() throws Exception { assertWatchWithMinimumActionsCount(id, ExecutionState.EXECUTED, 1); + AtomicReference searchHitReference = new AtomicReference<>(); // all actions should be successful assertBusy(() -> { // Watcher history is now written asynchronously, so we check this in an assertBusy @@ -242,30 +240,26 @@ public void testActionCondition() throws Exception { final SearchResponse response = searchHistory(SearchSourceBuilder.searchSource().query(termQuery("watch_id", id))); try { assertThat(response.getHits().getTotalHits().value, is(oneOf(1L, 2L))); + searchHitReference.set(response.getHits().getAt(0)); } finally { response.decRef(); } }); - final SearchResponse response = searchHistory(SearchSourceBuilder.searchSource().query(termQuery("watch_id", id))); - try { - final SearchHit hit = response.getHits().getAt(0); - final List actions = getActionsFromHit(hit.getSourceAsMap()); + final SearchHit hit = searchHitReference.get(); + final List actions = getActionsFromHit(hit.getSourceAsMap()); - for (int i = 0; i < actionConditions.size(); ++i) { - final Map action = (Map) actions.get(i); - final Map condition = (Map) action.get("condition"); - final Map logging = (Map) action.get("logging"); + for (int i = 0; i < actionConditions.size(); ++i) { + final Map action = (Map) actions.get(i); + final Map condition = (Map) action.get("condition"); + final Map logging = (Map) action.get("logging"); - assertThat(action.get("id"), is("action" + i)); - assertThat(action.get("status"), is("success")); - assertThat(condition.get("type"), is(actionConditions.get(i).type())); - assertThat(condition.get("met"), is(true)); - assertThat(action.get("reason"), nullValue()); - assertThat(logging.get("logged_text"), is(Integer.toString(i))); - } - } finally { - response.decRef(); + assertThat(action.get("id"), is("action" + i)); + 
+            assertThat(action.get("status"), is("success"));
+            assertThat(condition.get("type"), is(actionConditions.get(i).type()));
+            assertThat(condition.get("met"), is(true));
+            assertThat(action.get("reason"), nullValue());
+            assertThat(logging.get("logged_text"), is(Integer.toString(i)));
         }
     }
 
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/search/ExecutableSearchInput.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/search/ExecutableSearchInput.java
index eeb43c52a1e20..6dbcef08481d1 100644
--- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/search/ExecutableSearchInput.java
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/search/ExecutableSearchInput.java
@@ -18,7 +18,6 @@
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.script.Script;
-import org.elasticsearch.search.SearchHit;
 import org.elasticsearch.xcontent.NamedXContentRegistry;
 import org.elasticsearch.xcontent.XContentParser;
 import org.elasticsearch.xcontent.XContentType;
@@ -92,9 +91,6 @@ SearchInput.Result doExecute(WatchExecutionContext ctx, WatcherSearchTemplateReq
 
         if (logger.isDebugEnabled()) {
             logger.debug("[{}] found [{}] hits", ctx.id(), response.getHits().getTotalHits().value);
-            for (SearchHit hit : response.getHits()) {
-                logger.debug("[{}] hit [{}]", ctx.id(), hit.getSourceAsMap());
-            }
         }
 
         final Payload payload;
diff --git a/x-pack/qa/rolling-upgrade-basic/src/test/java/org/elasticsearch/upgrades/BasicLicenseUpgradeIT.java b/x-pack/qa/rolling-upgrade-basic/src/test/java/org/elasticsearch/upgrades/BasicLicenseUpgradeIT.java
index 75fcc5cf6e7ad..da8a4c806a0f5 100644
--- a/x-pack/qa/rolling-upgrade-basic/src/test/java/org/elasticsearch/upgrades/BasicLicenseUpgradeIT.java
+++ b/x-pack/qa/rolling-upgrade-basic/src/test/java/org/elasticsearch/upgrades/BasicLicenseUpgradeIT.java
@@ -8,6 +8,7 @@
 
 import org.elasticsearch.client.Request;
 import org.elasticsearch.client.Response;
+import org.elasticsearch.rest.RestStatus;
 
 import java.util.Map;
 
@@ -28,7 +29,7 @@ private void checkBasicLicense() throws Exception {
         final Request request = new Request("GET", "/_license");
         // This avoids throwing a ResponseException when the license is not ready yet
         // allowing to retry the check using assertBusy
-        request.addParameter("ignore", "404");
+        setIgnoredErrorResponseCodes(request, RestStatus.NOT_FOUND);
         Response licenseResponse = client().performRequest(request);
         assertOK(licenseResponse);
         Map<String, Object> licenseResponseMap = entityAsMap(licenseResponse);
@@ -42,7 +43,7 @@ private void checkNonExpiringBasicLicense() throws Exception {
         final Request request = new Request("GET", "/_license");
         // This avoids throwing a ResponseException when the license is not ready yet
         // allowing to retry the check using assertBusy
-        request.addParameter("ignore", "404");
+        setIgnoredErrorResponseCodes(request, RestStatus.NOT_FOUND);
         Response licenseResponse = client().performRequest(request);
         assertOK(licenseResponse);
         Map<String, Object> licenseResponseMap = entityAsMap(licenseResponse);
diff --git a/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java b/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java
index b1e1888aba75d..0b7ab1fe5980d 100644
--- a/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java
+++ b/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java
@@ -88,7 +88,6 @@ public void testUniDirectionalIndexFollowing() throws Exception {
         }
     }
 
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/102000")
     public void testAutoFollowing() throws Exception {
         String leaderIndex1 = "logs-20200101";
         String leaderIndex2 = "logs-20200102";
@@ -372,7 +371,8 @@ private static void assertTotalHitCount(final String index, final int expectedTo
     private static void verifyTotalHitCount(final String index, final int expectedTotalHits, final RestClient client) throws IOException {
         final Request request = new Request("GET", "/" + index + "/_search");
         request.addParameter(TOTAL_HITS_AS_INT_PARAM, "true");
-        Map response = toMap(client.performRequest(request));
+        setIgnoredErrorResponseCodes(request, RestStatus.NOT_FOUND); // trip the assertOK (i.e. retry an assertBusy) rather than throwing
+        Map response = toMap(assertOK(client.performRequest(request)));
         final int totalHits = (int) XContentMapValues.extractValue("hits.total", response);
         assertThat(totalHits, equalTo(expectedTotalHits));
     }
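
The watcher history changes above all follow one pattern: capture the SearchHit inside the retried assertBusy block via an AtomicReference, then run the detailed assertions once on the captured hit instead of issuing a second, unguarded search. A minimal, framework-free sketch of that capture-inside-retry idea follows; the retryUntil helper, findHistory lookup, and HistoryDoc type are illustrative stand-ins, not Elasticsearch test APIs.

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

// Illustrative stand-in for the assertBusy/AtomicReference pattern used in
// HistoryActionConditionTests above: poll until the asynchronously written
// document is visible, stash it inside the polling block, and assert on it
// once afterwards instead of running a second, unguarded query.
public final class CaptureInsideRetryExample {

    // Hypothetical history document; in the real test this is a SearchHit.
    record HistoryDoc(String watchId, String status) {}

    // Hypothetical polling helper; ESTestCase#assertBusy plays this role in the real test.
    static void retryUntil(Runnable assertion, long timeout, TimeUnit unit) throws InterruptedException {
        long deadline = System.nanoTime() + unit.toNanos(timeout);
        while (true) {
            try {
                assertion.run();
                return;
            } catch (AssertionError e) {
                if (System.nanoTime() > deadline) {
                    throw e;
                }
                Thread.sleep(100);
            }
        }
    }

    // Hypothetical lookup standing in for searchHistory(...); returns null until the doc exists.
    static HistoryDoc findHistory(String watchId) {
        return new HistoryDoc(watchId, "success");
    }

    public static void main(String[] args) throws InterruptedException {
        AtomicReference<HistoryDoc> docRef = new AtomicReference<>();

        // The retried block only checks that the document exists and captures it.
        retryUntil(() -> {
            HistoryDoc doc = findHistory("my-watch");
            if (doc == null) {
                throw new AssertionError("history not written yet");
            }
            docRef.set(doc);
        }, 30, TimeUnit.SECONDS);

        // The detailed assertions run exactly once, on the captured document.
        HistoryDoc doc = docRef.get();
        if (!"success".equals(doc.status())) {
            throw new AssertionError("unexpected status: " + doc.status());
        }
        System.out.println("watch " + doc.watchId() + " executed with status " + doc.status());
    }
}

The benefit over the code removed above is that the follow-up assertions can never observe an empty or not-yet-written result: they only ever run against the value that already satisfied the retried existence check.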