diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index b5981d5ef40f3..3283e691f121c 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -48,7 +48,7 @@ steps: timeout_in_minutes: 300 matrix: setup: - BWC_VERSION: ["7.17.19", "8.12.2", "8.13.0", "8.14.0"] + BWC_VERSION: ["7.17.19", "8.12.3", "8.13.0", "8.14.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index bb2b4748b36ef..5e7c1a0960789 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -1873,6 +1873,22 @@ steps: env: BWC_VERSION: 8.12.2 + - label: "{{matrix.image}} / 8.12.3 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.12.3 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.12.3 + - label: "{{matrix.image}} / 8.13.0 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.13.0 timeout_in_minutes: 300 diff --git a/.buildkite/pipelines/periodic.template.yml b/.buildkite/pipelines/periodic.template.yml index a92e190be7963..42e922462c7ac 100644 --- a/.buildkite/pipelines/periodic.template.yml +++ b/.buildkite/pipelines/periodic.template.yml @@ -49,7 +49,6 @@ steps: - checkPart1 - checkPart2 - checkPart3 - - bwcTestSnapshots - checkRestCompat agents: provider: gcp @@ -59,6 +58,22 @@ steps: env: ES_RUNTIME_JAVA: "{{matrix.ES_RUNTIME_JAVA}}" GRADLE_TASK: "{{matrix.GRADLE_TASK}}" + - label: "{{matrix.ES_RUNTIME_JAVA}} / {{matrix.BWC_VERSION}} / java-fips-matrix-bwc" + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dtests.fips.enabled=true v$$BWC_VERSION#bwcTest + timeout_in_minutes: 300 + matrix: + setup: + ES_RUNTIME_JAVA: + - openjdk17 + BWC_VERSION: $BWC_LIST + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: n1-standard-32 + buildDirectory: /dev/shm/bk + env: + ES_RUNTIME_JAVA: "{{matrix.ES_RUNTIME_JAVA}}" + BWC_VERSION: "{{matrix.BWC_VERSION}}" - group: java-matrix steps: - label: "{{matrix.ES_RUNTIME_JAVA}} / {{matrix.GRADLE_TASK}} / java-matrix" @@ -69,9 +84,6 @@ steps: ES_RUNTIME_JAVA: - graalvm-ce17 - openjdk17 - - openjdk18 - - openjdk19 - - openjdk20 - openjdk21 - openjdk22 GRADLE_TASK: @@ -88,6 +100,25 @@ steps: env: ES_RUNTIME_JAVA: "{{matrix.ES_RUNTIME_JAVA}}" GRADLE_TASK: "{{matrix.GRADLE_TASK}}" + - label: "{{matrix.ES_RUNTIME_JAVA}} / {{matrix.BWC_VERSION}} / java-matrix-bwc" + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v$$BWC_VERSION#bwcTest + timeout_in_minutes: 300 + matrix: + setup: + ES_RUNTIME_JAVA: + - graalvm-ce17 + - openjdk17 + - openjdk21 + - openjdk22 + BWC_VERSION: $BWC_LIST + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: n1-standard-32 + buildDirectory: /dev/shm/bk + env: + ES_RUNTIME_JAVA: "{{matrix.ES_RUNTIME_JAVA}}" + BWC_VERSION: "{{matrix.BWC_VERSION}}" - label: release-tests command: .buildkite/scripts/release-tests.sh timeout_in_minutes: 360 diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 141975568d353..80f38dc79eecc 100644 --- a/.buildkite/pipelines/periodic.yml +++ 
b/.buildkite/pipelines/periodic.yml @@ -1152,6 +1152,16 @@ steps: buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.12.2 + - label: 8.12.3 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.12.3#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: n1-standard-32 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.12.3 - label: 8.13.0 / bwc command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.13.0#bwcTest timeout_in_minutes: 300 @@ -1220,7 +1230,6 @@ steps: - checkPart1 - checkPart2 - checkPart3 - - bwcTestSnapshots - checkRestCompat agents: provider: gcp @@ -1230,6 +1239,22 @@ steps: env: ES_RUNTIME_JAVA: "{{matrix.ES_RUNTIME_JAVA}}" GRADLE_TASK: "{{matrix.GRADLE_TASK}}" + - label: "{{matrix.ES_RUNTIME_JAVA}} / {{matrix.BWC_VERSION}} / java-fips-matrix-bwc" + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dtests.fips.enabled=true v$$BWC_VERSION#bwcTest + timeout_in_minutes: 300 + matrix: + setup: + ES_RUNTIME_JAVA: + - openjdk17 + BWC_VERSION: ["7.17.19", "8.12.3", "8.13.0", "8.14.0"] + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: n1-standard-32 + buildDirectory: /dev/shm/bk + env: + ES_RUNTIME_JAVA: "{{matrix.ES_RUNTIME_JAVA}}" + BWC_VERSION: "{{matrix.BWC_VERSION}}" - group: java-matrix steps: - label: "{{matrix.ES_RUNTIME_JAVA}} / {{matrix.GRADLE_TASK}} / java-matrix" @@ -1240,9 +1265,6 @@ steps: ES_RUNTIME_JAVA: - graalvm-ce17 - openjdk17 - - openjdk18 - - openjdk19 - - openjdk20 - openjdk21 - openjdk22 GRADLE_TASK: @@ -1259,6 +1281,25 @@ steps: env: ES_RUNTIME_JAVA: "{{matrix.ES_RUNTIME_JAVA}}" GRADLE_TASK: "{{matrix.GRADLE_TASK}}" + - label: "{{matrix.ES_RUNTIME_JAVA}} / {{matrix.BWC_VERSION}} / java-matrix-bwc" + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v$$BWC_VERSION#bwcTest + timeout_in_minutes: 300 + matrix: + setup: + ES_RUNTIME_JAVA: + - graalvm-ce17 + - openjdk17 + - openjdk21 + - openjdk22 + BWC_VERSION: ["7.17.19", "8.12.3", "8.13.0", "8.14.0"] + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: n1-standard-32 + buildDirectory: /dev/shm/bk + env: + ES_RUNTIME_JAVA: "{{matrix.ES_RUNTIME_JAVA}}" + BWC_VERSION: "{{matrix.BWC_VERSION}}" - label: release-tests command: .buildkite/scripts/release-tests.sh timeout_in_minutes: 360 diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 4c6349a86b800..8b454fa92ab02 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -114,5 +114,6 @@ BWC_VERSION: - "8.12.0" - "8.12.1" - "8.12.2" + - "8.12.3" - "8.13.0" - "8.14.0" diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index 96c111fd46948..d85a432684495 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,5 +1,5 @@ BWC_VERSION: - "7.17.19" - - "8.12.2" + - "8.12.3" - "8.13.0" - "8.14.0" diff --git a/README.asciidoc b/README.asciidoc index a8b3704887e5b..dc27735d3c015 100644 --- a/README.asciidoc +++ b/README.asciidoc @@ -1,20 +1,24 @@ = Elasticsearch -Elasticsearch is a distributed, RESTful search engine optimized for speed and relevance on production-scale workloads. You can use Elasticsearch to perform real-time search over massive datasets for applications including: +Elasticsearch is a distributed search and analytics engine optimized for speed and relevance on production-scale workloads. Elasticsearch is the foundation of Elastic's open Stack platform. 
Search in near real-time over massive datasets, perform vector searches, integrate with generative AI applications, and much more. -* Vector search +Use cases enabled by Elasticsearch include: + +* https://www.elastic.co/search-labs/blog/articles/retrieval-augmented-generation-rag[Retrieval Augmented Generation (RAG)] +* https://www.elastic.co/search-labs/blog/categories/vector-search[Vector search] * Full-text search * Logs * Metrics * Application performance monitoring (APM) * Security logs - \... and more! To learn more about Elasticsearch's features and capabilities, see our https://www.elastic.co/products/elasticsearch[product page]. +For information on https://www.elastic.co/search-labs/blog/categories/ml-research[machine learning innovations] and the latest https://www.elastic.co/search-labs/blog/categories/lucene[Lucene contributions from Elastic], see https://www.elastic.co/search-labs[Search Labs]. + [[get-started]] == Get started diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/JdkDownloadPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/JdkDownloadPluginFuncTest.groovy index 2640a5a43e167..67a04ebc5b7a0 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/JdkDownloadPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/JdkDownloadPluginFuncTest.groovy @@ -99,17 +99,17 @@ class JdkDownloadPluginFuncTest extends AbstractGradleFuncTest { given: def mockRepoUrl = urlPath(jdkVendor, jdkVersion, platform) def mockedContent = filebytes(jdkVendor, platform) - 3.times { - settingsFile << """ - include ':sub-$it' - """ - } buildFile.text = """ plugins { id 'elasticsearch.jdk-download' apply false } subprojects { + + } + """ + 3.times { + subProject(':sub-' + it) << """ apply plugin: 'elasticsearch.jdk-download' jdks { @@ -126,8 +126,8 @@ class JdkDownloadPluginFuncTest extends AbstractGradleFuncTest { println "JDK HOME: " + jdks.myJdk } } - } - """ + """ + } when: def result = WiremockFixture.withWireMock(mockRepoUrl, mockedContent) { server -> @@ -165,7 +165,7 @@ class JdkDownloadPluginFuncTest extends AbstractGradleFuncTest { architecture = "x64" } } - + tasks.register("getJdk", PrintJdk) { dependsOn jdks.myJdk jdkPath = jdks.myJdk.getPath() @@ -174,7 +174,7 @@ class JdkDownloadPluginFuncTest extends AbstractGradleFuncTest { class PrintJdk extends DefaultTask { @Input String jdkPath - + @TaskAction void print() { println "JDK HOME: " + jdkPath } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java index f0604ab33ceec..e0588ed440c57 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java @@ -32,7 +32,9 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; +import java.util.Optional; import javax.inject.Inject; @@ -142,6 +144,8 @@ public void execute(BuildFinishedFlowAction.Parameters parameters) throws FileNo System.out.println("Generating buildscan link for artifact..."); + // Output should be in the format: "<uuid> <timestamp>\n" + // and multiple artifacts could be 
returned Process process = new ProcessBuilder( "buildkite-agent", "artifact", @@ -150,7 +154,7 @@ public void execute(BuildFinishedFlowAction.Parameters parameters) throws FileNo "--step", System.getenv("BUILDKITE_JOB_ID"), "--format", - "%i" + "%i %c" ).start(); process.waitFor(); String processOutput; @@ -159,7 +163,17 @@ public void execute(BuildFinishedFlowAction.Parameters parameters) throws FileNo } catch (IOException e) { processOutput = ""; } - String artifactUuid = processOutput.trim(); + + // Sort them by timestamp, and grab the most recent one + Optional artifact = Arrays.stream(processOutput.trim().split("\n")).map(String::trim).min((a, b) -> { + String[] partsA = a.split(" "); + String[] partsB = b.split(" "); + // ISO-8601 timestamps can be sorted lexicographically + return partsB[1].compareTo(partsA[1]); + }); + + // Grab just the UUID from the artifact + String artifactUuid = artifact.orElse("").split(" ")[0]; System.out.println("Artifact UUID: " + artifactUuid); if (artifactUuid.isEmpty() == false) { diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties index d1d0da4b1c262..0883097e75aad 100644 --- a/build-tools-internal/version.properties +++ b/build-tools-internal/version.properties @@ -1,5 +1,5 @@ elasticsearch = 8.14.0 -lucene = 9.9.2 +lucene = 9.10.0 bundled_jdk_vendor = openjdk bundled_jdk = 21.0.2+13@f2283984656d49d69e91c558476027ac @@ -13,7 +13,7 @@ supercsv = 2.4.0 log4j = 2.19.0 slf4j = 2.0.6 ecsLogging = 1.2.0 -jna = 5.10.0 +jna = 5.12.1 netty = 4.1.107.Final commons_lang3 = 3.9 google_oauth_client = 1.34.1 diff --git a/build-tools/src/integTest/groovy/org/elasticsearch/gradle/DistributionDownloadPluginFuncTest.groovy b/build-tools/src/integTest/groovy/org/elasticsearch/gradle/DistributionDownloadPluginFuncTest.groovy index 228223897ede9..3a06bdf917ff6 100644 --- a/build-tools/src/integTest/groovy/org/elasticsearch/gradle/DistributionDownloadPluginFuncTest.groovy +++ b/build-tools/src/integTest/groovy/org/elasticsearch/gradle/DistributionDownloadPluginFuncTest.groovy @@ -70,25 +70,14 @@ class DistributionDownloadPluginFuncTest extends AbstractGradleFuncTest { def version = VersionProperties.getElasticsearch() def platform = ElasticsearchDistribution.Platform.LINUX - 3.times { - settingsFile << """ - include ':sub-$it' - """ - } buildFile.text = """ - import org.elasticsearch.gradle.Architecture - plugins { id 'elasticsearch.distribution-download' } - - subprojects { - apply plugin: 'elasticsearch.distribution-download' - - ${setupTestDistro(version, platform)} - ${setupDistroTask()} - } """ + 3.times { + subProject(':sub-' + it) << applyPluginAndSetupDistro(version, platform) + } when: def runner = gradleRunner('setupDistro', '-i', '-g', gradleUserHome) @@ -118,14 +107,6 @@ class DistributionDownloadPluginFuncTest extends AbstractGradleFuncTest { id 'elasticsearch.distribution-download' } - ${setupTestDistro(version, platform)} - ${setupDistroTask()} - - """ - } - - private static String setupTestDistro(String version, ElasticsearchDistribution.Platform platform) { - return """ elasticsearch_distributions { test_distro { version = "$version" @@ -134,11 +115,7 @@ class DistributionDownloadPluginFuncTest extends AbstractGradleFuncTest { architecture = Architecture.current(); } } - """ - } - private static String setupDistroTask() { - return """ tasks.register("setupDistro", Sync) { from(elasticsearch_distributions.test_distro) into("build/distro") diff --git 
a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java index bf539efaf3c30..54962ac241f75 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java @@ -433,7 +433,7 @@ private void commonNodeConfig() { if (node.getTestDistribution().equals(TestDistribution.INTEG_TEST)) { node.defaultConfig.put("xpack.security.enabled", "false"); } else { - if (node.getVersion().onOrAfter("7.16.0")) { + if (hasDeprecationIndexing(node)) { node.defaultConfig.put("cluster.deprecation_indexing.enabled", "false"); } } @@ -474,13 +474,17 @@ public void nextNodeToNextVersion() { commonNodeConfig(); nodeIndex += 1; if (node.getTestDistribution().equals(TestDistribution.DEFAULT)) { - if (node.getVersion().onOrAfter("7.16.0")) { + if (hasDeprecationIndexing(node)) { node.setting("cluster.deprecation_indexing.enabled", "false"); } } node.start(); } + private static boolean hasDeprecationIndexing(ElasticsearchNode node) { + return node.getVersion().onOrAfter("7.16.0") && node.getSettingKeys().contains("stateless.enabled") == false; + } + @Override public void extraConfigFile(String destination, File from) { nodes.all(node -> node.extraConfigFile(destination, from)); diff --git a/build.gradle b/build.gradle index c0b613beefea4..0cb4133f2ed6e 100644 --- a/build.gradle +++ b/build.gradle @@ -64,6 +64,17 @@ ext.testArtifact = { p, String name = "test" -> }; } +class StepExpansion { + String templatePath + List versions + String variable +} + +class ListExpansion { + List versions + String variable +} + tasks.register("updateCIBwcVersions") { def writeVersions = { File file, List versions -> file.text = "" @@ -73,42 +84,60 @@ tasks.register("updateCIBwcVersions") { } } - def writeBuildkiteList = { String outputFilePath, String pipelineTemplatePath, List versions -> + def writeBuildkitePipeline = { String outputFilePath, String pipelineTemplatePath, List listExpansions, List stepExpansions = [] -> def outputFile = file(outputFilePath) def pipelineTemplate = file(pipelineTemplatePath) - def listString = "[" + versions.collect { "\"${it}\"" }.join(", ") + "]" - outputFile.text = "# This file is auto-generated. See ${pipelineTemplatePath}\n" + pipelineTemplate.text.replaceAll('\\$BWC_LIST', listString) - } + def pipeline = pipelineTemplate.text - def writeBuildkiteSteps = { String outputFilePath, String pipelineTemplatePath, String stepTemplatePath, List versions -> - def outputFile = file(outputFilePath) - def pipelineTemplate = file(pipelineTemplatePath) - def stepTemplate = file(stepTemplatePath) + listExpansions.each { expansion -> + def listString = "[" + expansion.versions.collect { "\"${it}\"" }.join(", ") + "]" + pipeline = pipeline.replaceAll('\\$' + expansion.variable, listString) + } - def steps = "" - versions.each { - steps += "\n" + stepTemplate.text.replaceAll('\\$BWC_VERSION', it.toString()) + stepExpansions.each { expansion -> + def steps = "" + expansion.versions.each { + steps += "\n" + file(expansion.templatePath).text.replaceAll('\\$BWC_VERSION', it.toString()) + } + pipeline = pipeline.replaceAll(' *\\$' + expansion.variable, steps) } - outputFile.text = "# This file is auto-generated. See ${pipelineTemplatePath}\n" + pipelineTemplate.text.replaceAll(' *\\$BWC_STEPS', steps) + outputFile.text = "# This file is auto-generated. 
See ${pipelineTemplatePath}\n" + pipeline + } + + // Writes a Buildkite pipeline from a template, and replaces $BWC_LIST with an array of versions + // Useful for writing a list of versions in a matrix configuration + def expandBwcList = { String outputFilePath, String pipelineTemplatePath, List versions -> + writeBuildkitePipeline(outputFilePath, pipelineTemplatePath, [new ListExpansion(versions: versions, variable: "BWC_LIST")]) + } + + // Writes a Buildkite pipeline from a template, and replaces $BWC_STEPS with a list of steps, one for each version + // Useful when you need to configure more versions than are allowed in a matrix configuration + def expandBwcSteps = { String outputFilePath, String pipelineTemplatePath, String stepTemplatePath, List versions -> + writeBuildkitePipeline(outputFilePath, pipelineTemplatePath, [], [new StepExpansion(templatePath: stepTemplatePath, versions: versions, variable: "BWC_STEPS")]) + } doLast { writeVersions(file(".ci/bwcVersions"), BuildParams.bwcVersions.allIndexCompatible) writeVersions(file(".ci/snapshotBwcVersions"), BuildParams.bwcVersions.unreleasedIndexCompatible) - writeBuildkiteList( + expandBwcList( ".buildkite/pipelines/intake.yml", ".buildkite/pipelines/intake.template.yml", BuildParams.bwcVersions.unreleasedIndexCompatible ) - writeBuildkiteSteps( + writeBuildkitePipeline( ".buildkite/pipelines/periodic.yml", ".buildkite/pipelines/periodic.template.yml", - ".buildkite/pipelines/periodic.bwc.template.yml", - BuildParams.bwcVersions.allIndexCompatible + [ + new ListExpansion(versions: BuildParams.bwcVersions.unreleasedIndexCompatible, variable: "BWC_LIST"), + ], + [ + new StepExpansion(templatePath: ".buildkite/pipelines/periodic.bwc.template.yml", versions: BuildParams.bwcVersions.allIndexCompatible, variable: "BWC_STEPS"), + ] ) - writeBuildkiteSteps( + + expandBwcSteps( ".buildkite/pipelines/periodic-packaging.yml", ".buildkite/pipelines/periodic-packaging.template.yml", ".buildkite/pipelines/periodic-packaging.bwc.template.yml", diff --git a/distribution/archives/build.gradle b/distribution/archives/build.gradle index dcd9fbf733088..0508f29ef595a 100644 --- a/distribution/archives/build.gradle +++ b/distribution/archives/build.gradle @@ -11,7 +11,7 @@ import java.nio.file.Path apply plugin: 'elasticsearch.internal-distribution-archive-setup' -CopySpec archiveFiles(CopySpec modulesFiles, String distributionType, String platform, String architecture, boolean isTestDistro) { +CopySpec archiveFiles(String distributionType, String os, String architecture, boolean isTestDistro) { return copySpec { into("elasticsearch-${version}") { into('lib') { @@ -29,9 +29,9 @@ CopySpec archiveFiles(CopySpec modulesFiles, String distributionType, String pla into('bin') { with binFiles(distributionType, isTestDistro) } - into("darwin".equals(platform) ? 'jdk.app' : 'jdk') { + into("darwin".equals(os) ? 
'jdk.app' : 'jdk') { if (isTestDistro == false) { - with jdkFiles(project, platform, architecture) + with jdkFiles(project, os, architecture) } } into('') { @@ -56,7 +56,11 @@ CopySpec archiveFiles(CopySpec modulesFiles, String distributionType, String pla with noticeFile(isTestDistro) into('modules') { - with modulesFiles + if (isTestDistro) { + with integTestModulesFiles + } else { + with modulesFiles(os, architecture) + } } } } @@ -65,42 +69,42 @@ CopySpec archiveFiles(CopySpec modulesFiles, String distributionType, String pla distribution_archives { integTestZip { content { - archiveFiles(integTestModulesFiles, 'zip', null, 'x64', true) + archiveFiles('zip', null, null, true) } } windowsZip { archiveClassifier = 'windows-x86_64' content { - archiveFiles(modulesFiles('windows-x86_64'), 'zip', 'windows', 'x64', false) + archiveFiles('zip', 'windows', 'x64', false) } } darwinTar { archiveClassifier = 'darwin-x86_64' content { - archiveFiles(modulesFiles('darwin-x86_64'), 'tar', 'darwin', 'x64', false) + archiveFiles('tar', 'darwin', 'x64', false) } } darwinAarch64Tar { archiveClassifier = 'darwin-aarch64' content { - archiveFiles(modulesFiles('darwin-aarch64'), 'tar', 'darwin', 'aarch64', false) + archiveFiles('tar', 'darwin', 'aarch64', false) } } linuxAarch64Tar { archiveClassifier = 'linux-aarch64' content { - archiveFiles(modulesFiles('linux-aarch64'), 'tar', 'linux', 'aarch64', false) + archiveFiles('tar', 'linux', 'aarch64', false) } } linuxTar { archiveClassifier = 'linux-x86_64' content { - archiveFiles(modulesFiles('linux-x86_64'), 'tar', 'linux', 'x64', false) + archiveFiles('tar', 'linux', 'x64', false) } } } diff --git a/distribution/build.gradle b/distribution/build.gradle index e45f1d09625d6..c8cc60b6facf6 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -332,10 +332,10 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { } } - modulesFiles = { platform -> + modulesFiles = { os, architecture -> copySpec { eachFile { - if (it.relativePath.segments[-2] == 'bin' || ((platform == 'darwin-x86_64' || platform == 'darwin-aarch64') && it.relativePath.segments[-2] == 'MacOS')) { + if (it.relativePath.segments[-2] == 'bin' || (os == 'darwin' && it.relativePath.segments[-2] == 'MacOS')) { // bin files, wherever they are within modules (eg platform specific) should be executable // and MacOS is an alternative to bin on macOS it.mode = 0755 @@ -344,7 +344,12 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { } } List excludePlatforms = ['linux-x86_64', 'linux-aarch64', 'windows-x86_64', 'darwin-x86_64', 'darwin-aarch64'] - if (platform != null) { + if (os != null) { + String platform = os + '-' + architecture + if (architecture == 'x64') { + // ML platform dir uses the x86_64 nomenclature + platform = os + '-x86_64' + } excludePlatforms.remove(excludePlatforms.indexOf(platform)) } else { excludePlatforms = [] @@ -430,15 +435,15 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { } } - jdkFiles = { Project project, String platform, String architecture -> + jdkFiles = { Project project, String os, String architecture -> return copySpec { - from project.jdks."bundled_${platform}_${architecture}" + from project.jdks."bundled_${os}_${architecture}" exclude "demo/**" /* * The Contents/MacOS directory interferes with notarization, and is unused by our distribution, so we exclude * it from the build. 
*/ - if ("darwin".equals(platform)) { + if ("darwin".equals(os)) { exclude "Contents/MacOS" } eachFile { FileCopyDetails details -> diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index 1d0f77bd35970..1983736e4ee9e 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -143,7 +143,7 @@ def commonPackageConfig(String type, String architecture) { with libFiles } into('modules') { - with modulesFiles('linux-' + ((architecture == 'x64') ? 'x86_64' : architecture)) + with modulesFiles('linux', architecture) } into('jdk') { with jdkFiles(project, 'linux', architecture) diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java index c088e89338e74..3dc7af07d4d83 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java @@ -101,13 +101,13 @@ import java.util.zip.ZipOutputStream; import static org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase.forEachFileRecursively; -import static org.elasticsearch.test.hamcrest.RegexMatcher.matches; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.hasToString; +import static org.hamcrest.Matchers.matchesRegex; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.startsWith; @@ -1286,7 +1286,7 @@ public void testInvalidShaFileMismatchFilename() throws Exception { ) ); assertEquals(ExitCodes.IO_ERROR, e.exitCode); - assertThat(e, hasToString(matches("checksum file at \\[.*\\] is not for this plugin"))); + assertThat(e, hasToString(matchesRegex(".*checksum file at \\[.*\\] is not for this plugin.*"))); } public void testInvalidShaFileContainingExtraLine() throws Exception { diff --git a/docs-mdx/painless/painless-field-context.mdx b/docs-mdx/painless/painless-field-context.mdx new file mode 100644 index 0000000000000..8e3c38938b5b8 --- /dev/null +++ b/docs-mdx/painless/painless-field-context.mdx @@ -0,0 +1,136 @@ +--- +id: enElasticsearchPainlessPainlessFieldContext +slug: /en/elasticsearch/painless/painless-field-context +title: Field context +description: Description to be written +tags: [] +--- + +
+ +Use a Painless script to create a +[script field](((ref))/search-fields.html#script-fields) to return +a customized value for each document in the results of a query. + +**Variables** + +`params` (`Map`, read-only) + : User-defined parameters passed in as part of the query. + +`doc` (`Map`, read-only) + : Contains the fields of the specified document where each field is a + `List` of values. + +[`params['_source']`](((ref))/mapping-source-field.html) (`Map`, read-only) + : Contains extracted JSON in a `Map` and `List` structure for the fields + existing in a stored document. + +**Return** + +`Object` + : The customized value for each document. + +**API** + +Both the standard Painless API and +Specialized Field API are available. + +**Example** + +To run this example, first follow the steps in +context examples. + +You can then use these two example scripts to compute custom information +for each search hit and output it to two new fields. + +The first script gets the doc value for the `datetime` field and calls +the `getDayOfWeekEnum` function to determine the corresponding day of the week. + +```Painless +doc['datetime'].value.getDayOfWeekEnum().getDisplayName(TextStyle.FULL, Locale.ROOT) +``` + +The second script calculates the number of actors. Actors' names are stored +as a keyword array in the `actors` field. + +```Painless +doc['actors'].size() [^1] +``` +[^1]: By default, doc values are not available for `text` fields. If `actors` was +a `text` field, you could still calculate the number of actors by extracting +values from `_source` with `params['_source']['actors'].size()`. + +The following request returns the calculated day of week and the number of +actors that appear in each play: + +```console +GET seats/_search +{ + "size": 2, + "query": { + "match_all": {} + }, + "script_fields": { + "day-of-week": { + "script": { + "source": "doc['datetime'].value.getDayOfWeekEnum().getDisplayName(TextStyle.FULL, Locale.ROOT)" + } + }, + "number-of-actors": { + "script": { + "source": "doc['actors'].size()" + } + } + } +} +``` +{/* TEST[setup:seats] */} + +```console-result +{ + "took" : 68, + "timed_out" : false, + "_shards" : { + "total" : 1, + "successful" : 1, + "skipped" : 0, + "failed" : 0 + }, + "hits" : { + "total" : { + "value" : 11, + "relation" : "eq" + }, + "max_score" : 1.0, + "hits" : [ + { + "_index" : "seats", + "_id" : "1", + "_score" : 1.0, + "fields" : { + "day-of-week" : [ + "Thursday" + ], + "number-of-actors" : [ + 4 + ] + } + }, + { + "_index" : "seats", + "_id" : "2", + "_score" : 1.0, + "fields" : { + "day-of-week" : [ + "Thursday" + ], + "number-of-actors" : [ + 1 + ] + } + } + ] + } +} +``` +{/* TESTRESPONSE[s/"took" : 68/"took" : "$body.took"/] */} \ No newline at end of file diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index 86862769c70e3..6e4ffa8885fbf 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -1,8 +1,8 @@ include::{docs-root}/shared/versions/stack/{source_branch}.asciidoc[] -:lucene_version: 9.9.2 -:lucene_version_path: 9_9_2 +:lucene_version: 9.10.0 +:lucene_version_path: 9_10_0 :jdk: 11.0.2 :jdk_major: 11 :build_type: tar diff --git a/docs/changelog/103542.yaml b/docs/changelog/103542.yaml new file mode 100644 index 0000000000000..74e713eb2f606 --- /dev/null +++ b/docs/changelog/103542.yaml @@ -0,0 +1,7 @@ +pr: 103542 +summary: Flatten object mappings when subobjects is false +area: Mapping +type: feature +issues: + - 99860 + - 103497 diff --git a/docs/changelog/104026.yaml b/docs/changelog/104026.yaml deleted 
file mode 100644 index d9aa704de1dbd..0000000000000 --- a/docs/changelog/104026.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104026 -summary: Include user's privileges actions in IdP plugin `_has_privileges` request -area: IdentityProvider -type: enhancement -issues: [] diff --git a/docs/changelog/104830.yaml b/docs/changelog/104830.yaml new file mode 100644 index 0000000000000..c056f3d618b75 --- /dev/null +++ b/docs/changelog/104830.yaml @@ -0,0 +1,5 @@ +pr: 104830 +summary: All new `shard_seed` parameter for `random_sampler` agg +area: Aggregations +type: enhancement +issues: [] diff --git a/docs/changelog/104870.yaml b/docs/changelog/104870.yaml deleted file mode 100644 index 65bc9a964eb3e..0000000000000 --- a/docs/changelog/104870.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 104870 -summary: Make `_reset` action stop transforms without force first -area: Transform -type: bug -issues: - - 100596 - - 104825 diff --git a/docs/changelog/105024.yaml b/docs/changelog/105024.yaml deleted file mode 100644 index 96268b78ddf5d..0000000000000 --- a/docs/changelog/105024.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105024 -summary: "[Connectors API] Fix bug with crawler configuration parsing and `sync_now`\ - \ flag" -area: Application -type: bug -issues: [] diff --git a/docs/changelog/105048.yaml b/docs/changelog/105048.yaml deleted file mode 100644 index d865f447a0a93..0000000000000 --- a/docs/changelog/105048.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105048 -summary: "ES|QL: Fix exception handling on `date_parse` with wrong date pattern" -area: ES|QL -type: bug -issues: - - 104124 diff --git a/docs/changelog/105061.yaml b/docs/changelog/105061.yaml deleted file mode 100644 index ae8a36183e0e7..0000000000000 --- a/docs/changelog/105061.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105061 -summary: "ESQL: Push CIDR_MATCH to Lucene if possible" -area: ES|QL -type: bug -issues: - - 105042 diff --git a/docs/changelog/105066.yaml b/docs/changelog/105066.yaml deleted file mode 100644 index 95757a9edaf81..0000000000000 --- a/docs/changelog/105066.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105066 -summary: Fix handling of `ml.config_version` node attribute for nodes with machine learning disabled -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/105067.yaml b/docs/changelog/105067.yaml new file mode 100644 index 0000000000000..562e8271f5502 --- /dev/null +++ b/docs/changelog/105067.yaml @@ -0,0 +1,5 @@ +pr: 105067 +summary: "ESQL: Use faster field caps" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/105070.yaml b/docs/changelog/105070.yaml deleted file mode 100644 index ff4c115e21eea..0000000000000 --- a/docs/changelog/105070.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105070 -summary: Validate settings before reloading JWT shared secret -area: Authentication -type: bug -issues: [] diff --git a/docs/changelog/105096.yaml b/docs/changelog/105096.yaml deleted file mode 100644 index bfc72a6277bb1..0000000000000 --- a/docs/changelog/105096.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105096 -summary: Harden index mapping parameter check in enrich runner -area: Ingest Node -type: bug -issues: [] diff --git a/docs/changelog/105153.yaml b/docs/changelog/105153.yaml deleted file mode 100644 index 6c6b1f995df4b..0000000000000 --- a/docs/changelog/105153.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105153 -summary: Field-caps should read fields from up-to-dated shards -area: "Search" -type: bug -issues: - - 104809 diff --git a/docs/changelog/105183.yaml b/docs/changelog/105183.yaml deleted file mode 
100644 index 04ec159cf02d0..0000000000000 --- a/docs/changelog/105183.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 105183 -summary: Fix handling surrogate pairs in the XLM Roberta tokenizer -area: Machine Learning -type: bug -issues: - - 104626 - - 104981 diff --git a/docs/changelog/105213.yaml b/docs/changelog/105213.yaml deleted file mode 100644 index 40595a8166ef2..0000000000000 --- a/docs/changelog/105213.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105213 -summary: Inference service should reject tasks during shutdown -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/105228.yaml b/docs/changelog/105228.yaml deleted file mode 100644 index 7526a3caa81d9..0000000000000 --- a/docs/changelog/105228.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105228 -summary: Downsampling better handle if source index isn't allocated and fix bug in - retrieving last processed tsid -area: Downsampling -type: bug -issues: [] diff --git a/docs/changelog/105234.yaml b/docs/changelog/105234.yaml deleted file mode 100644 index eac54b948d4f6..0000000000000 --- a/docs/changelog/105234.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105234 -summary: Do not log warning when triggering an `ABORTING` transform -area: Transform -type: bug -issues: - - 105233 diff --git a/docs/changelog/105245.yaml b/docs/changelog/105245.yaml deleted file mode 100644 index f6093f2c7435e..0000000000000 --- a/docs/changelog/105245.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105245 -summary: Finalize all snapshots completed by shard snapshot updates -area: Snapshot/Restore -type: bug -issues: - - 104939 diff --git a/docs/changelog/105258.yaml b/docs/changelog/105258.yaml deleted file mode 100644 index e31e6ec0de749..0000000000000 --- a/docs/changelog/105258.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105258 -summary: Close `currentChunkedWrite` on client cancel -area: Network -type: bug -issues: [] diff --git a/docs/changelog/105293.yaml b/docs/changelog/105293.yaml deleted file mode 100644 index 33eb3884a7e53..0000000000000 --- a/docs/changelog/105293.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105293 -summary: Fix leaked HTTP response sent after close -area: Network -type: bug -issues: - - 104651 diff --git a/docs/changelog/105306.yaml b/docs/changelog/105306.yaml deleted file mode 100644 index 7b75c370901ab..0000000000000 --- a/docs/changelog/105306.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105306 -summary: Fix race in HTTP response shutdown handling -area: Network -type: bug -issues: [] diff --git a/docs/changelog/105315.yaml b/docs/changelog/105315.yaml deleted file mode 100644 index 207e72467a689..0000000000000 --- a/docs/changelog/105315.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105315 -summary: Always show `composed_of` field for composable index templates -area: Indices APIs -type: bug -issues: - - 104627 diff --git a/docs/changelog/105365.yaml b/docs/changelog/105365.yaml deleted file mode 100644 index 265e6dccc3915..0000000000000 --- a/docs/changelog/105365.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105365 -summary: Fix bug in `rule_query` where `text_expansion` errored because it was not - rewritten -area: Application -type: bug -issues: [] diff --git a/docs/changelog/105421.yaml b/docs/changelog/105421.yaml new file mode 100644 index 0000000000000..2ff9ef008c803 --- /dev/null +++ b/docs/changelog/105421.yaml @@ -0,0 +1,5 @@ +pr: 105421 +summary: "ESQL: Add timers to many status results" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/105442.yaml b/docs/changelog/105442.yaml new file mode 100644 index 
0000000000000..b0af1b634d984 --- /dev/null +++ b/docs/changelog/105442.yaml @@ -0,0 +1,6 @@ +pr: 105442 +summary: Handling exceptions on watcher reload +area: Watcher +type: bug +issues: + - 69842 diff --git a/docs/changelog/105578.yaml b/docs/changelog/105578.yaml new file mode 100644 index 0000000000000..1ffa0128c1d0a --- /dev/null +++ b/docs/changelog/105578.yaml @@ -0,0 +1,13 @@ +pr: 105578 +summary: Upgrade to Lucene 9.10.0 +area: Search +type: enhancement +issues: [] +highlight: + title: New Lucene 9.10 release + body: |- + - https://github.com/apache/lucene/pull/13090: Prevent humongous allocations in ScalarQuantizer when building quantiles. + - https://github.com/apache/lucene/pull/12962: Speedup concurrent multi-segment HNSW graph search + - https://github.com/apache/lucene/pull/13033: Range queries on numeric/date/ip fields now exit earlier on segments whose values don't intersect with the query range. This should especially help when there are other required clauses in the `bool` query and when the range filter is narrow, e.g. filtering on the last 5 minutes. + - https://github.com/apache/lucene/pull/13026: `bool` queries that mix `filter` and `should` clauses will now propagate minimum competitive scores through the `should` clauses. This should yield speedups when sorting by descending score. + notable: true diff --git a/docs/changelog/105593.yaml b/docs/changelog/105593.yaml new file mode 100644 index 0000000000000..4eef0d9404f42 --- /dev/null +++ b/docs/changelog/105593.yaml @@ -0,0 +1,5 @@ +pr: 105593 +summary: "ESQL: push down \"[text_field] is not null\"" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/105650.yaml b/docs/changelog/105650.yaml new file mode 100644 index 0000000000000..f43da5b315f4c --- /dev/null +++ b/docs/changelog/105650.yaml @@ -0,0 +1,6 @@ +pr: 105650 +summary: "ESQL: Fix wrong attribute shadowing in pushdown rules" +area: ES|QL +type: bug +issues: + - 105434 diff --git a/docs/changelog/105660.yaml b/docs/changelog/105660.yaml new file mode 100644 index 0000000000000..1b30a25417906 --- /dev/null +++ b/docs/changelog/105660.yaml @@ -0,0 +1,5 @@ +pr: 105660 +summary: "Text structure endpoints to determine the structure of a list of messages and of an indexed field" +area: Machine Learning +type: feature +issues: [] diff --git a/docs/changelog/105674.yaml b/docs/changelog/105674.yaml new file mode 100644 index 0000000000000..7b8d04f4687a3 --- /dev/null +++ b/docs/changelog/105674.yaml @@ -0,0 +1,6 @@ +pr: 105674 +summary: Health monitor concurrency fixes +area: Health +type: bug +issues: + - 105065 diff --git a/docs/changelog/105689.yaml b/docs/changelog/105689.yaml new file mode 100644 index 0000000000000..e76281f1b2fc7 --- /dev/null +++ b/docs/changelog/105689.yaml @@ -0,0 +1,6 @@ +pr: 105689 +summary: Fix `uri_parts` processor behaviour for missing extensions +area: Ingest Node +type: bug +issues: + - 105612 diff --git a/docs/changelog/105691.yaml b/docs/changelog/105691.yaml new file mode 100644 index 0000000000000..89797782b06ee --- /dev/null +++ b/docs/changelog/105691.yaml @@ -0,0 +1,5 @@ +pr: 105691 +summary: "ES|QL: Disable optimizations that rely on Expression.nullable()" +area: ES|QL +type: bug +issues: [] diff --git a/docs/changelog/105717.yaml b/docs/changelog/105717.yaml new file mode 100644 index 0000000000000..c75bc4fe65798 --- /dev/null +++ b/docs/changelog/105717.yaml @@ -0,0 +1,5 @@ +pr: 105717 +summary: Upgrade jna to 5.12.1 +area: Infra/Core +type: upgrade +issues: [] diff --git a/docs/changelog/105757.yaml 
b/docs/changelog/105757.yaml new file mode 100644 index 0000000000000..f11aed2b2d96b --- /dev/null +++ b/docs/changelog/105757.yaml @@ -0,0 +1,5 @@ +pr: 105757 +summary: Add pluggable `BuildVersion` in `NodeMetadata` +area: Infra/Core +type: enhancement +issues: [] diff --git a/docs/changelog/105768.yaml b/docs/changelog/105768.yaml new file mode 100644 index 0000000000000..49d7f1f15c453 --- /dev/null +++ b/docs/changelog/105768.yaml @@ -0,0 +1,5 @@ +pr: 105768 +summary: Add two new OGC functions ST_X and ST_Y +area: "ES|QL" +type: enhancement +issues: [] diff --git a/docs/changelog/105770.yaml b/docs/changelog/105770.yaml new file mode 100644 index 0000000000000..ec8ae4f380e2f --- /dev/null +++ b/docs/changelog/105770.yaml @@ -0,0 +1,5 @@ +pr: 105770 +summary: Field-caps field has value lookup use map instead of looping array +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/105772.yaml b/docs/changelog/105772.yaml new file mode 100644 index 0000000000000..73680aa04e5ab --- /dev/null +++ b/docs/changelog/105772.yaml @@ -0,0 +1,5 @@ +pr: 105772 +summary: "[ILM] Delete step deletes data stream with only one index" +area: ILM+SLM +type: bug +issues: [] diff --git a/docs/changelog/105779.yaml b/docs/changelog/105779.yaml new file mode 100644 index 0000000000000..3699ca0e2f246 --- /dev/null +++ b/docs/changelog/105779.yaml @@ -0,0 +1,5 @@ +pr: 105779 +summary: "[Profiling] Speed up serialization of flamegraph" +area: Application +type: enhancement +issues: [] diff --git a/docs/changelog/105781.yaml b/docs/changelog/105781.yaml new file mode 100644 index 0000000000000..c3ae7f0035904 --- /dev/null +++ b/docs/changelog/105781.yaml @@ -0,0 +1,5 @@ +pr: 105781 +summary: CCS with `minimize_roundtrips` performs incremental merges of each `SearchResponse` +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/105789.yaml b/docs/changelog/105789.yaml new file mode 100644 index 0000000000000..02a6936fa3294 --- /dev/null +++ b/docs/changelog/105789.yaml @@ -0,0 +1,6 @@ +pr: 105789 +summary: Make Health API more resilient to multi-version clusters +area: Health +type: bug +issues: + - 90183 diff --git a/docs/changelog/105791.yaml b/docs/changelog/105791.yaml new file mode 100644 index 0000000000000..f18b5e6b8fdd7 --- /dev/null +++ b/docs/changelog/105791.yaml @@ -0,0 +1,5 @@ +pr: 105791 +summary: "Bugfix: Disable eager loading `BitSetFilterCache` on Indexing Nodes" +area: Search +type: bug +issues: [] diff --git a/docs/changelog/105797.yaml b/docs/changelog/105797.yaml new file mode 100644 index 0000000000000..7c832e2e5e63c --- /dev/null +++ b/docs/changelog/105797.yaml @@ -0,0 +1,5 @@ +pr: 105797 +summary: Enable retrying on 500 error response from Cohere text embedding API +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/105847.yaml b/docs/changelog/105847.yaml new file mode 100644 index 0000000000000..a731395bc9a81 --- /dev/null +++ b/docs/changelog/105847.yaml @@ -0,0 +1,5 @@ +pr: 105847 +summary: (API+) CAT Nodes alias for shard header to match CAT Allocation +area: Stats +type: enhancement +issues: [] diff --git a/docs/changelog/105848.yaml b/docs/changelog/105848.yaml new file mode 100644 index 0000000000000..18291066177f6 --- /dev/null +++ b/docs/changelog/105848.yaml @@ -0,0 +1,5 @@ +pr: 105848 +summary: '`ProjectOperator` should not retain references to released blocks' +area: ES|QL +type: bug +issues: [] diff --git a/docs/changelog/105893.yaml b/docs/changelog/105893.yaml new file mode 100644 index 
0000000000000..c88736f5dda3d --- /dev/null +++ b/docs/changelog/105893.yaml @@ -0,0 +1,5 @@ +pr: 105893 +summary: Specialize serialization for `ArrayVectors` +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/105894.yaml b/docs/changelog/105894.yaml new file mode 100644 index 0000000000000..a1a99eaa6259b --- /dev/null +++ b/docs/changelog/105894.yaml @@ -0,0 +1,5 @@ +pr: 105894 +summary: Add allocation stats +area: Allocation +type: enhancement +issues: [] diff --git a/docs/changelog/105945.yaml b/docs/changelog/105945.yaml new file mode 100644 index 0000000000000..ec76faf6ef76f --- /dev/null +++ b/docs/changelog/105945.yaml @@ -0,0 +1,5 @@ +pr: 105945 +summary: "[Connector API] Fix default ordering in `SyncJob` list endpoint" +area: Application +type: bug +issues: [] diff --git a/docs/changelog/105985.yaml b/docs/changelog/105985.yaml new file mode 100644 index 0000000000000..2f2a8c1394070 --- /dev/null +++ b/docs/changelog/105985.yaml @@ -0,0 +1,5 @@ +pr: 105985 +summary: Wait forever for `IndexTemplateRegistry` asset installation +area: Indices APIs +type: enhancement +issues: [] diff --git a/docs/changelog/105987.yaml b/docs/changelog/105987.yaml new file mode 100644 index 0000000000000..d09a6907c72bf --- /dev/null +++ b/docs/changelog/105987.yaml @@ -0,0 +1,6 @@ +pr: 105987 +summary: Fix `categorize_text` aggregation nested under empty buckets +area: Machine Learning +type: bug +issues: + - 105836 diff --git a/docs/changelog/105994.yaml b/docs/changelog/105994.yaml new file mode 100644 index 0000000000000..ef9889d0a47af --- /dev/null +++ b/docs/changelog/105994.yaml @@ -0,0 +1,5 @@ +pr: 105994 +summary: Fix bug when nested knn pre-filter might match nested docs +area: Vector Search +type: bug +issues: [] diff --git a/docs/changelog/106020.yaml b/docs/changelog/106020.yaml new file mode 100644 index 0000000000000..094a43b430f89 --- /dev/null +++ b/docs/changelog/106020.yaml @@ -0,0 +1,5 @@ +pr: 106020 +summary: Fix resetting a job if the original reset task no longer exists. +area: Machine Learning +type: bug +issues: [] diff --git a/docs/changelog/106031.yaml b/docs/changelog/106031.yaml new file mode 100644 index 0000000000000..d0a0303e74164 --- /dev/null +++ b/docs/changelog/106031.yaml @@ -0,0 +1,13 @@ +pr: 106031 +summary: Deprecate allowing `fields` in scenarios where it is ignored +area: Mapping +type: deprecation +issues: [] +deprecation: + title: Deprecate allowing `fields` in scenarios where it is ignored + area: Mapping + details: The following mapped types have always ignored `fields` when using multi-fields. + This deprecation makes this clearer and we will completely disallow `fields` for + these mapped types in the future. + impact: "In the future, `join`, `aggregate_metric_double`, and `constant_keyword`,\ + \ will all disallow supplying `fields` as a parameter in the mapping." 
diff --git a/docs/changelog/106036.yaml b/docs/changelog/106036.yaml new file mode 100644 index 0000000000000..7b129c6c0a7a3 --- /dev/null +++ b/docs/changelog/106036.yaml @@ -0,0 +1,5 @@ +pr: 106036 +summary: Add status for enrich operator +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/106053.yaml b/docs/changelog/106053.yaml new file mode 100644 index 0000000000000..72cfe0207795d --- /dev/null +++ b/docs/changelog/106053.yaml @@ -0,0 +1,5 @@ +pr: 106053 +summary: Speed up serialization of `BytesRefArray` +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/106057.yaml b/docs/changelog/106057.yaml new file mode 100644 index 0000000000000..c07f658fbbf8a --- /dev/null +++ b/docs/changelog/106057.yaml @@ -0,0 +1,5 @@ +pr: 106057 +summary: Avoid computing `currentInferenceProcessors` on every cluster state +area: Machine Learning +type: bug +issues: [] diff --git a/docs/changelog/106060.yaml b/docs/changelog/106060.yaml new file mode 100644 index 0000000000000..2b6a47372ddd3 --- /dev/null +++ b/docs/changelog/106060.yaml @@ -0,0 +1,5 @@ +pr: 106060 +summary: "[Connector API] Fix serialisation of script params in connector index service" +area: Application +type: bug +issues: [] diff --git a/docs/changelog/106062.yaml b/docs/changelog/106062.yaml new file mode 100644 index 0000000000000..f4ff3df4045e6 --- /dev/null +++ b/docs/changelog/106062.yaml @@ -0,0 +1,6 @@ +pr: 106062 +summary: "During ML maintenance, reset jobs in the reset state without a corresponding\ + \ task" +area: Machine Learning +type: bug +issues: [] diff --git a/docs/changelog/106063.yaml b/docs/changelog/106063.yaml new file mode 100644 index 0000000000000..57c05370a943f --- /dev/null +++ b/docs/changelog/106063.yaml @@ -0,0 +1,5 @@ +pr: 106063 +summary: Consider `ShardRouting` roles when calculating shard copies in shutdown status +area: Infra/Node Lifecycle +type: bug +issues: [] diff --git a/docs/changelog/106102.yaml b/docs/changelog/106102.yaml new file mode 100644 index 0000000000000..b7c13514f6715 --- /dev/null +++ b/docs/changelog/106102.yaml @@ -0,0 +1,5 @@ +pr: 106102 +summary: Specialize serialization of array blocks +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/106105.yaml b/docs/changelog/106105.yaml new file mode 100644 index 0000000000000..09f80e9e71e6d --- /dev/null +++ b/docs/changelog/106105.yaml @@ -0,0 +1,5 @@ +pr: 106105 +summary: Respect --pass option in certutil csr mode +area: TLS +type: bug +issues: [] diff --git a/docs/changelog/106150.yaml b/docs/changelog/106150.yaml new file mode 100644 index 0000000000000..05bd8b06987c6 --- /dev/null +++ b/docs/changelog/106150.yaml @@ -0,0 +1,5 @@ +pr: 106150 +summary: Use correct system index bulk executor +area: CRUD +type: bug +issues: [] diff --git a/docs/changelog/106156.yaml b/docs/changelog/106156.yaml new file mode 100644 index 0000000000000..63232efe6e5fb --- /dev/null +++ b/docs/changelog/106156.yaml @@ -0,0 +1,6 @@ +pr: 106156 +summary: Disable parallel collection for terms aggregation with `min_doc_count` equals + to 0 +area: Aggregations +type: bug +issues: [] diff --git a/docs/changelog/106172.yaml b/docs/changelog/106172.yaml new file mode 100644 index 0000000000000..80d80b9d7f299 --- /dev/null +++ b/docs/changelog/106172.yaml @@ -0,0 +1,5 @@ +pr: 106172 +summary: "[Profiling] Allow to override index settings" +area: Application +type: enhancement +issues: [] diff --git a/docs/changelog/106189.yaml b/docs/changelog/106189.yaml new file mode 100644 index 
0000000000000..ec485f0e60efb --- /dev/null +++ b/docs/changelog/106189.yaml @@ -0,0 +1,6 @@ +pr: 106189 +summary: Fix numeric sorts in `_cat/nodes` +area: CAT APIs +type: bug +issues: + - 48070 diff --git a/docs/changelog/97561.yaml b/docs/changelog/97561.yaml new file mode 100644 index 0000000000000..cacefbf7e4ca3 --- /dev/null +++ b/docs/changelog/97561.yaml @@ -0,0 +1,5 @@ +pr: 97561 +summary: Add index forecasts to /_cat/allocation output +area: Allocation +type: enhancement +issues: [] diff --git a/docs/changelog/99048.yaml b/docs/changelog/99048.yaml new file mode 100644 index 0000000000000..722c145dae78f --- /dev/null +++ b/docs/changelog/99048.yaml @@ -0,0 +1,6 @@ +pr: 99048 +summary: String sha512() painless function +area: Infra/Scripting +type: enhancement +issues: + - 97691 diff --git a/docs/community-clients/index.asciidoc b/docs/community-clients/index.asciidoc index e15e766ffec0c..ebde04b02f18a 100644 --- a/docs/community-clients/index.asciidoc +++ b/docs/community-clients/index.asciidoc @@ -43,12 +43,12 @@ a number of clients that have been contributed by the community for various lang [[b4j]] == B4J * https://www.b4x.com/android/forum/threads/server-jelasticsearch-search-and-text-analytics.73335/[jElasticsearch]: - B4J client based on the official Java REST client. + B4J client based on the official Java REST client. **- Last release more than a year ago** [[cpp]] == C++ * https://github.com/seznam/elasticlient[elasticlient]: simple library for - simplified work with Elasticsearch in C++. + simplified work with Elasticsearch in C++. **- Last commit more than a year ago** [[clojure]] == Clojure @@ -57,7 +57,7 @@ a number of clients that have been contributed by the community for various lang Clojure client, based on the new official low-level REST client. * https://github.com/clojurewerkz/elastisch[Elastisch]: - Clojure client. + Clojure client. **- Last commit more than a year ago** [[coldfusion]] == ColdFusion (CFML) @@ -71,17 +71,17 @@ a number of clients that have been contributed by the community for various lang == Erlang * https://github.com/tsloughter/erlastic_search[erlastic_search]: - Erlang client using HTTP. + Erlang client using HTTP. **- Last commit more than a year ago** * https://github.com/datahogs/tirexs[Tirexs]: An https://github.com/elixir-lang/elixir[Elixir] based API/DSL, inspired by https://github.com/karmi/tire[Tire]. Ready to use in pure Erlang - environment. + environment. **- Last commit more than a year ago** * https://github.com/sashman/elasticsearch_elixir_bulk_processor[Elixir Bulk Processor]: Dynamically configurable Elixir port of the {client}/java-api/current/java-docs-bulk-processor.html[Bulk Processor]. - Implemented using GenStages to handle back pressure. + Implemented using GenStages to handle back pressure. **- Last commit more than a year ago** [[go]] == Go @@ -90,13 +90,13 @@ Also see the {client}/go-api/current/index.html[official Elasticsearch Go client]. * https://github.com/mattbaird/elastigo[elastigo]: - Go client. + Go client. **- Last commit more than a year ago** * https://github.com/olivere/elastic[elastic]: - Elasticsearch client for Google Go. + Elasticsearch client for Google Go. **- Last commit more than a year ago** * https://github.com/softctrl/elk[elk]: - Golang lib for Elasticsearch client. + Golang lib for Elasticsearch client. **- Last commit more than a year ago** [[haskell]] @@ -114,7 +114,7 @@ client]. Java Rest client with comprehensive Query DSL API. 
* https://github.com/searchbox-io/Jest[Jest]: - Java Rest client. + Java Rest client. ** - No longer maintained** [[javascript]] == JavaScript @@ -133,19 +133,19 @@ Elasticsearch client inspired by the {client}/ruby-api/current/index.html[offici * https://github.com/mbuhot/eskotlin[ES Kotlin]: Elasticsearch Query DSL for kotlin based on the - {client}/java-api/current/index.html[official Elasticsearch Java client]. + {client}/java-api/current/index.html[official Elasticsearch Java client]. **- Last commit more than a year ago** * https://github.com/jillesvangurp/es-kotlin-wrapper-client[ES Kotlin Wrapper Client]: Kotlin extension functions and abstractions for the {client}/java-api/current/index.html[official Elasticsearch high-level client]. Aims to reduce the amount of boilerplate needed to do searches, bulk - indexing and other common things users do with the client. + indexing and other common things users do with the client. **- No longer maintained** [[lua]] == Lua * https://github.com/DhavalKapil/elasticsearch-lua[elasticsearch-lua]: - Lua client for Elasticsearch + Lua client for Elasticsearch **- Last commit more than a year ago** [[dotnet]] == .NET @@ -158,7 +158,8 @@ See the {client}/net-api/current/index.html[official Elasticsearch .NET client]. Also see the {client}/perl-api/current/index.html[official Elasticsearch Perl client]. -* https://metacpan.org/pod/Elastijk[Elastijk]: A low-level, minimal HTTP client. +* https://metacpan.org/pod/Elastijk[Elastijk]: A low-level, minimal HTTP client. +**- Last commit more than a year ago** [[php]] @@ -171,11 +172,13 @@ client]. PHP client. * https://github.com/nervetattoo/elasticsearch[elasticsearch]: PHP client. +**- Last commit more than a year ago** * https://github.com/madewithlove/elasticsearcher[elasticsearcher]: Agnostic lightweight package on top of the Elasticsearch PHP client. Its main goal is to allow for easier structuring of queries and indices in your application. It does not want to hide or replace functionality of the Elasticsearch PHP client. +**- Last commit more than a year ago** [[python]] == Python @@ -191,9 +194,11 @@ client]. * https://github.com/ropensci/elasticdsl[elasticdsl]: A high-level R DSL for Elasticsearch, wrapping the elastic R client. + **- No longer maintained** * https://github.com/uptake/uptasticsearch[uptasticsearch]: - An R client tailored to data science workflows. + An R client tailored to data science workflows. + **- Last commit more than a year ago** [[ruby]] == Ruby @@ -202,6 +207,7 @@ Also see the {client}/ruby-api/current/index.html[official Elasticsearch Ruby cl * https://github.com/printercu/elastics-rb[elastics]: Tiny client with built-in zero-downtime migrations and ActiveRecord integration. + **- Last commit more than a year ago** * https://github.com/toptal/chewy[chewy]: An ODM and wrapper for the official Elasticsearch client. @@ -219,10 +225,12 @@ Also see the {client}/rust-api/current/index.html[official Elasticsearch Rust client]. * https://github.com/benashford/rs-es[rs-es]: - A REST API client with a strongly-typed Query DSL. + A REST API client with a strongly-typed Query DSL. + **- Last commit more than a year ago** * https://github.com/elastic-rs/elastic[elastic]: A modular REST API client that supports freeform queries. + **- Last commit more than a year ago** [[scala]] == Scala @@ -231,19 +239,23 @@ client]. Scala DSL. * https://github.com/gphat/wabisabi[wabisabi]: - Asynchronous REST API Scala client. + Asynchronous REST API Scala client. 
**- No longer maintained** * https://github.com/workday/escalar[escalar]: - Type-safe Scala wrapper for the REST API. + Type-safe Scala wrapper for the REST API. + **- Last commit more than a year ago** * https://github.com/SumoLogic/elasticsearch-client[elasticsearch-client]: Scala DSL that uses the REST API. Akka and AWS helpers included. + **- No longer maintained** + [[smalltalk]] == Smalltalk * https://github.com/newapplesho/elasticsearch-smalltalk[elasticsearch-smalltalk]: - Pharo Smalltalk client for Elasticsearch. + Pharo Smalltalk client for Elasticsearch. + **- Last commit more than a year ago** [[swift]] == Swift @@ -254,4 +266,5 @@ client]. == Vert.x * https://github.com/reactiverse/elasticsearch-client[elasticsearch-client]: - An Elasticsearch client for Eclipse Vert.x. + An Elasticsearch client for Eclipse Vert.x + **- Last commit more than a year ago** \ No newline at end of file diff --git a/docs/reference/analysis/normalizers.asciidoc b/docs/reference/analysis/normalizers.asciidoc index deb04a9bd44ba..6acd415437525 100644 --- a/docs/reference/analysis/normalizers.asciidoc +++ b/docs/reference/analysis/normalizers.asciidoc @@ -6,15 +6,15 @@ token. As a consequence, they do not have a tokenizer and only accept a subset of the available char filters and token filters. Only the filters that work on a per-character basis are allowed. For instance a lowercasing filter would be allowed, but not a stemming filter, which needs to look at the keyword as a -whole. The current list of filters that can be used in a normalizer is -following: `arabic_normalization`, `asciifolding`, `bengali_normalization`, +whole. The current list of filters that can be used in a normalizer definition +are: `arabic_normalization`, `asciifolding`, `bengali_normalization`, `cjk_width`, `decimal_digit`, `elision`, `german_normalization`, `hindi_normalization`, `indic_normalization`, `lowercase`, `pattern_replace`, `persian_normalization`, `scandinavian_folding`, `serbian_normalization`, `sorani_normalization`, `trim`, `uppercase`. Elasticsearch ships with a `lowercase` built-in normalizer. For other forms of -normalization a custom configuration is required. +normalization, a custom configuration is required. [discrete] === Custom normalizers diff --git a/docs/reference/api-conventions.asciidoc b/docs/reference/api-conventions.asciidoc index 64cb499a9cd4e..1a63af19b0a33 100644 --- a/docs/reference/api-conventions.asciidoc +++ b/docs/reference/api-conventions.asciidoc @@ -302,6 +302,14 @@ Content-Type: application/vnd.elasticsearch+json; compatible-with=7 Accept: application/vnd.elasticsearch+json; compatible-with=7 ---------------------------------------------------------------------- +[discrete] +[[api-push-back]] +=== HTTP `429 Too Many Requests` status code push back + +{es} APIs may respond with the HTTP `429 Too Many Requests` status code, indicating that the cluster is too busy +to handle the request. When this happens, consider retrying after a short delay. If the retry also receives +a `429 Too Many Requests` response, extend the delay by backing off exponentially before each subsequent retry. + [discrete] [[api-url-access-control]] === URL-based access control @@ -329,8 +337,22 @@ value `true`. All other values will raise an error. [discrete] === Number Values -All REST APIs support providing numbered parameters as `string` on top -of supporting the native JSON number types. 
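To make the `429 Too Many Requests` guidance above concrete, here is a minimal client-side sketch of retrying with exponential backoff. It is written in Python with the `requests` library purely as an illustration; the URL, request body, retry count, and delay values are assumptions rather than anything prescribed by the API.

[source,python]
----
# Minimal sketch (not part of the official docs) of the retry-with-backoff
# behaviour suggested for HTTP 429 responses. Endpoint and timings are
# illustrative assumptions.
import time
import requests

def search_with_backoff(url, body, max_retries=5, base_delay=1.0):
    """POST a search request, backing off exponentially on 429 responses."""
    for attempt in range(max_retries):
        response = requests.post(url, json=body, timeout=30)
        if response.status_code != 429:
            return response
        # The cluster is too busy: wait 1s, 2s, 4s, ... before trying again.
        time.sleep(base_delay * (2 ** attempt))
    raise RuntimeError("request still rejected with 429 after retries")

# Hypothetical usage against a local cluster:
# resp = search_with_backoff("http://localhost:9200/my-index-000001/_search",
#                            {"query": {"match_all": {}}})
----

In practice, capping the total wait time and adding jitter to the delays are also worth considering.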
+When passing a numeric parameter in a request body, you may use a `string` +containing the number instead of the native numeric type. For example: + +[source,console] +-------------------------------------------------- +POST /_search +{ + "size": "1000" +} +-------------------------------------------------- + +Integer-valued fields in a response body are described as `integer` (or +occasionally `long`) in this manual, but there are generally no explicit bounds +on such values. JSON, SMILE, CBOR and YAML all permit arbitrarily large integer +values. Do not assume that `integer` fields in a response body will always fit +into a 32-bit signed integer. [[byte-units]] [discrete] diff --git a/docs/reference/cat/allocation.asciidoc b/docs/reference/cat/allocation.asciidoc index f9574ed933398..7bab1926cff09 100644 --- a/docs/reference/cat/allocation.asciidoc +++ b/docs/reference/cat/allocation.asciidoc @@ -57,6 +57,16 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=cat-v] `shards`:: Number of primary and replica shards assigned to the node. +`shards.undesired`:: +Number of shards that are scheduled to be moved elsewhere in the cluster, +or -1 if an allocator other than the desired balance allocator is used. + +`write_load.forecast`:: +Sum of index write load forecasts. + +`disk.indices.forecast`:: +Sum of shard size forecasts. + `disk.indices`:: Disk space used by the node's shards. Does not include disk space for the <> or unassigned shards. @@ -99,6 +109,8 @@ IP address and port for the node. `node`:: Name for the node. Set using <>. +`node.role`, `r`, `role`, `nodeRole`:: +Node roles. [[cat-allocation-api-example]] ==== {api-examples-title} @@ -113,8 +125,8 @@ The API returns the following response: [source,txt] -------------------------------------------------- -shards disk.indices disk.used disk.avail disk.total disk.percent host ip node node.role - 1 260b 47.3gb 43.4gb 100.7gb 46 127.0.0.1 127.0.0.1 CSUXak2 himrst +shards shards.undesired write_load.forecast disk.indices.forecast disk.indices disk.used disk.avail disk.total disk.percent host ip node node.role + 1 0 0.0 260b 260b 47.3gb 43.4gb 100.7gb 46 127.0.0.1 127.0.0.1 CSUXak2 himrst -------------------------------------------------- // TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/ s/46/\\d+/] // TESTRESPONSE[s/CSUXak2 himrst/.+/ non_json] diff --git a/docs/reference/cat/nodes.asciidoc b/docs/reference/cat/nodes.asciidoc index b670ee26a20a9..da1ed532e41fa 100644 --- a/docs/reference/cat/nodes.asciidoc +++ b/docs/reference/cat/nodes.asciidoc @@ -318,7 +318,7 @@ Time spent in suggest, such as `0`. `suggest.total`, `suto`, `suggestTotal`:: Number of suggest operations, such as `0`. -`shard_stats.total_count`, `sstc`, `shardStatsTotalCount`:: +`shard_stats.total_count`, `sstc`, `shards`, `shardStatsTotalCount`:: Number of shards assigned.
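Returning to the new `_cat/allocation` columns documented above, the following is a minimal sketch of requesting them programmatically with Python's `requests` library. The cluster address and the column selection are illustrative assumptions.

[source,python]
----
# Sketch only: fetch the new allocation columns as JSON and print them per node.
import requests

ES_URL = "http://localhost:9200"  # assumed local cluster

columns = "node,shards,shards.undesired,write_load.forecast,disk.indices.forecast"
resp = requests.get(f"{ES_URL}/_cat/allocation",
                    params={"h": columns, "format": "json"})
resp.raise_for_status()

for row in resp.json():
    # Each row describes one node; the forecast columns come from the
    # desired balance allocator.
    print(row["node"], row["shards"], row["shards.undesired"])
----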
`mappings.total_count`, `mtc`, `mappingsTotalCount`:: diff --git a/docs/reference/cluster/get-desired-balance.asciidoc b/docs/reference/cluster/get-desired-balance.asciidoc index 2628b5abca9f3..3fd87dcfedc4f 100644 --- a/docs/reference/cluster/get-desired-balance.asciidoc +++ b/docs/reference/cluster/get-desired-balance.asciidoc @@ -7,6 +7,7 @@ NOTE: {cloud-only} Exposes: + * the desired balance computation and reconciliation stats * balancing stats such as distribution of shards, disk and ingest forecasts across nodes and data tiers (based on the current cluster state) diff --git a/docs/reference/cluster/nodes-stats.asciidoc b/docs/reference/cluster/nodes-stats.asciidoc index eacbabb99f045..c008b074acccd 100644 --- a/docs/reference/cluster/nodes-stats.asciidoc +++ b/docs/reference/cluster/nodes-stats.asciidoc @@ -50,6 +50,9 @@ using metrics. `adaptive_selection`:: Statistics about <>. + `allocations`:: + Statistics about allocated shards + `breaker`:: Statistics about the field data circuit breaker. @@ -1792,14 +1795,14 @@ Total number of unallocated bytes in all file stores. `available`:: (<>) Total disk space available to this Java virtual machine on all file -stores. Depending on OS or process level restrictions, this might appear +stores. Depending on OS or process level restrictions (e.g. XFS quotas), this might appear less than `free`. This is the actual amount of free disk space the {es} node can utilise. `available_in_bytes`:: (integer) Total number of bytes available to this Java virtual machine on all file -stores. Depending on OS or process level restrictions, this might appear +stores. Depending on OS or process level restrictions (e.g. XFS quotas), this might appear less than `free_in_bytes`. This is the actual amount of free disk space the {es} node can utilise. ======= @@ -2802,6 +2805,44 @@ search requests on the keyed node. The rank of this node; used for shard selection when routing search requests. ====== + +[[cluster-nodes-stats-api-response-body-allocations]] +`allocations`:: +(object) +Contains allocations statistics for the node. ++ +.Properties of `allocations` +[%collapsible%open] +====== +`shards`:: +(integer) +The number of shards currently allocated to this node + +`undesired_shards`:: +(integer) +The amount of shards that are scheduled to be moved elsewhere in the cluster +if desired balance allocator is used or -1 if any other allocator is used. + +`forecasted_ingest_load`:: +(double) +Total forecasted ingest load of all shards assigned to this node + +`forecasted_disk_usage`:: +(<>) +Forecasted size of all shards assigned to the node + +`forecasted_disk_usage_bytes`:: +(integer) +Forecasted size, in bytes, of all shards assigned to the node + +`current_disk_usage`:: +(<>) +Current size of all shards assigned to the node + +`current_disk_usage_bytes`:: +(integer) +Current size, in bytes, of all shards assigned to the node +====== ===== ==== diff --git a/docs/reference/data-streams/lifecycle/index.asciidoc b/docs/reference/data-streams/lifecycle/index.asciidoc index ef5558817885e..bf861df7c80d4 100644 --- a/docs/reference/data-streams/lifecycle/index.asciidoc +++ b/docs/reference/data-streams/lifecycle/index.asciidoc @@ -36,8 +36,8 @@ each data stream and performs the following steps: automatically tail merges the index. Data stream lifecycle executes a merge operation that only targets the long tail of small segments instead of the whole shard. 
As the segments are organised into tiers of exponential sizes, merging the long tail of small segments is only a -fraction of the cost of force mergeing to a single segment. The small segments would usually -hold the most recent data so tail mergeing will focus the merging resources on the higher-value +fraction of the cost of force merging to a single segment. The small segments would usually +hold the most recent data so tail merging will focus the merging resources on the higher-value data that is most likely to keep being queried. 4. If <> is configured it will execute all the configured downsampling rounds. diff --git a/docs/reference/esql/esql-across-clusters.asciidoc b/docs/reference/esql/esql-across-clusters.asciidoc new file mode 100644 index 0000000000000..f35a62c49aca3 --- /dev/null +++ b/docs/reference/esql/esql-across-clusters.asciidoc @@ -0,0 +1,224 @@ +[[esql-cross-clusters]] +=== Using {esql} across clusters + +++++ +Using {esql} across clusters +++++ + +[partintro] + +preview::["{ccs-cap} for {esql} is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."] + +With {esql}, you can execute a single query across multiple clusters. + +==== Prerequisites + +include::{es-repo-dir}/search/search-your-data/search-across-clusters.asciidoc[tag=ccs-prereqs] + +include::{es-repo-dir}/search/search-your-data/search-across-clusters.asciidoc[tag=ccs-gateway-seed-nodes] + +include::{es-repo-dir}/search/search-your-data/search-across-clusters.asciidoc[tag=ccs-proxy-mode] + +[discrete] +[[ccq-remote-cluster-setup]] +==== Remote cluster setup +include::{es-repo-dir}/search/search-your-data/search-across-clusters.asciidoc[tag=ccs-remote-cluster-setup] + +<1> Since `skip_unavailable` was not set on `cluster_three`, it uses +the default of `false`. See the <> +section for details. + +[discrete] +[[ccq-from]] +==== Query across multiple clusters + +In the `FROM` command, specify data streams and indices on remote clusters +using the format `:`. For instance, the following +{esql} request queries the `my-index-000001` index on a single remote cluster +named `cluster_one`: + +[source,esql] +---- +FROM cluster_one:my-index-000001 +| LIMIT 10 +---- + +Similarly, this {esql} request queries the `my-index-000001` index from +three clusters: + +* The local ("querying") cluster +* Two remote clusters, `cluster_one` and `cluster_two` + +[source,esql] +---- +FROM my-index-000001,cluster_one:my-index-000001,cluster_two:my-index-000001 +| LIMIT 10 +---- + +Likewise, this {esql} request queries the `my-index-000001` index from all +remote clusters (`cluster_one`, `cluster_two`, and `cluster_three`): + +[source,esql] +---- +FROM *:my-index-000001 +| LIMIT 10 +---- + +[discrete] +[[ccq-enrich]] +==== Enrich across clusters + +Enrich in {esql} across clusters operates similarly to <>. +If the enrich policy and its enrich indices are consistent across all clusters, simply +write the enrich command as you would without remote clusters. In this default mode, +{esql} can execute the enrich command on either the querying cluster or the fulfilling +clusters, aiming to minimize computation or inter-cluster data transfer. Ensuring that +the policy exists with consistent data on both the querying cluster and the fulfilling +clusters is critical for ES|QL to produce a consistent query result. 
+ +In the following example, the enrich with `hosts` policy can be executed on +either the querying cluster or the remote cluster `cluster_one`. + +[source,esql] +---- +FROM my-index-000001,cluster_one:my-index-000001 +| ENRICH hosts ON ip +| LIMIT 10 +---- + +Enrich with an {esql} query against remote clusters only can also happen on +the querying cluster. This means the below query requires the `hosts` enrich +policy to exist on the querying cluster as well. + +[source,esql] +---- +FROM cluster_one:my-index-000001,cluster_two:my-index-000001 +| LIMIT 10 +| ENRICH hosts ON ip +---- + +[discrete] +[[esql-enrich-coordinator]] +==== Enrich with coordinator mode + +{esql} provides the enrich `_coordinator` mode to force {esql} to execute the enrich +command on the querying cluster. This mode should be used when the enrich policy is +not available on the remote clusters or maintaining consistency of enrich indices +across clusters is challenging. + +[source,esql] +---- +FROM my-index-000001,cluster_one:my-index-000001 +| ENRICH _coordinator:hosts ON ip +| SORT host_name +| LIMIT 10 +---- + +[discrete] +[IMPORTANT] +==== +Enrich with the `_coordinator` mode usually increases inter-cluster data transfer and +workload on the querying cluster. +==== + +[discrete] +[[esql-enrich-remote]] +==== Enrich with remote mode + +{esql} also provides the enrich `_remote` mode to force {esql} to execute the enrich +command independently on each fulfilling cluster where the target indices reside. +This mode is useful for managing different enrich data on each cluster, such as detailed +information of hosts for each region where the target (main) indices contain +log events from these hosts. + +In the below example, the `hosts` enrich policy is required to exist on all +fulfilling clusters: the `querying` cluster (as local indices are included), +the remote cluster `cluster_one`, and `cluster_two`. + +[source,esql] +---- +FROM my-index-000001,cluster_one:my-index-000001,cluster_two:my-index-000001 +| ENRICH _remote:hosts ON ip +| SORT host_name +| LIMIT 10 +---- + +A `_remote` enrich cannot be executed after a <> +command. The following example would result in an error: + +[source,esql] +---- +FROM my-index-000001,cluster_one:my-index-000001,cluster_two:my-index-000001 +| STATS COUNT(*) BY ip +| ENRICH _remote:hosts ON ip +| SORT host_name +| LIMIT 10 +---- + +[discrete] +[[esql-multi-enrich]] +==== Multiple enrich commands + +You can include multiple enrich commands in the same query with different +modes. {esql} will attempt to execute them accordingly. For example, this +query performs two enriches, first with the `hosts` policy on any cluster +and then with the `vendors` policy on the querying cluster. + +[source,esql] +---- +FROM my-index-000001,cluster_one:my-index-000001,cluster_two:my-index-000001 +| ENRICH hosts ON ip +| ENRICH _coordinator:vendors ON os +| LIMIT 10 +---- + +A `_remote` enrich command can't be executed after a `_coordinator` enrich +command. The following example would result in an error. 
+ +[source,esql] +---- +FROM my-index-000001,cluster_one:my-index-000001,cluster_two:my-index-000001 +| ENRICH _coordinator:hosts ON ip +| ENRICH _remote:vendors ON os +| LIMIT 10 +---- + +[discrete] +[[ccq-exclude]] +==== Excluding clusters or indices from {esql} query + +To exclude an entire cluster, prefix the cluster alias with a minus sign in +the `FROM` command, for example: `-my_cluster:*`: + +[source,esql] +---- +FROM my-index-000001,cluster*:my-index-000001,-cluster_three:* +| LIMIT 10 +---- + +To exclude a specific remote index, prefix the index with a minus sign in +the `FROM` command, such as `my_cluster:-my_index`: + +[source,esql] +---- +FROM my-index-000001,cluster*:my-index-*,cluster_three:-my-index-000001 +| LIMIT 10 +---- + +[discrete] +[[ccq-skip-unavailable-clusters]] +==== Optional remote clusters + +{ccs-cap} for {esql} currently does not respect the `skip_unavailable` +setting. As a result, if a remote cluster specified in the request is +unavailable or failed, {ccs} for {esql} queries will fail regardless of the setting. + +We are actively working to align the behavior of {ccs} for {esql} with other +{ccs} APIs. This includes providing detailed execution information for each cluster +in the response, such as execution time, selected target indices, and shards. + +[discrete] +[[ccq-during-upgrade]] +==== Query across clusters during an upgrade + +include::{es-repo-dir}/search/search-your-data/search-across-clusters.asciidoc[tag=ccs-during-upgrade] diff --git a/docs/reference/esql/esql-functions-operators.asciidoc b/docs/reference/esql/esql-functions-operators.asciidoc index 375bb4ee9dd00..a1ad512fbe512 100644 --- a/docs/reference/esql/esql-functions-operators.asciidoc +++ b/docs/reference/esql/esql-functions-operators.asciidoc @@ -21,6 +21,9 @@ include::functions/string-functions.asciidoc[tag=string_list] <>:: include::functions/date-time-functions.asciidoc[tag=date_list] +<>:: +include::functions/spatial-functions.asciidoc[tag=spatial_list] + <>:: include::functions/type-conversion-functions.asciidoc[tag=type_list] @@ -37,6 +40,7 @@ include::functions/aggregation-functions.asciidoc[] include::functions/math-functions.asciidoc[] include::functions/string-functions.asciidoc[] include::functions/date-time-functions.asciidoc[] +include::functions/spatial-functions.asciidoc[] include::functions/type-conversion-functions.asciidoc[] include::functions/conditional-functions-and-expressions.asciidoc[] include::functions/mv-functions.asciidoc[] diff --git a/docs/reference/esql/esql-using.asciidoc b/docs/reference/esql/esql-using.asciidoc index f11fdd2d058a5..3e045163069ec 100644 --- a/docs/reference/esql/esql-using.asciidoc +++ b/docs/reference/esql/esql-using.asciidoc @@ -12,10 +12,14 @@ and set up alerts. Using {esql} in {elastic-sec} to investigate events in Timeline, create detection rules, and build {esql} queries using Elastic AI Assistant. +<>:: +Using {esql} to query across multiple clusters. + <>:: Using the <> to list and cancel {esql} queries. 
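As a rough illustration of the cross-cluster usage described above, the sketch below submits an {esql} query to the `_query` endpoint from Python with the `requests` library. The cluster aliases and index names are carried over from the examples in this section, and the endpoint address is an assumption.

[source,python]
----
# Sketch only: run a cross-cluster ES|QL query via the _query endpoint.
import requests

ES_URL = "http://localhost:9200"  # assumed querying cluster

query = """
FROM my-index-000001,cluster_one:my-index-000001,cluster_two:my-index-000001
| LIMIT 10
"""

resp = requests.post(f"{ES_URL}/_query", json={"query": query})
resp.raise_for_status()
result = resp.json()

# The response carries column metadata plus one list of values per row.
print([col["name"] for col in result["columns"]])
for row in result["values"]:
    print(row)
----

The same request body can also be sent from Kibana's Dev Tools console.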
include::esql-rest.asciidoc[] include::esql-kibana.asciidoc[] include::esql-security-solution.asciidoc[] +include::esql-across-clusters.asciidoc[] include::task-management.asciidoc[] diff --git a/docs/reference/esql/functions/mv-functions.asciidoc b/docs/reference/esql/functions/mv-functions.asciidoc index a95a3d36a9963..07d89e7879e67 100644 --- a/docs/reference/esql/functions/mv-functions.asciidoc +++ b/docs/reference/esql/functions/mv-functions.asciidoc @@ -17,7 +17,9 @@ * <> * <> * <> +* <> * <> +* <> // end::mv_list[] include::mv_avg.asciidoc[] @@ -29,4 +31,6 @@ include::mv_last.asciidoc[] include::mv_max.asciidoc[] include::mv_median.asciidoc[] include::mv_min.asciidoc[] +include::mv_slice.asciidoc[] include::mv_sum.asciidoc[] +include::mv_zip.asciidoc[] diff --git a/docs/reference/esql/functions/mv_slice.asciidoc b/docs/reference/esql/functions/mv_slice.asciidoc new file mode 100644 index 0000000000000..f4431b25232a2 --- /dev/null +++ b/docs/reference/esql/functions/mv_slice.asciidoc @@ -0,0 +1,47 @@ +[discrete] +[[esql-mv_slice]] +=== `MV_SLICE` + +*Syntax* + +[.text-center] +image::esql/functions/signature/mv_slice.svg[Embedded,opts=inline] + +*Parameters* + +`field`:: +Multivalue expression. If `null`, the function returns `null`. + +`start`:: +Start position. If `null`, the function returns `null`. The start argument can be negative. An index of -1 is used to specify the last value in the list. + +`end`:: +End position. Optional; if omitted, the position at `start` is returned. The end argument can be negative. An index of -1 is used to specify the last value in the list. + +*Description* + +Returns a subset of the multivalued field using the start and end index values. + +*Supported types* + +include::types/mv_slice.asciidoc[] + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/ints.csv-spec[tag=mv_slice_positive] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/ints.csv-spec[tag=mv_slice_positive-result] +|=== + +[source.merge.styled,esql] +---- +include::{esql-specs}/ints.csv-spec[tag=mv_slice_negative] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/ints.csv-spec[tag=mv_slice_negative-result] +|=== diff --git a/docs/reference/esql/functions/mv_zip.asciidoc b/docs/reference/esql/functions/mv_zip.asciidoc new file mode 100644 index 0000000000000..4e71e2cafb9c4 --- /dev/null +++ b/docs/reference/esql/functions/mv_zip.asciidoc @@ -0,0 +1,38 @@ +[discrete] +[[esql-mv_zip]] +=== `MV_ZIP` + +*Syntax* + +[.text-center] +image::esql/functions/signature/mv_zip.svg[Embedded,opts=inline] + +*Parameters* + +`mvLeft`:: +Multivalue expression. + +`mvRight`:: +Multivalue expression. + +`delim`:: +Delimiter. Optional; if omitted, `,` is used as a default delimiter. + +*Description* + +Combines the values from two multivalued fields with a delimiter that joins them together. 
+ +*Supported types* + +include::types/mv_zip.asciidoc[] + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/string.csv-spec[tag=mv_zip] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/string.csv-spec[tag=mv_zip-result] +|=== diff --git a/docs/reference/esql/functions/signature/mv_slice.svg b/docs/reference/esql/functions/signature/mv_slice.svg new file mode 100644 index 0000000000000..277566a35e47d --- /dev/null +++ b/docs/reference/esql/functions/signature/mv_slice.svg @@ -0,0 +1 @@ +MV_SLICE(v,start,end) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/mv_zip.svg b/docs/reference/esql/functions/signature/mv_zip.svg new file mode 100644 index 0000000000000..02c61b3c4bc5c --- /dev/null +++ b/docs/reference/esql/functions/signature/mv_zip.svg @@ -0,0 +1 @@ +MV_ZIP(mvLeft,mvRight,delim) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/st_x.svg b/docs/reference/esql/functions/signature/st_x.svg new file mode 100644 index 0000000000000..d6fac8a96505a --- /dev/null +++ b/docs/reference/esql/functions/signature/st_x.svg @@ -0,0 +1 @@ +ST_X(point) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/st_y.svg b/docs/reference/esql/functions/signature/st_y.svg new file mode 100644 index 0000000000000..c6dc23724d59c --- /dev/null +++ b/docs/reference/esql/functions/signature/st_y.svg @@ -0,0 +1 @@ +ST_Y(point) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/to_cartesianpoint.svg b/docs/reference/esql/functions/signature/to_cartesianpoint.svg new file mode 100644 index 0000000000000..44484e8321e2f --- /dev/null +++ b/docs/reference/esql/functions/signature/to_cartesianpoint.svg @@ -0,0 +1 @@ +TO_CARTESIANPOINT(v) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/to_cartesianshape.svg b/docs/reference/esql/functions/signature/to_cartesianshape.svg new file mode 100644 index 0000000000000..c16ce9a6c15bc --- /dev/null +++ b/docs/reference/esql/functions/signature/to_cartesianshape.svg @@ -0,0 +1 @@ +TO_CARTESIANSHAPE(v) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/to_geopoint.svg b/docs/reference/esql/functions/signature/to_geopoint.svg new file mode 100644 index 0000000000000..444817aa388b9 --- /dev/null +++ b/docs/reference/esql/functions/signature/to_geopoint.svg @@ -0,0 +1 @@ +TO_GEOPOINT(v) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/to_geoshape.svg b/docs/reference/esql/functions/signature/to_geoshape.svg new file mode 100644 index 0000000000000..91b02332ad806 --- /dev/null +++ b/docs/reference/esql/functions/signature/to_geoshape.svg @@ -0,0 +1 @@ +TO_GEOSHAPE(v) \ No newline at end of file diff --git a/docs/reference/esql/functions/spatial-functions.asciidoc b/docs/reference/esql/functions/spatial-functions.asciidoc new file mode 100644 index 0000000000000..d99fe36191a31 --- /dev/null +++ b/docs/reference/esql/functions/spatial-functions.asciidoc @@ -0,0 +1,16 @@ +[[esql-spatial-functions]] +==== {esql} spatial functions + +++++ +Spatial functions +++++ + +{esql} supports these spatial functions: + +// tag::spatial_list[] +* <> +* <> +// end::spatial_list[] + +include::st_x.asciidoc[] +include::st_y.asciidoc[] diff --git a/docs/reference/esql/functions/st_centroid.asciidoc b/docs/reference/esql/functions/st_centroid.asciidoc index abed1e71eab8f..cee0c85d5cb45 100644 --- 
a/docs/reference/esql/functions/st_centroid.asciidoc +++ b/docs/reference/esql/functions/st_centroid.asciidoc @@ -15,4 +15,9 @@ include::{esql-specs}/spatial.csv-spec[tag=st_centroid-airports-result] Supported types: -include::types/st_centroid.asciidoc[] +[%header.monospaced.styled,format=dsv,separator=|] +|=== +v | result +geo_point | geo_point +cartesian_point | cartesian_point +|=== diff --git a/docs/reference/esql/functions/st_x.asciidoc b/docs/reference/esql/functions/st_x.asciidoc new file mode 100644 index 0000000000000..0f40a66417f9f --- /dev/null +++ b/docs/reference/esql/functions/st_x.asciidoc @@ -0,0 +1,33 @@ +[discrete] +[[esql-st_x]] +=== `ST_X` + +*Syntax* + +[.text-center] +image::esql/functions/signature/st_x.svg[Embedded,opts=inline] + +*Parameters* + +`point`:: +Expression of type `geo_point` or `cartesian_point`. If `null`, the function returns `null`. + +*Description* + +Extracts the `x` coordinate from the supplied point. +If the point is of type `geo_point`, this is equivalent to extracting the `longitude` value. + +*Supported types* + +include::types/st_x.asciidoc[] + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/spatial.csv-spec[tag=st_x_y] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/spatial.csv-spec[tag=st_x_y-result] +|=== diff --git a/docs/reference/esql/functions/st_y.asciidoc b/docs/reference/esql/functions/st_y.asciidoc new file mode 100644 index 0000000000000..e876852228d83 --- /dev/null +++ b/docs/reference/esql/functions/st_y.asciidoc @@ -0,0 +1,33 @@ +[discrete] +[[esql-st_y]] +=== `ST_Y` + +*Syntax* + +[.text-center] +image::esql/functions/signature/st_y.svg[Embedded,opts=inline] + +*Parameters* + +`point`:: +Expression of type `geo_point` or `cartesian_point`. If `null`, the function returns `null`. + +*Description* + +Extracts the `y` coordinate from the supplied point. +If the point is of type `geo_point`, this is equivalent to extracting the `latitude` value.
+ +*Supported types* + +include::types/st_y.asciidoc[] + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/spatial.csv-spec[tag=st_x_y] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/spatial.csv-spec[tag=st_x_y-result] +|=== diff --git a/docs/reference/esql/functions/types/mv_slice.asciidoc b/docs/reference/esql/functions/types/mv_slice.asciidoc new file mode 100644 index 0000000000000..1891fed3631e9 --- /dev/null +++ b/docs/reference/esql/functions/types/mv_slice.asciidoc @@ -0,0 +1,17 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +v | start | end | result +boolean | integer | integer | boolean +cartesian_point | integer | integer | cartesian_point +cartesian_shape | integer | integer | cartesian_shape +datetime | integer | integer | datetime +double | integer | integer | double +geo_point | integer | integer | geo_point +geo_shape | integer | integer | geo_shape +integer | integer | integer | integer +ip | integer | integer | ip +keyword | integer | integer | keyword +long | integer | integer | long +text | integer | integer | text +version | integer | integer | version +|=== diff --git a/docs/reference/esql/functions/types/mv_zip.asciidoc b/docs/reference/esql/functions/types/mv_zip.asciidoc new file mode 100644 index 0000000000000..6ee6c29c77264 --- /dev/null +++ b/docs/reference/esql/functions/types/mv_zip.asciidoc @@ -0,0 +1,6 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +mvLeft | mvRight | delim | result +keyword | keyword | keyword | keyword +text | text | text | keyword +|=== diff --git a/docs/reference/esql/functions/types/st_centroid.asciidoc b/docs/reference/esql/functions/types/st_centroid.asciidoc deleted file mode 100644 index cbafb9d0fa6dc..0000000000000 --- a/docs/reference/esql/functions/types/st_centroid.asciidoc +++ /dev/null @@ -1,6 +0,0 @@ -[%header.monospaced.styled,format=dsv,separator=|] -|=== -v | result -geo_point | geo_point -cartesian_point | cartesian_point -|=== diff --git a/docs/reference/esql/functions/types/st_x.asciidoc b/docs/reference/esql/functions/types/st_x.asciidoc new file mode 100644 index 0000000000000..94ed4b296f1d4 --- /dev/null +++ b/docs/reference/esql/functions/types/st_x.asciidoc @@ -0,0 +1,6 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +point | result +cartesian_point | double +geo_point | double +|=== diff --git a/docs/reference/esql/functions/types/st_y.asciidoc b/docs/reference/esql/functions/types/st_y.asciidoc new file mode 100644 index 0000000000000..94ed4b296f1d4 --- /dev/null +++ b/docs/reference/esql/functions/types/st_y.asciidoc @@ -0,0 +1,6 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +point | result +cartesian_point | double +geo_point | double +|=== diff --git a/docs/reference/esql/index.asciidoc b/docs/reference/esql/index.asciidoc index 8fb20b981b93e..531336277ba6b 100644 --- a/docs/reference/esql/index.asciidoc +++ b/docs/reference/esql/index.asciidoc @@ -56,7 +56,7 @@ GROK>> and <>. <>:: An overview of using the <>, <>, -<>, and <>. +<>, <>, and <>. <>:: The current limitations of {esql}. 
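The sketch below is an informal way to try the functions introduced above (`MV_SLICE`, `MV_ZIP`, `TO_GEOPOINT`, `ST_X`, and `ST_Y`) without indexing any data, by sending a `ROW`-based query to the `_query` endpoint. The Python `requests` usage and the cluster address are assumptions for illustration.

[source,python]
----
# Sketch only: exercise the new multivalue and spatial functions with ROW,
# so no index is required.
import requests

ES_URL = "http://localhost:9200"  # assumed cluster address

query = """
ROW wkt = "POINT(42.97 14.75)", a = ["foo", "bar", "baz"]
| EVAL pt = TO_GEOPOINT(wkt)
| EVAL x = ST_X(pt), y = ST_Y(pt)
| EVAL first_two = MV_SLICE(a, 0, 1), zipped = MV_ZIP(a, a, "-")
"""

resp = requests.post(f"{ES_URL}/_query", json={"query": query})
resp.raise_for_status()
# Expect x = 42.97, y = 14.75, first_two = ["foo", "bar"],
# and zipped = ["foo-foo", "bar-bar", "baz-baz"].
print(resp.json()["values"])
----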
diff --git a/docs/reference/esql/processing-commands/enrich.asciidoc b/docs/reference/esql/processing-commands/enrich.asciidoc index 603683858b8c0..f73eea6018cbc 100644 --- a/docs/reference/esql/processing-commands/enrich.asciidoc +++ b/docs/reference/esql/processing-commands/enrich.asciidoc @@ -15,6 +15,10 @@ ENRICH policy [ON match_field] [WITH [new_name1 = ]field1, [new_name2 = ]field2, The name of the enrich policy. You need to <> and <> the enrich policy first. +`mode`:: +The mode of the enrich command in cross cluster {esql}. +See <>. + `match_field`:: The match field. `ENRICH` uses its value to look for records in the enrich index. If not specified, the match will be performed on the column with the same diff --git a/docs/reference/esql/source-commands/from.asciidoc b/docs/reference/esql/source-commands/from.asciidoc index dbb5010060257..d81c46530e089 100644 --- a/docs/reference/esql/source-commands/from.asciidoc +++ b/docs/reference/esql/source-commands/from.asciidoc @@ -66,6 +66,16 @@ or aliases: FROM employees-00001,other-employees-* ---- +Use the format `:` to query data streams and indices +on remote clusters: + +[source,esql] +---- +FROM cluster_one:employees-00001,cluster_two:other-employees-* +---- + +See <>. + Use the optional `METADATA` directive to enable <>: [source,esql] diff --git a/docs/reference/how-to/knn-search.asciidoc b/docs/reference/how-to/knn-search.asciidoc index 15e3ff7c38e86..bfe99ad615c47 100644 --- a/docs/reference/how-to/knn-search.asciidoc +++ b/docs/reference/how-to/knn-search.asciidoc @@ -17,7 +17,9 @@ The default <> is `float`. But this can be automatically quantized during index time through <>. Quantization will reduce the required memory by 4x, but it will also reduce the precision of the vectors and -increase disk usage for the field (by up to 25%). +increase disk usage for the field (by up to 25%). Increased disk usage is a +result of {es} storing both the quantized and the unquantized vectors. +For example, when quantizing 40GB of floating point vectors an extra 10GB of data will be stored for the quantized vectors. The total disk usage amounts to 50GB, but the memory usage for fast search will be reduced to 10GB. For `float` vectors with `dim` greater than or equal to `384`, using a <> index is highly recommended. diff --git a/docs/reference/images/search/learning-to-rank-feature-extraction.png b/docs/reference/images/search/learning-to-rank-feature-extraction.png new file mode 100644 index 0000000000000..6dc2ee31902f6 Binary files /dev/null and b/docs/reference/images/search/learning-to-rank-feature-extraction.png differ diff --git a/docs/reference/images/search/learning-to-rank-judgment-list.png b/docs/reference/images/search/learning-to-rank-judgment-list.png new file mode 100644 index 0000000000000..3f0c212df321b Binary files /dev/null and b/docs/reference/images/search/learning-to-rank-judgment-list.png differ diff --git a/docs/reference/images/search/learning-to-rank-overview.png b/docs/reference/images/search/learning-to-rank-overview.png new file mode 100644 index 0000000000000..ea9557a70ac78 Binary files /dev/null and b/docs/reference/images/search/learning-to-rank-overview.png differ diff --git a/docs/reference/index-modules/slowlog.asciidoc b/docs/reference/index-modules/slowlog.asciidoc index 55c0867e485f5..c29296b59ad4a 100644 --- a/docs/reference/index-modules/slowlog.asciidoc +++ b/docs/reference/index-modules/slowlog.asciidoc @@ -58,33 +58,56 @@ The search slow log file is configured in the `log4j2.properties` file. 
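Looping back to the vector quantization note above, the arithmetic below is a back-of-the-envelope check of the quoted figures. It assumes `float32` vectors use 4 bytes per dimension while `int8` quantized vectors use 1 byte, so the quantized copy is roughly a quarter of the original size.

[source,python]
----
# Sketch only: reproduce the 40GB example from the kNN tuning section.
float_gb = 40.0                          # raw float32 vector data on disk
quantized_gb = float_gb / 4              # int8 copy is ~25% of the float size
total_disk_gb = float_gb + quantized_gb  # both copies are kept on disk
search_memory_gb = quantized_gb          # only the quantized copy is held in memory for fast search

print(total_disk_gb, search_memory_gb)   # 50.0 10.0
----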
[discrete] ==== Identifying search slow log origin -It is often useful to identify what triggered a slow running query. If a call was initiated with an `X-Opaque-ID` header, then the user ID -is included in Search Slow logs as an additional **id** field +It is often useful to identify what triggered a slow running query. +To include information about the user that triggered a slow search, +use the `index.search.slowlog.include.user` setting. + +[source,console] +-------------------------------------------------- +PUT /my-index-000001/_settings +{ + "index.search.slowlog.include.user": true +} +-------------------------------------------------- +// TEST[setup:my_index] + +This will result in user information being included in the slow log. [source,js] --------------------------- { - "type": "index_search_slowlog", - "timestamp": "2030-08-30T11:59:37,786+02:00", - "level": "WARN", - "component": "i.s.s.query", - "cluster.name": "distribution_run", - "node.name": "node-0", - "message": "[index6][0]", - "took": "78.4micros", - "took_millis": "0", - "total_hits": "0 hits", - "stats": "[]", - "search_type": "QUERY_THEN_FETCH", - "total_shards": "1", - "source": "{\"query\":{\"match_all\":{\"boost\":1.0}}}", - "id": "MY_USER_ID", - "cluster.uuid": "Aq-c-PAeQiK3tfBYtig9Bw", - "node.id": "D7fUYfnfTLa2D7y-xw6tZg" + "@timestamp": "2024-02-21T12:42:37.255Z", + "log.level": "WARN", + "auth.type": "REALM", + "elasticsearch.slowlog.id": "tomcat-123", + "elasticsearch.slowlog.message": "[index6][0]", + "elasticsearch.slowlog.search_type": "QUERY_THEN_FETCH", + "elasticsearch.slowlog.source": "{\"query\":{\"match_all\":{\"boost\":1.0}}}", + "elasticsearch.slowlog.stats": "[]", + "elasticsearch.slowlog.took": "747.3micros", + "elasticsearch.slowlog.took_millis": 0, + "elasticsearch.slowlog.total_hits": "1 hits", + "elasticsearch.slowlog.total_shards": 1, + "user.name": "elastic", + "user.realm": "reserved", + "ecs.version": "1.2.0", + "service.name": "ES_ECS", + "event.dataset": "elasticsearch.index_search_slowlog", + "process.thread.name": "elasticsearch[runTask-0][search][T#5]", + "log.logger": "index.search.slowlog.query", + "elasticsearch.cluster.uuid": "Ui23kfF1SHKJwu_hI1iPPQ", + "elasticsearch.node.id": "JK-jn-XpQ3OsDUsq5ZtfGg", + "elasticsearch.node.name": "node-0", + "elasticsearch.cluster.name": "distribution_run" } + --------------------------- // NOTCONSOLE +If a call was initiated with an `X-Opaque-ID` header, then the ID is included +in Search Slow logs in the **elasticsearch.slowlog.id** field. See +<> for details and best practices. + [discrete] [[index-slow-log]] === Index Slow log @@ -119,6 +142,18 @@ PUT /my-index-000001/_settings -------------------------------------------------- // TEST[setup:my_index] +To include information about the user that triggered a slow indexing event, +use the `index.indexing.slowlog.include.user` setting. + +[source,console] +-------------------------------------------------- +PUT /my-index-000001/_settings +{ + "index.indexing.slowlog.include.user": true +} +-------------------------------------------------- +// TEST[setup:my_index] + By default Elasticsearch will log the first 1000 characters of the _source in the slowlog. You can change that with `index.indexing.slowlog.source`. 
Setting it to `false` or `0` will skip logging the source entirely, while setting it to diff --git a/docs/reference/indices/clone-index.asciidoc b/docs/reference/indices/clone-index.asciidoc index 748b3adddd528..ef8ed28c6ac05 100644 --- a/docs/reference/indices/clone-index.asciidoc +++ b/docs/reference/indices/clone-index.asciidoc @@ -31,17 +31,13 @@ POST /my-index-000001/_clone/cloned-my-index-000001 For example, the following request prevents write operations on `my_source_index` -so it can be cloned. +so it can be cloned using the +<> API. Metadata changes like deleting the index are still allowed. [source,console] -------------------------------------------------- -PUT /my_source_index/_settings -{ - "settings": { - "index.blocks.write": true - } -} +PUT /my_source_index/_block/write -------------------------------------------------- // TEST[s/^/PUT my_source_index\n/] diff --git a/docs/reference/indices/shrink-index.asciidoc b/docs/reference/indices/shrink-index.asciidoc index 9ebc6ff5ef5a5..5d5e6c24d9e83 100644 --- a/docs/reference/indices/shrink-index.asciidoc +++ b/docs/reference/indices/shrink-index.asciidoc @@ -39,8 +39,8 @@ replica shards. You can later re-add replica shards as part of the shrink operation. You can use the following <> -request to remove an index's replica shards, relocates the index's remaining -shards to the same node, and make the index read-only. +request to remove an index's replica shards, and relocate the index's remaining +shards to the same node. [source,console] -------------------------------------------------- @@ -48,8 +48,7 @@ PUT /my_source_index/_settings { "settings": { "index.number_of_replicas": 0, <1> - "index.routing.allocation.require._name": "shrink_node_name", <2> - "index.blocks.write": true <3> + "index.routing.allocation.require._name": "shrink_node_name" <2> } } -------------------------------------------------- @@ -58,15 +57,20 @@ PUT /my_source_index/_settings <1> Removes replica shards for the index. <2> Relocates the index's shards to the `shrink_node_name` node. See <>. -<3> Prevents write operations to this index. Metadata changes, such as deleting - the index, are still allowed. - It can take a while to relocate the source index. Progress can be tracked with the <>, or the <> can be used to wait until all shards have relocated with the `wait_for_no_relocating_shards` parameter. +You can then make the index read-only with the following request using the +<> API: + +[source,console] +-------------------------------------------------- +PUT /my_source_index/_block/write +-------------------------------------------------- +// TEST[continued] [[shrink-index-api-desc]] ==== {api-description-title} @@ -101,7 +105,8 @@ A shrink operation: disks) . Recovers the target index as though it were a closed index which - had just been re-opened. + had just been re-opened. Recovers shards to <> + `.routing.allocation.initial_recovery._id`. [[_shrinking_an_index]] diff --git a/docs/reference/indices/split-index.asciidoc b/docs/reference/indices/split-index.asciidoc index 06f048856348e..26ae0f19b177c 100644 --- a/docs/reference/indices/split-index.asciidoc +++ b/docs/reference/indices/split-index.asciidoc @@ -37,23 +37,15 @@ POST /my-index-000001/_split/split-my-index-000001 ** The index must be read-only. ** The <> status must be green. 
-You can do make an index read-only -with the following request: +You can do make an index read-only with the following request using the +<> API: [source,console] -------------------------------------------------- -PUT /my_source_index/_settings -{ - "settings": { - "index.blocks.write": true <1> - } -} +PUT /my_source_index/_block/write -------------------------------------------------- // TEST[s/^/PUT my_source_index\n/] -<1> Prevents write operations to this index while still allowing metadata - changes like deleting the index. - The current write index on a data stream cannot be split. In order to split the current write index, the data stream must first be <> so that a new write index is created diff --git a/docs/reference/inference/put-inference.asciidoc b/docs/reference/inference/put-inference.asciidoc index 5332808d2ce12..2c0d4d38548bb 100644 --- a/docs/reference/inference/put-inference.asciidoc +++ b/docs/reference/inference/put-inference.asciidoc @@ -6,11 +6,11 @@ experimental[] Creates a model to perform an {infer} task. -IMPORTANT: The {infer} APIs enable you to use certain services, such as built-in -{ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, or -Hugging Face, in your cluster. For built-in models and models uploaded though -Eland, the {infer} APIs offer an alternative way to use and manage trained -models. However, if you do not plan to use the {infer} APIs to use these models +IMPORTANT: The {infer} APIs enable you to use certain services, such as built-in +{ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, or +Hugging Face. For built-in models and models uploaded though +Eland, the {infer} APIs offer an alternative way to use and manage trained +models. However, if you do not plan to use the {infer} APIs to use these models or if you want to use non-NLP models, use the <>. @@ -41,7 +41,7 @@ The following services are available through the {infer} API: * ELSER * Hugging Face * OpenAI -* text embedding (for built-in models and models uploaded through Eland) +* Elasticsearch (for built-in models and models uploaded through Eland) [discrete] @@ -68,12 +68,12 @@ The type of the {infer} task that the model will perform. Available task types: (Required, string) The type of service supported for the specified task type. Available services: -* `cohere`: specify the `text_embedding` task type to use the Cohere service. +* `cohere`: specify the `text_embedding` task type to use the Cohere service. * `elser`: specify the `sparse_embedding` task type to use the ELSER service. -* `hugging_face`: specify the `text_embedding` task type to use the Hugging Face +* `hugging_face`: specify the `text_embedding` task type to use the Hugging Face service. * `openai`: specify the `text_embedding` task type to use the OpenAI service. -* `text_embedding`: specify the `text_embedding` task type to use the E5 +* `elasticsearch`: specify the `text_embedding` task type to use the E5 built-in model or text embedding models uploaded by Eland. `service_settings`:: @@ -86,14 +86,14 @@ Settings used to install the {infer} model. These settings are specific to the ===== `api_key`::: (Required, string) -A valid API key of your Cohere account. You can find your Cohere API keys or you -can create a new one +A valid API key of your Cohere account. You can find your Cohere API keys or you +can create a new one https://dashboard.cohere.com/api-keys[on the API keys settings page]. 
-IMPORTANT: You need to provide the API key only once, during the {infer} model -creation. The <> does not retrieve your API key. After -creating the {infer} model, you cannot change the associated API key. If you -want to use a different API key, delete the {infer} model and recreate it with +IMPORTANT: You need to provide the API key only once, during the {infer} model +creation. The <> does not retrieve your API key. After +creating the {infer} model, you cannot change the associated API key. If you +want to use a different API key, delete the {infer} model and recreate it with the same name and the updated API key. `embedding_type`:: @@ -105,9 +105,9 @@ Valid values are: `model_id`:: (Optional, string) -The name of the model to use for the {infer} task. To review the available -models, refer to the -https://docs.cohere.com/reference/embed[Cohere docs]. Defaults to +The name of the model to use for the {infer} task. To review the available +models, refer to the +https://docs.cohere.com/reference/embed[Cohere docs]. Defaults to `embed-english-v2.0`. ===== + @@ -116,13 +116,13 @@ https://docs.cohere.com/reference/embed[Cohere docs]. Defaults to ===== `num_allocations`::: (Required, integer) -The number of model allocations to create. `num_allocations` must not exceed the +The number of model allocations to create. `num_allocations` must not exceed the number of available processors per node divided by the `num_threads`. `num_threads`::: (Required, integer) -The number of threads to use by each model allocation. `num_threads` must not -exceed the number of available processors per node divided by the number of +The number of threads to use by each model allocation. `num_threads` must not +exceed the number of available processors per node divided by the number of allocations. Must be a power of 2. Max allowed value is 32. ===== + @@ -131,14 +131,14 @@ allocations. Must be a power of 2. Max allowed value is 32. ===== `api_key`::: (Required, string) -A valid access token of your Hugging Face account. You can find your Hugging -Face access tokens or you can create a new one +A valid access token of your Hugging Face account. You can find your Hugging +Face access tokens or you can create a new one https://huggingface.co/settings/tokens[on the settings page]. -IMPORTANT: You need to provide the API key only once, during the {infer} model -creation. The <> does not retrieve your API key. After -creating the {infer} model, you cannot change the associated API key. If you -want to use a different API key, delete the {infer} model and recreate it with +IMPORTANT: You need to provide the API key only once, during the {infer} model +creation. The <> does not retrieve your API key. After +creating the {infer} model, you cannot change the associated API key. If you +want to use a different API key, delete the {infer} model and recreate it with the same name and the updated API key. `url`::: @@ -151,21 +151,21 @@ The URL endpoint to use for the requests. ===== `api_key`::: (Required, string) -A valid API key of your OpenAI account. You can find your OpenAI API keys in -your OpenAI account under the +A valid API key of your OpenAI account. You can find your OpenAI API keys in +your OpenAI account under the https://platform.openai.com/api-keys[API keys section]. -IMPORTANT: You need to provide the API key only once, during the {infer} model -creation. The <> does not retrieve your API key. After -creating the {infer} model, you cannot change the associated API key. 
If you -want to use a different API key, delete the {infer} model and recreate it with +IMPORTANT: You need to provide the API key only once, during the {infer} model +creation. The <> does not retrieve your API key. After +creating the {infer} model, you cannot change the associated API key. If you +want to use a different API key, delete the {infer} model and recreate it with the same name and the updated API key. `organization_id`::: (Optional, string) -The unique identifier of your organization. You can find the Organization ID in -your OpenAI account under -https://platform.openai.com/account/organization[**Settings** > **Organizations**]. +The unique identifier of your organization. You can find the Organization ID in +your OpenAI account under +https://platform.openai.com/account/organization[**Settings** > **Organizations**]. `url`::: (Optional, string) @@ -173,25 +173,25 @@ The URL endpoint to use for the requests. Can be changed for testing purposes. Defaults to `https://api.openai.com/v1/embeddings`. ===== + -.`service_settings` for the `text_embedding` service +.`service_settings` for the `elasticsearch` service [%collapsible%closed] ===== `model_id`::: (Required, string) -The name of the text embedding model to use for the {infer} task. It can be the -ID of either a built-in model (for example, `.multilingual-e5-small` for E5) or +The name of the model to use for the {infer} task. It can be the +ID of either a built-in model (for example, `.multilingual-e5-small` for E5) or a text embedding model already {ml-docs}/ml-nlp-import-model.html#ml-nlp-import-script[uploaded through Eland]. `num_allocations`::: (Required, integer) -The number of model allocations to create. `num_allocations` must not exceed the +The number of model allocations to create. `num_allocations` must not exceed the number of available processors per node divided by the `num_threads`. `num_threads`::: (Required, integer) -The number of threads to use by each model allocation. `num_threads` must not -exceed the number of available processors per node divided by the number of +The number of threads to use by each model allocation. `num_threads` must not +exceed the number of available processors per node divided by the number of allocations. Must be a power of 2. Max allowed value is 32. ===== @@ -211,26 +211,26 @@ Valid values are: * `classification`: use it for embeddings passed through a text classifier. * `clusterning`: use it for the embeddings run through a clustering algorithm. * `ingest`: use it for storing document embeddings in a vector database. - * `search`: use it for storing embeddings of search queries run against a + * `search`: use it for storing embeddings of search queries run against a vector data base to find relevant documents. `model`::: (Optional, string) -For `openai` sevice only. The name of the model to use for the {infer} task. Refer -to the +For `openai` sevice only. The name of the model to use for the {infer} task. Refer +to the https://platform.openai.com/docs/guides/embeddings/what-are-embeddings[OpenAI documentation] for the list of available text embedding models. `truncate`::: (Optional, string) -For `cohere` service only. Specifies how the API handles inputs longer than the +For `cohere` service only. Specifies how the API handles inputs longer than the maximum token length. Defaults to `END`. Valid values are: - * `NONE`: when the input exceeds the maximum input token length an error is + * `NONE`: when the input exceeds the maximum input token length an error is returned. 
- * `START`: when the input exceeds the maximum input token length the start of + * `START`: when the input exceeds the maximum input token length the start of + the input is discarded. + * `END`: when the input exceeds the maximum input token length the end of the input is discarded. - * `END`: when the input exceeds the maximum input token length the end of - the input is discarded. ===== @@ -267,7 +267,7 @@ PUT _inference/text_embedding/cohere-embeddings [discrete] [[inference-example-e5]] -===== E5 via the text embedding service +===== E5 via the elasticsearch service The following example shows how to create an {infer} model called `my-e5-model` to perform a `text_embedding` task type. @@ -276,7 +276,7 @@ The following example shows how to create an {infer} model called ------------------------------------------------------------ PUT _inference/text_embedding/my-e5-model { - "service": "text_embedding", + "service": "elasticsearch", "service_settings": { "num_allocations": 1, "num_threads": 1, @@ -285,8 +285,8 @@ PUT _inference/text_embedding/my-e5-model } ------------------------------------------------------------ // TEST[skip:TBD] -<1> The `model_id` must be the ID of one of the built-in E5 models. Valid values -are `.multilingual-e5-small` and `.multilingual-e5-small_linux-x86_64`. For +<1> The `model_id` must be the ID of one of the built-in E5 models. Valid values +are `.multilingual-e5-small` and `.multilingual-e5-small_linux-x86_64`. For further details, refer to the {ml-docs}/ml-nlp-e5.html[E5 model documentation]. @@ -339,7 +339,7 @@ The following example shows how to create an {infer} model called [source,console] ------------------------------------------------------------ -PUT _inference/text_embedding/hugging-face-embeddings +PUT _inference/text_embedding/hugging-face-embeddings { "service": "hugging_face", "service_settings": { @@ -349,20 +349,20 @@ PUT _inference/text_embedding/hugging-face-embeddings } ------------------------------------------------------------ // TEST[skip:TBD] -<1> A valid Hugging Face access token. You can find on the +<1> A valid Hugging Face access token. You can find on the https://huggingface.co/settings/tokens[settings page of your account]. -<2> The {infer} endpoint URL you created on Hugging Face. +<2> The {infer} endpoint URL you created on Hugging Face. -Create a new {infer} endpoint on -https://ui.endpoints.huggingface.co/[the Hugging Face endpoint page] to get an -endpoint URL. Select the model you want to use on the new endpoint creation page -- for example `intfloat/e5-small-v2` - then select the `Sentence Embeddings` -task under the Advanced configuration section. Create the endpoint. Copy the URL +Create a new {infer} endpoint on +https://ui.endpoints.huggingface.co/[the Hugging Face endpoint page] to get an +endpoint URL. Select the model you want to use on the new endpoint creation page +- for example `intfloat/e5-small-v2` - then select the `Sentence Embeddings` +task under the Advanced configuration section. Create the endpoint. Copy the URL after the endpoint initialization has been finished. [discrete] [[inference-example-eland]] -===== Models uploaded by Eland via the text embedding service +===== Models uploaded by Eland via the elasticsearch service The following example shows how to create an {infer} model called `my-msmarco-minilm-model` to perform a `text_embedding` task type. 
@@ -371,7 +371,7 @@ The following example shows how to create an {infer} model called ------------------------------------------------------------ PUT _inference/text_embedding/my-msmarco-minilm-model { - "service": "text_embedding", + "service": "elasticsearch", "service_settings": { "num_allocations": 1, "num_threads": 1, @@ -380,8 +380,8 @@ PUT _inference/text_embedding/my-msmarco-minilm-model } ------------------------------------------------------------ // TEST[skip:TBD] -<1> The `model_id` must be the ID of a text embedding model which has already -been +<1> The `model_id` must be the ID of a text embedding model which has already +been {ml-docs}/ml-nlp-import-model.html#ml-nlp-import-script[uploaded through Eland]. @@ -405,4 +405,4 @@ PUT _inference/text_embedding/openai_embeddings } } ------------------------------------------------------------ -// TEST[skip:TBD] \ No newline at end of file +// TEST[skip:TBD] diff --git a/docs/reference/ingest/processors/geoip.asciidoc b/docs/reference/ingest/processors/geoip.asciidoc index 77572f707f4cb..7e0e53747834a 100644 --- a/docs/reference/ingest/processors/geoip.asciidoc +++ b/docs/reference/ingest/processors/geoip.asciidoc @@ -435,6 +435,8 @@ each node's <> at `$ES_TMPDIR/geoip-databases/>, <>) diff --git a/docs/reference/mapping/params/format.asciidoc b/docs/reference/mapping/params/format.asciidoc index dff7bb4a11ee4..5babb4def2320 100644 --- a/docs/reference/mapping/params/format.asciidoc +++ b/docs/reference/mapping/params/format.asciidoc @@ -70,6 +70,11 @@ The following tables lists all the defaults ISO formats supported: (separated by `T`), is optional. Examples: `yyyy-MM-dd'T'HH:mm:ss.SSSZ` or `yyyy-MM-dd`. + NOTE: When using `date_optional_time`, the parsing is lenient and will attempt to parse + numbers as a year (e.g. `292278994` will be parsed as a year). This can lead to unexpected results + when paired with a numeric focused format like `epoch_second` and `epoch_millis`. + It is recommended you use `strict_date_optional_time` when pairing with a numeric focused format. + [[strict-date-time-nanos]]`strict_date_optional_time_nanos`:: A generic ISO datetime parser, where the date must include the year at a minimum, and the time diff --git a/docs/reference/mapping/params/subobjects.asciidoc b/docs/reference/mapping/params/subobjects.asciidoc index 8bac7ed8cbb37..b0a5d3817c332 100644 --- a/docs/reference/mapping/params/subobjects.asciidoc +++ b/docs/reference/mapping/params/subobjects.asciidoc @@ -24,7 +24,12 @@ PUT my-index-000001 "properties": { "metrics": { "type": "object", - "subobjects": false <1> + "subobjects": false, <1> + "properties": { + "time": { "type": "long" }, + "time.min": { "type": "long" }, + "time.max": { "type": "long" } + } } } } @@ -105,3 +110,70 @@ PUT my-index-000001/_doc/metric_1 <2> The document does not support objects The `subobjects` setting for existing fields and the top-level mapping definition cannot be updated. + +==== Auto-flattening object mappings + +It is generally recommended to define the properties of an object that is configured with `subobjects: false` with dotted field names +(as shown in the first example). +However, it is also possible to define these properties as sub-objects in the mappings. +In that case, the mapping will be automatically flattened before it is stored. +This makes it easier to re-use existing mappings without having to re-write them. 
+ +Note that auto-flattening will not work when certain <> are set +on object mappings that are defined under an object configured with `subobjects: false`: + +* The <> mapping parameter must not be `false`. +* The <> mapping parameter must not contradict the implicit or explicit value of the parent. For example, when `dynamic` is set to `false` in the root of the mapping, object mappers that set `dynamic` to `true` can't be auto-flattened. +* The <> mapping parameter must not be set to `true` explicitly. + +[source,console] +-------------------------------------------------- +PUT my-index-000002 +{ + "mappings": { + "properties": { + "metrics": { + "subobjects": false, + "properties": { + "time": { + "type": "object", <1> + "properties": { + "min": { "type": "long" }, <2> + "max": { "type": "long" } + } + } + } + } + } + } +} +GET my-index-000002/_mapping +-------------------------------------------------- + +[source,console-result] +-------------------------------------------------- +{ + "my-index-000002" : { + "mappings" : { + "properties" : { + "metrics" : { + "subobjects" : false, + "properties" : { + "time.min" : { <3> + "type" : "long" + }, + "time.max" : { + "type" : "long" + } + } + } + } + } + } +} +-------------------------------------------------- + +<1> The metrics object can contain further object mappings that will be auto-flattened. + Object mappings at this level must not set certain mapping parameters as explained above. +<2> This field will be auto-flattened to `"time.min"` before the mapping is stored. +<3> The auto-flattened `"time.min"` field can be inspected by looking at the index mapping. diff --git a/docs/reference/mapping/types/dense-vector.asciidoc b/docs/reference/mapping/types/dense-vector.asciidoc index d600bc5566ace..cec41eab41238 100644 --- a/docs/reference/mapping/types/dense-vector.asciidoc +++ b/docs/reference/mapping/types/dense-vector.asciidoc @@ -232,7 +232,6 @@ expense of slower indexing speed. + ^*^ This parameter can only be specified when `index` is `true`. + -+ .Properties of `index_options` [%collapsible%open] ==== diff --git a/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc index 97120ff1873ae..1ab5de76a94b0 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc @@ -537,3 +537,4 @@ The API returns the following results: // TESTRESPONSE[s/"job_version" : "8.4.0"/"job_version" : $body.job_version/] // TESTRESPONSE[s/1656087283340/$body.$_path/] // TESTRESPONSE[s/"superuser"/"_es_test_root"/] +// TESTRESPONSE[s/"ignore_throttled" : true/"ignore_throttled" : true,"failure_store":"false"/] diff --git a/docs/reference/ml/anomaly-detection/ml-configuring-aggregations.asciidoc b/docs/reference/ml/anomaly-detection/ml-configuring-aggregations.asciidoc index b9ae702d3ccb4..f550c27db496e 100644 --- a/docs/reference/ml/anomaly-detection/ml-configuring-aggregations.asciidoc +++ b/docs/reference/ml/anomaly-detection/ml-configuring-aggregations.asciidoc @@ -76,6 +76,10 @@ chart. * Your {dfeed} can contain multiple aggregations, but only the ones with names that match values in the job configuration are fed to the job. +* Using +{ref}/search-aggregations-metrics-scripted-metric-aggregation.html[scripted metric] +aggregations is not supported in {dfeeds}. 
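To make the aggregation requirements above concrete, the following is a minimal, hypothetical sketch of a {dfeed} body that nests only supported aggregations (a `date_histogram` with `max` and `avg` sub-aggregations) whose names line up with the fields the job configuration refers to. The job, index, and field names are illustrative assumptions, not taken from this page:

[source,python]
----
# Hypothetical datafeed body for the create datafeed API (PUT _ml/datafeeds/<feed_id>).
# "example-job", "example-metrics", "@timestamp", and "responsetime" are assumed names.
datafeed_body = {
    "job_id": "example-job",
    "indices": ["example-metrics"],
    "aggregations": {
        "buckets": {
            "date_histogram": {"field": "@timestamp", "fixed_interval": "5m"},
            "aggregations": {
                # The max aggregation on the time field and the metric aggregation
                # are named after the fields used in the job configuration, so their
                # results are fed to the job. No scripted metric aggregations are used.
                "@timestamp": {"max": {"field": "@timestamp"}},
                "responsetime": {"avg": {"field": "responsetime"}},
            },
        }
    },
}
----
// NOTCONSOLE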
+ [discrete] [[aggs-recommendations-dfeeds]] diff --git a/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc b/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc index bf98327807e70..2e678b929d296 100644 --- a/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc +++ b/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc @@ -5,8 +5,6 @@ :frontmatter-tags-content-type: [how-to] :frontmatter-tags-user-goals: [configure] -beta::[] - {kib} {alert-features} include support for {ml} rules, which run scheduled checks for anomalies in one or more {anomaly-jobs} or check the health of the job with certain conditions. If the conditions of the rule are met, an alert is @@ -83,73 +81,7 @@ TIP: You must also provide a _check interval_ that defines how often to evaluate the rule conditions. It is recommended to select an interval that is close to the bucket span of the job. -As the last step in the rule creation process, define its actions. - -[discrete] -[[anomaly-alert-actions]] -=== {anomaly-detect-cap} alert rule actions - -You can optionally send notifications when the rule conditions are met and when -they are no longer met. In particular, this rule type supports: - -* alert summaries -* actions that run when the anomaly score matches the conditions -* recovery actions that run when the conditions are no longer met - -Each action uses a connector, which stores connection information for a {kib} -service or supported third-party integration, depending on where you want to -send the notifications. For example, you can use a Slack connector to send a -message to a channel. Or you can use an index connector that writes an JSON -object to a specific index. For details about creating connectors, refer to -{kibana-ref}/action-types.html[Connectors]. - -After you select a connector, you must set the action frequency. You can choose -to create a summary of alerts on each check interval or on a custom interval. -For example, send slack notifications that summarize the new, ongoing, and -recovered alerts: - -[role="screenshot"] -image::images/ml-anomaly-alert-action-summary.png["Adding an alert summary action to the rule",500] -// NOTE: This is an autogenerated screenshot. Do not edit it directly. - -TIP: If you choose a custom action interval, it cannot be shorter than the -rule's check interval. - -Alternatively, you can set the action frequency such that actions run for each -alert. Choose how often the action runs (at each check interval, only when the -alert status changes, or at a custom action interval). You must also choose an -action group, which indicates whether the action runs when the anomaly score is -matched or when the alert is recovered. For example: - -[role="screenshot"] -image::images/ml-anomaly-alert-action-score-matched.png["Adding an action for each alert in the rule",500] -// NOTE: This is an autogenerated screenshot. Do not edit it directly. - -You can further refine the conditions under which actions run by specifying that -actions only run they match a KQL query or when an alert occurs within a -specific time frame. - -There is a set of variables that you can use to customize the notification -messages for each action. Click the icon above the message text box to get the -list of variables or refer to <>. - -[role="screenshot"] -image::images/ml-anomaly-alert-messages.png["Customizing your message",500] -// NOTE: This is an autogenerated screenshot. Do not edit it directly. 
- -After you save the configurations, the rule appears in the -*{stack-manage-app} > {rules-ui}* list; you can check its status and see the -overview of its configuration information. - -When an alert occurs, it is always the same name as the job ID of the associated -{anomaly-job} that triggered it. If necessary, you can snooze rules to prevent -them from generating actions. For more details, refer to -{kibana-ref}/create-and-manage-rules.html#controlling-rules[Snooze and disable rules]. - -You can also review how the alerts that are occured correlate with the -{anomaly-detect} results in the **Anomaly exloprer** by using the -**Anomaly timeline** swimlane and the **Alerts** panel. - +As the last step in the rule creation process, define its <>. [[creating-anomaly-jobs-health-rules]] == {anomaly-jobs-cap} health rules @@ -197,36 +129,78 @@ close to the bucket span of the job. As the last step in the rule creation process, define its actions. -[discrete] -[[anomaly-jobs-health-actions]] -=== {anomaly-jobs-cap} health rule actions +[[ml-configuring-alert-actions]] +== Actions You can optionally send notifications when the rule conditions are met and when -they are no longer met. In particular, this rule type supports: +they are no longer met. In particular, these rules support: -* actions that run when an issue is detected -* recovery actions that run when the rule conditions are no longer met +* alert summaries +* actions that run when the anomaly score matches the conditions (for {anomaly-detect} alert rules) +* actions that run when an issue is detected (for {anomaly-jobs} health rules) +* recovery actions that run when the conditions are no longer met + +Each action uses a connector, which stores connection information for a {kib} +service or supported third-party integration, depending on where you want to +send the notifications. For example, you can use a Slack connector to send a +message to a channel. Or you can use an index connector that writes a JSON +object to a specific index. For details about creating connectors, refer to +{kibana-ref}/action-types.html[Connectors]. -For each action, you must choose a connector, which provides connection -information for a {kib} service or third-party integration. You must set the -action frequency, which involves choosing how often to run the action (for -example, at each check interval, only when the alert status changes, or at a -custom action interval). You must also choose one of the action groups (for -example, the action runs when the issue is detected or when it is recovered). +After you select a connector, you must set the action frequency. You can choose +to create a summary of alerts on each check interval or on a custom interval. +For example, send slack notifications that summarize the new, ongoing, and +recovered alerts: + +[role="screenshot"] +image::images/ml-anomaly-alert-action-summary.png["Adding an alert summary action to the rule",500] +// NOTE: This is an autogenerated screenshot. Do not edit it directly. + +TIP: If you choose a custom action interval, it cannot be shorter than the +rule's check interval. + +Alternatively, you can set the action frequency such that actions run for each +alert. Choose how often the action runs (at each check interval, only when the +alert status changes, or at a custom action interval). 
For {anomaly-detect} +alert rules, you must also choose whether the action runs when the anomaly score +matches the condition or when the alert recovers: + +[role="screenshot"] +image::images/ml-anomaly-alert-action-score-matched.png["Adding an action for each alert in the rule",500] +// NOTE: This is an autogenerated screenshot. Do not edit it directly. + +In {anomaly-jobs} health rules, choose whether the action runs when the issue is +detected or when it is recovered: [role="screenshot"] image::images/ml-health-check-action.png["Adding an action for each alert in the rule",500] // NOTE: This is an autogenerated screenshot. Do not edit it directly. -You can pass rule values to an action to provide contextual details in the -notification messages. For the list of variables that you can include in the -message, click the icon above the message text box or refer to -<>. +You can further refine the rule by specifying that actions run only when they +match a KQL query or when an alert occurs within a specific time frame. + +There is a set of variables that you can use to customize the notification +messages for each action. Click the icon above the message text box to get the +list of variables or refer to <>. For example: + +[role="screenshot"] +image::images/ml-anomaly-alert-messages.png["Customizing your message",500] +// NOTE: This is an autogenerated screenshot. Do not edit it directly. After you save the configurations, the rule appears in the *{stack-manage-app} > {rules-ui}* list; you can check its status and see the overview of its configuration information. +When an alert occurs for an {anomaly-detect} alert rule, it always has the same +name as the job ID of the associated {anomaly-job} that triggered it. You can +review how the alerts that occurred correlate with the {anomaly-detect} +results in the **Anomaly explorer** by using the **Anomaly timeline** swimlane +and the **Alerts** panel. + +If necessary, you can snooze rules to prevent them from generating actions. For +more details, refer to +{kibana-ref}/create-and-manage-rules.html#controlling-rules[Snooze and disable rules]. + [[action-variables]] == Action variables diff --git a/docs/reference/ml/images/ml-anomaly-alert-severity.png b/docs/reference/ml/images/ml-anomaly-alert-severity.png index c93aaf6175cf8..acbdce91e605a 100644 Binary files a/docs/reference/ml/images/ml-anomaly-alert-severity.png and b/docs/reference/ml/images/ml-anomaly-alert-severity.png differ diff --git a/docs/reference/ml/images/ml-health-check-action.png b/docs/reference/ml/images/ml-health-check-action.png index 1d94893dc3390..31f6ef3d97b7a 100644 Binary files a/docs/reference/ml/images/ml-health-check-action.png and b/docs/reference/ml/images/ml-health-check-action.png differ diff --git a/docs/reference/ml/images/ml-health-check-config.png b/docs/reference/ml/images/ml-health-check-config.png index cc12d1d8f0334..c443c9f6dabe8 100644 Binary files a/docs/reference/ml/images/ml-health-check-config.png and b/docs/reference/ml/images/ml-health-check-config.png differ diff --git a/docs/reference/modules/cluster/remote-clusters-api-key.asciidoc b/docs/reference/modules/cluster/remote-clusters-api-key.asciidoc index b95ebdf143a57..0cac52deaae4b 100644 --- a/docs/reference/modules/cluster/remote-clusters-api-key.asciidoc +++ b/docs/reference/modules/cluster/remote-clusters-api-key.asciidoc @@ -62,6 +62,9 @@ information, refer to https://www.elastic.co/subscriptions.
[[remote-clusters-security-api-key]] ==== Establish trust with a remote cluster +NOTE: If a remote cluster is part of an {ess} deployment, it has a valid certificate by default. +You can therefore skip steps related to certificates in these instructions. + ===== On the remote cluster // tag::remote-cluster-steps[] diff --git a/docs/reference/release-notes.asciidoc b/docs/reference/release-notes.asciidoc index e548459f31216..e3c8da281f2a1 100644 --- a/docs/reference/release-notes.asciidoc +++ b/docs/reference/release-notes.asciidoc @@ -8,6 +8,7 @@ This section summarizes the changes in each release. * <> * <> +* <> * <> * <> * <> @@ -63,6 +64,7 @@ This section summarizes the changes in each release. include::release-notes/8.14.0.asciidoc[] include::release-notes/8.13.0.asciidoc[] +include::release-notes/8.12.2.asciidoc[] include::release-notes/8.12.1.asciidoc[] include::release-notes/8.12.0.asciidoc[] include::release-notes/8.11.4.asciidoc[] diff --git a/docs/reference/release-notes/8.12.2.asciidoc b/docs/reference/release-notes/8.12.2.asciidoc new file mode 100644 index 0000000000000..2be8449b6c1df --- /dev/null +++ b/docs/reference/release-notes/8.12.2.asciidoc @@ -0,0 +1,58 @@ +[[release-notes-8.12.2]] +== {es} version 8.12.2 + +Also see <>. + +[[bug-8.12.2]] +[float] +=== Bug fixes + +Application:: +* Fix bug in `rule_query` where `text_expansion` errored because it was not rewritten {es-pull}105365[#105365] +* [Connectors API] Fix bug with crawler configuration parsing and `sync_now` flag {es-pull}105024[#105024] + +Authentication:: +* Validate settings before reloading JWT shared secret {es-pull}105070[#105070] + +Downsampling:: +* Downsampling better handle if source index isn't allocated and fix bug in retrieving last processed tsid {es-pull}105228[#105228] + +ES|QL:: +* ESQL: Push CIDR_MATCH to Lucene if possible {es-pull}105061[#105061] (issue: {es-issue}105042[#105042]) +* ES|QL: Fix exception handling on `date_parse` with wrong date pattern {es-pull}105048[#105048] (issue: {es-issue}104124[#104124]) + +Indices APIs:: +* Always show `composed_of` field for composable index templates {es-pull}105315[#105315] (issue: {es-issue}104627[#104627]) + +Ingest Node:: +* Backport stable `ThreadPool` constructor from `LogstashInternalBridge` {es-pull}105165[#105165] +* Harden index mapping parameter check in enrich runner {es-pull}105096[#105096] + +Machine Learning:: +* Fix handling of `ml.config_version` node attribute for nodes with machine learning disabled {es-pull}105066[#105066] +* Fix handling surrogate pairs in the XLM Roberta tokenizer {es-pull}105183[#105183] (issues: {es-issue}104626[#104626], {es-issue}104981[#104981]) +* Inference service should reject tasks during shutdown {es-pull}105213[#105213] + +Network:: +* Close `currentChunkedWrite` on client cancel {es-pull}105258[#105258] +* Fix leaked HTTP response sent after close {es-pull}105293[#105293] (issue: {es-issue}104651[#104651]) +* Fix race in HTTP response shutdown handling {es-pull}105306[#105306] + +Search:: +* Field-caps should read fields from up-to-dated shards {es-pull}105153[#105153] (issue: {es-issue}104809[#104809]) + +Snapshot/Restore:: +* Finalize all snapshots completed by shard snapshot updates {es-pull}105245[#105245] (issue: {es-issue}104939[#104939]) + +Transform:: +* Do not log warning when triggering an `ABORTING` transform {es-pull}105234[#105234] (issue: {es-issue}105233[#105233]) +* Make `_reset` action stop transforms without force first {es-pull}104870[#104870] (issues: {es-issue}100596[#100596], 
{es-issue}104825[#104825]) + +[[enhancement-8.12.2]] +[float] +=== Enhancements + +IdentityProvider:: +* Include user's privileges actions in IdP plugin `_has_privileges` request {es-pull}104026[#104026] + + diff --git a/docs/reference/release-notes/highlights.asciidoc b/docs/reference/release-notes/highlights.asciidoc index a3345c8dc3d74..92cd447a48deb 100644 --- a/docs/reference/release-notes/highlights.asciidoc +++ b/docs/reference/release-notes/highlights.asciidoc @@ -28,13 +28,40 @@ Other versions: endif::[] -// The notable-highlights tag marks entries that -// should be featured in the Stack Installation and Upgrade Guide: // tag::notable-highlights[] -// [discrete] -// === Heading -// -// Description. + +[discrete] +[[improve_storage_efficiency_for_non_metric_fields_in_tsdb]] +=== Improve storage efficiency for non-metric fields in TSDB +Adds a new `doc_values` encoding for non-metric fields in TSDB that takes advantage of TSDB's index sorting. +While terms that are used in multiple documents (such as the host name) are already stored only once in the terms dictionary, +there are a lot of repetitions in the references to the terms dictionary that are stored in `doc_values` (ordinals). +In TSDB, documents (and therefore `doc_values`) are implicitly sorted by dimensions and timestamp. +This means that for each time series, we are storing long consecutive runs of the same ordinal. +With this change, we are introducing an encoding that detects and efficiently stores runs of the same value (such as `1 1 1 2 2 2 …`), +and runs of cycling values (such as `1 2 1 2 …`). +In our testing, we have seen a reduction in storage size by about 13%. +The effectiveness of this encoding depends on how many non-metric fields, such as dimensions, are used. +The more non-metric fields, the more effective this improvement will be. + +{es-pull}99747[#99747] + +[discrete] +[[ga_release_of_synonyms_api]] +=== GA Release of Synonyms API +Removes the beta label for the Synonyms API to make it GA. + +{es-pull}103223[#103223] + +[discrete] +[[flag_in_field_caps_to_return_only_fields_with_values_in_index]] +=== Flag in `_field_caps` to return only fields with values in index +We added support for filtering the field capabilities API output by removing +fields that don't have a value. This can be done through the newly added +`include_empty_fields` parameter, which defaults to true. + +{es-pull}103651[#103651] + // end::notable-highlights[] diff --git a/docs/reference/search/search-your-data/learning-to-rank-model-training.asciidoc b/docs/reference/search/search-your-data/learning-to-rank-model-training.asciidoc new file mode 100644 index 0000000000000..fb026578bc00d --- /dev/null +++ b/docs/reference/search/search-your-data/learning-to-rank-model-training.asciidoc @@ -0,0 +1,168 @@ +[[learning-to-rank-model-training]] +=== Deploy and manage Learning To Rank models +++++ +Deploy and manage LTR models +++++ + +preview::["The Learning To Rank feature is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but this feature is not subject to the support SLA of official GA features."] + +NOTE: This feature was introduced in version 8.12.0 and is only available to certain subscription levels. +For more information, see {subscriptions}.
+ +[discrete] +[[learning-to-rank-model-training-workflow]] +==== Train and deploy a model using Eland + +Typically, the https://xgboost.readthedocs.io/en/stable/[XGBoost^] model training process uses standard Python data science tools like Pandas and scikit-learn. + + +We have developed an +https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/08-learning-to-rank.ipynb[example +notebook^] available in the `elasticsearch-labs` repo. This interactive Python notebook +details an end-to-end model training and deployment workflow. + +We highly recommend using https://eland.readthedocs.io/[eland^] in your workflow, because it provides important functionalities for working with LTR in {es}. Use eland to: + +* Configure feature extraction + +* Extract features for training + +* Deploy the model in {es} + +[discrete] +[[learning-to-rank-model-training-feature-definition]] +===== Configure feature extraction in Eland + +Feature extractors are defined using templated queries. https://eland.readthedocs.io/[Eland^] provides the `eland.ml.ltr.QueryFeatureExtractor` to define these feature extractors directly in Python: + +[source,python] +---- +from eland.ml.ltr import QueryFeatureExtractor + +feature_extractors=[ + # We want to use the score of the match query for the title field as a feature: + QueryFeatureExtractor( + feature_name="title_bm25", + query={"match": {"title": "{{query}}"}} + ), + # We can use a script_score query to get the value + # of the field rating directly as a feature: + QueryFeatureExtractor( + feature_name="popularity", + query={ + "script_score": { + "query": {"exists": {"field": "popularity"}}, + "script": {"source": "return doc['popularity'].value;"}, + } + }, + ), + # We can execute a script on the value of the query + # and use the return value as a feature: + QueryFeatureExtractor( + feature_name="query_length", + query={ + "script_score": { + "query": {"match_all": {}}, + "script": { + "source": "return params['query'].splitOnToken(' ').length;", + "params": { + "query": "{{query}}", + } + }, + } + }, + ), +] +---- +// NOTCONSOLE + +Once the feature extractors have been defined, they are wrapped in an `eland.ml.ltr.LTRModelConfig` object for use in later training steps: + +[source,python] +---- +from eland.ml.ltr import LTRModelConfig + +ltr_config = LTRModelConfig(feature_extractors) +---- +// NOTCONSOLE + +[discrete] +[[learning-to-rank-model-training-feature-extraction]] +===== Extracting features for training + +Building your dataset is a critical step in the training process. This involves +extracting relevant features and adding them to your judgment list. We +recommend using Eland's `eland.ml.ltr.FeatureLogger` helper class for this +process. + +[source,python] +---- +from eland.ml.ltr import FeatureLogger + +# Create a feature logger that will be used to query {es} to retrieve the features: +feature_logger = FeatureLogger(es_client, MOVIE_INDEX, ltr_config) +---- +// NOTCONSOLE + +The FeatureLogger provides an `extract_features` method which enables you to extract features for a list of specific documents from your judgment list. 
At the same time, you can pass query parameters to the feature extractors defined earlier: + +[source,python] +---- +feature_logger.extract_features( + query_params={"query": "foo"}, + doc_ids=["doc-1", "doc-2"] +) +---- +// NOTCONSOLE + +Our https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/08-learning-to-rank.ipynb[example notebook^] explains how to use the `FeatureLogger` to build a training dataset, by adding features to a judgment list. + +[discrete] +[[learning-to-rank-model-training-feature-extraction-notes]] +====== Notes on feature extraction + +* We strongly advise against implementing feature extraction on your own. It's crucial to maintain consistency in feature extraction between the training environment and inference in {es}. By using eland tooling, which is developed and tested in tandem with {es}, you can ensure that they function together consistently. + +* Feature extraction is performed by executing queries on the {es} server. This could put a lot of stress on your cluster, especially when your judgment list contains a lot of examples or you have many features. Our feature logger implementation is designed to minimize the number of search requests sent to the server and reduce load. However, it might be best to build your training dataset using an {es} cluster that is isolated from any user-facing, production traffic. + +[discrete] +[[learning-to-rank-model-deployment]] +===== Deploy your model into {es} + +Once your model is trained you will be able to deploy it in your {es} cluster. You can use Eland's `MLModel.import_ltr_model` method: + +[source,python] +---- +from eland.ml import MLModel + +LEARNING_TO_RANK_MODEL_ID="ltr-model-xgboost" + +MLModel.import_ltr_model( + es_client=es_client, + model=ranker, + model_id=LEARNING_TO_RANK_MODEL_ID, + ltr_model_config=ltr_config, + es_if_exists="replace", +) +---- +// NOTCONSOLE + +This method will serialize the trained model and the Learning To Rank configuration (including feature extraction) in a format that {es} can understand. The model is then deployed to {es} using the <>. + +The following types of models are currently supported for LTR with {es}: + +* https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html[`DecisionTreeRegressor`^] +* https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html[`RandomForestRegressor`^] +* https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMRegressor.html[`LGBMRegressor`^] +* https://xgboost.readthedocs.io/en/stable/python/python_api.html#xgboost.XGBRanker[`XGBRanker`^] +* https://xgboost.readthedocs.io/en/stable/python/python_api.html#xgboost.XGBRegressor[`XGBRegressor`^] + + +More model types will be supported in the future. + +[discrete] +[[learning-to-rank-model-management]] +==== Learning To Rank model management + +Once your model is deployed in {es} you can manage it using the https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-df-trained-models-apis.html[trained model APIs]. +You're now ready to work with your LTR model as a rescorer at <>.
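For completeness, here is a rough sketch of the training step that happens outside {es}, between feature extraction and `MLModel.import_ltr_model`. It assumes a small, hypothetical judgment list that has already been enriched with the `title_bm25`, `popularity`, and `query_length` features defined earlier; in practice this frame would be built with the `FeatureLogger`, as shown in the example notebook:

[source,python]
----
import pandas as pd
from xgboost import XGBRanker

# Hypothetical training frame: one row per (query, document) pair from the
# judgment list, with the extracted features and the relevance grade.
# Column names and values are assumptions for illustration only.
judgments_df = pd.DataFrame(
    {
        "query_id": [0, 0, 1, 1],
        "title_bm25": [12.3, 4.1, 9.8, 0.0],
        "popularity": [7.0, 2.0, 5.0, 1.0],
        "query_length": [3, 3, 2, 2],
        "grade": [4, 0, 3, 1],
    }
)

feature_names = ["title_bm25", "popularity", "query_length"]

# XGBRanker needs the group sizes (documents per query) so that documents
# are only compared within the same query during training.
group_sizes = judgments_df.groupby("query_id").size().to_list()

ranker = XGBRanker(objective="rank:ndcg")
ranker.fit(
    judgments_df[feature_names],
    judgments_df["grade"],
    group=group_sizes,
)
# The fitted `ranker` is what gets passed as `model=ranker` to
# MLModel.import_ltr_model in the deployment snippet above.
----
// NOTCONSOLE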
diff --git a/docs/reference/search/search-your-data/learning-to-rank-search-usage.asciidoc b/docs/reference/search/search-your-data/learning-to-rank-search-usage.asciidoc new file mode 100644 index 0000000000000..1d040a116ad9a --- /dev/null +++ b/docs/reference/search/search-your-data/learning-to-rank-search-usage.asciidoc @@ -0,0 +1,78 @@ +[[learning-to-rank-search-usage]] +=== Search using Learning To Rank +++++ +Search using LTR +++++ + +preview::["The Learning To Rank feature is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but this feature is not subject to the support SLA of official GA features."] + +NOTE: This feature was introduced in version 8.12.0 and is only available to certain subscription levels. +For more information, see {subscriptions}. + +[discrete] +[[learning-to-rank-rescorer]] +==== Learning To Rank as a rescorer + +Once your LTR model is trained and deployed in {es}, it can be used as a <> in the <>: + +[source,console] +---- +GET my-index/_search +{ + "query": { <1> + "multi_match": { + "fields": ["title", "content"], + "query": "the quick brown fox" + } + }, + "rescore": { + "learning_to_rank": { + "model_id": "ltr-model", <2> + "params": { <3> + "query_text": "the quick brown fox" + } + }, + "window_size": 100 <4> + } +} +---- +// TEST[skip:TBD] +<1> First pass query providing documents to be rescored. +<2> The unique identifier of the trained model uploaded to {es}. +<3> Named parameters to be passed to the query templates used for feature extraction. +<4> The number of documents that should be examined by the rescorer on each shard. + +[discrete] +[[learning-to-rank-rescorer-limitations]] +===== Known limitations + +[discrete] +[[learning-to-rank-rescorer-limitations-window-size]] +====== Rescore window size + +Scores returned by LTR models are usually not comparable with the scores issued by the first pass query and can be lower than the non-rescored score. This can cause the non-rescored result document to be ranked higher than the rescored document. To prevent this, the `window_size` parameter is mandatory for LTR rescorers and should be greater than or equal to `from + size`. + +[discrete] +[[learning-to-rank-rescorer-limitations-pagination]] +====== Pagination + +When exposing pagination to users, `window_size` should remain constant as each page is progressed by passing different `from` values. Changing the `window_size` can alter the top hits, causing results to shift confusingly as the user steps through pages. + +[discrete] +[[learning-to-rank-rescorer-limitations-negative-scores]] +====== Negative scores + +Depending on how your model is trained, it’s possible that the model will return negative scores for documents. While negative scores are not allowed from first-stage retrieval and ranking, it is possible to use them in the LTR rescorer. + +[discrete] +[[learning-to-rank-rescorer-limitations-field-collapsing]] +====== Compatibility with field collapsing + +LTR rescorers are not compatible with the <>. + +[discrete] +[[learning-to-rank-rescorer-limitations-term-statistics]] +====== Term statistics as features + +We do not currently support term statistics as features; however, future releases will introduce this capability.
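To illustrate the pagination guidance above, here is a minimal sketch using the Python {es} client rather than the console format; the connection details, index name, model ID, and page size are assumptions. The point is that `window_size` stays fixed while only `from` changes between pages:

[source,python]
----
from elasticsearch import Elasticsearch

es_client = Elasticsearch("http://localhost:9200")  # hypothetical connection details

PAGE_SIZE = 10
WINDOW_SIZE = 100  # kept constant; window_size must be >= from + size on every page

def fetch_page(page: int):
    # Only `from` changes between pages; the rescorer configuration is identical.
    return es_client.search(
        index="my-index",
        from_=page * PAGE_SIZE,
        size=PAGE_SIZE,
        query={
            "multi_match": {
                "fields": ["title", "content"],
                "query": "the quick brown fox",
            }
        },
        rescore={
            "learning_to_rank": {
                "model_id": "ltr-model",
                "params": {"query_text": "the quick brown fox"},
            },
            "window_size": WINDOW_SIZE,
        },
    )

first_page = fetch_page(0)
second_page = fetch_page(1)
----
// NOTCONSOLE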
+ diff --git a/docs/reference/search/search-your-data/learning-to-rank.asciidoc b/docs/reference/search/search-your-data/learning-to-rank.asciidoc new file mode 100644 index 0000000000000..08fad9db9c0f6 --- /dev/null +++ b/docs/reference/search/search-your-data/learning-to-rank.asciidoc @@ -0,0 +1,136 @@ +[[learning-to-rank]] +== Learning To Rank + +preview::["The Learning To Rank feature is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but this feature is not subject to the support SLA of official GA features."] + +NOTE: This feature was introduced in version 8.12.0 and is only available to certain subscription levels. +For more information, see {subscriptions}. + +Learning To Rank (LTR) uses a trained machine learning (ML) model to build a +ranking function for your search engine. Typically, the model is used as a +second stage re-ranker, to improve the relevance of search results returned by a +simpler, first stage retrieval algorithm. The LTR function takes a list of +documents and a search context and outputs ranked documents: + +[[learning-to-rank-overview-diagram]] +.Learning To Rank overview +image::images/search/learning-to-rank-overview.png[Learning To Rank overview,align="center"] + + +[discrete] +[[learning-to-rank-search-context]] +=== Search context + +In addition to the list of documents to sort, the LTR function also requires a +search context. Typically, this search context includes at least the search +terms provided by the user (`text_query` in the example above). +The search context can also provide additional information used in the ranking mode. +This could be information about the user doing the search (such as demographic data, geolocation, or age); about the query (such as query length); or document in the context of the query (such as score for the title field). + +[discrete] +[[learning-to-rank-judgement-list]] +=== Judgment list +The LTR model is usually trained on a judgment list, which is a set of queries and documents with a relevance grade. Judgment lists can be human or machine generated: they're commonly populated from behavioural analytics, often with human moderation. Judgment lists determine the ideal ordering of results for a given search query. The goal of LTR is to fit the model to the judgment list rankings as closely as possible for new queries and documents. + +The judgment list is the main input used to train the model. It consists of a dataset that contains pairs of queries and documents, along with their corresponding relevance labels. +The relevance judgment is typically either a binary (relevant/irrelevant) or a more +granular label, such as a grade between 0 (completely irrelevant) to 4 (highly +relevant). The example below uses a graded relevance judgment. + + +[[learning-to-rank-judgment-list-example]] +.Judgment list example +image::images/search/learning-to-rank-judgment-list.png[Judgment list example,align="center"] + +[discrete] +[[judgment-list-notes]] +==== Notes on judgment lists + +While a judgment list can be created manually by humans, there are techniques available to leverage user engagement data, such as clicks or conversions, to construct judgment lists automatically. + +The quantity and the quality of your judgment list will greatly influence the overall performance of the LTR model. The following aspects should be considered very carefully when building your judgment list: + +* Most search engines can be searched using different query types. 
For example, in a movie search engine, users search by title but also by actor or director. It's essential to maintain a balanced number of examples for each query type in your judgment list. This prevents overfitting and allows the model to generalize effectively across all query types. + +* Users often provide more positive examples than negative ones. By balancing the number of positive and negative examples, you help the model learn to distinguish between relevant and irrelevant content more accurately. + +[discrete] +[[learning-to-rank-feature-extraction]] +=== Feature extraction + +Query and document pairs alone don't provide enough information to train the ML +models used for LTR. The relevance scores in judgment lists depend on a number +of properties or _features_. These features must be extracted to determine how +the various components combine to determine document relevance. The judgment +list plus the extracted features make up the training dataset for an LTR model. + +These features fall into one of three main categories: + +* *Document features*: + These features are derived directly from document properties. + Example: product price in an eCommerce store. + +* *Query features*: + These features are computed directly from the query submitted by the user. + Example: the number of words in the query. + +* *Query-document features*: + Features used to provide information about the document in the context of the query. + Example: the BM25 score for the `title` field. + +To prepare the dataset for training, the features are added to the judgment list: + +[[learning-to-rank-judgement-feature-extraction]] +.Judgment list with features +image::images/search/learning-to-rank-feature-extraction.png[Judgment list with features,align="center"] + +To do this in {es}, use templated queries to extract features when building the +training dataset and during inference at query time. Here is an example of a +templated query: + +[source,js] +---- +[ + { + "query_extractor": { + "feature_name": "title_bm25", + "query": { "match": { "title": "{{query}}" } } + } + } +] +---- +// NOTCONSOLE + +[discrete] +[[learning-to-rank-models]] +=== Models + +The heart of LTR is of course an ML model. A model is trained using the training data described above in combination with an objective. In the case of LTR, the objective is to rank result documents in an optimal way with respect to a judgment list, given some ranking metric such as https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Discounted_cumulative_gain[nDCG^] or https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Mean_average_precision[MAP^]. The model relies solely on the features and relevance labels from the training data. + +The LTR space is evolving rapidly and many approaches and model types are being +experimented with. In practice {es} relies specifically on gradient boosted decision tree +(https://en.wikipedia.org/wiki/Gradient_boosting#Gradient_tree_boosting[GBDT^]) models for LTR inference. + +Note that {es} supports model inference but the training process itself must +happen outside of {es}, using a GBDT model. Among the most popular LTR models +used today, https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/MSR-TR-2010-82.pdf[LambdaMART^] provides strong ranking performance with low inference +latencies. It relies on GBDT models and is therefore a perfect fit for LTR in +{es}. 
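As an aside, the ranking metric mentioned above can be made concrete in a few lines of Python. This is a minimal sketch of one common nDCG formulation; the relevance grades are purely illustrative:

[source,python]
----
import math

def dcg(grades):
    # Discounted cumulative gain of a ranked list of relevance grades.
    return sum((2 ** g - 1) / math.log2(rank + 2) for rank, g in enumerate(grades))

def ndcg(grades):
    # Normalize by the DCG of the ideal (judgment list) ordering.
    ideal = dcg(sorted(grades, reverse=True))
    return dcg(grades) / ideal if ideal > 0 else 0.0

print(ndcg([3, 0, 2, 1]))  # below 1.0: the model's ordering can still improve
print(ndcg([3, 2, 1, 0]))  # exactly 1.0: matches the ideal ordering
----
// NOTCONSOLE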
+ +https://xgboost.readthedocs.io/en/stable/[XGBoost^] is a well known library that provides an https://xgboost.readthedocs.io/en/stable/tutorials/learning_to_rank.html[implementation^] of LambdaMART, making it a popular choice for LTR. We offer helpers in https://eland.readthedocs.io/[eland^] to facilitate the integration of a trained https://xgboost.readthedocs.io/en/stable/python/python_api.html#xgboost.XGBRanker[XBGRanker^] model as your LTR model in {es}. + +[TIP] +==== +Learn more about training in <>, or check out our https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/08-learning-to-rank.ipynb[interactive LTR notebook] available in the `elasticsearch-labs` repo. +==== +[discrete] +[[learning-to-rank-in-the-elastic-stack]] +=== LTR in the Elastic stack + +In the next pages of this guide you will learn to: + +* <> +* <> + +include::learning-to-rank-model-training.asciidoc[] +include::learning-to-rank-search-usage.asciidoc[] diff --git a/docs/reference/search/search-your-data/search-across-clusters.asciidoc b/docs/reference/search/search-your-data/search-across-clusters.asciidoc index 93955659a1b2a..ee1d9fcae18e8 100644 --- a/docs/reference/search/search-your-data/search-across-clusters.asciidoc +++ b/docs/reference/search/search-your-data/search-across-clusters.asciidoc @@ -22,10 +22,11 @@ The following APIs support {ccs}: * experimental:[] <> * experimental:[] <> * experimental:[] <> +* experimental:[] <> [discrete] -[[ccs-prereqs]] === Prerequisites +// tag::ccs-prereqs[] * {ccs-cap} requires remote clusters. To set up remote clusters on {ess}, see link:{cloud}/ec-enable-ccs.html[configure remote clusters on {ess}]. If you @@ -39,15 +40,19 @@ To ensure your remote cluster configuration supports {ccs}, see * The local coordinating node must have the <> node role. +// end::ccs-prereqs[] [[ccs-gateway-seed-nodes]] +// tag::ccs-gateway-seed-nodes[] * If you use <>, the local coordinating node must be able to connect to seed and gateway nodes on the remote cluster. + We recommend using gateway nodes capable of serving as coordinating nodes. The seed nodes can be a subset of these gateway nodes. +// end::ccs-gateway-seed-nodes[] [[ccs-proxy-mode]] +// tag::ccs-proxy-mode[] * If you use <>, the local coordinating node must be able to connect to the configured `proxy_address`. The proxy at this address must be able to route connections to gateway and coordinating nodes on the remote @@ -56,6 +61,7 @@ cluster. * {ccs-cap} requires different security privileges on the local cluster and remote cluster. See <> and <>. +// end::ccs-proxy-mode[] [discrete] [[ccs-example]] @@ -64,6 +70,7 @@ remote cluster. See <> and [discrete] [[ccs-remote-cluster-setup]] ==== Remote cluster setup +// tag::ccs-remote-cluster-setup[] The following <> API request adds three remote clusters: `cluster_one`, `cluster_two`, and `cluster_three`. @@ -99,6 +106,7 @@ PUT _cluster/settings -------------------------------- // TEST[setup:host] // TEST[s/35.238.149.\d+:930\d+/\${transport_host}/] +// end::ccs-remote-cluster-setup[] <1> Since `skip_unavailable` was not set on `cluster_three`, it uses the default of `false`. See the <> @@ -1393,6 +1401,7 @@ cluster as the local cluster when running a {ccs}. [[ccs-during-upgrade]] ==== {ccs-cap} during an upgrade +// tag::ccs-during-upgrade[] You can still search a remote cluster while performing a rolling upgrade on the local cluster. 
However, the local coordinating node's "upgrade from" and "upgrade to" version must be compatible @@ -1403,3 +1412,4 @@ duration of an upgrade is not supported. For more information about upgrades, see {stack-ref}/upgrading-elasticsearch.html[Upgrading {es}]. +// end::ccs-during-upgrade[] diff --git a/docs/reference/search/search-your-data/search-your-data.asciidoc b/docs/reference/search/search-your-data/search-your-data.asciidoc index 8362094fab10c..bed204985296c 100644 --- a/docs/reference/search/search-your-data/search-your-data.asciidoc +++ b/docs/reference/search/search-your-data/search-your-data.asciidoc @@ -46,6 +46,7 @@ include::search-api.asciidoc[] include::search-application-overview.asciidoc[] include::knn-search.asciidoc[] include::semantic-search.asciidoc[] +include::learning-to-rank.asciidoc[] include::search-across-clusters.asciidoc[] include::search-with-synonyms.asciidoc[] include::behavioral-analytics/behavioral-analytics-overview.asciidoc[] diff --git a/docs/reference/search/search-your-data/semantic-search-inference.asciidoc b/docs/reference/search/search-your-data/semantic-search-inference.asciidoc index 249fddce9c416..97a37e34eb116 100644 --- a/docs/reference/search/search-your-data/semantic-search-inference.asciidoc +++ b/docs/reference/search/search-your-data/semantic-search-inference.asciidoc @@ -4,18 +4,21 @@ Semantic search with the {infer} API ++++ -The instructions in this tutorial shows you how to use the {infer} API with the -Open AI service to perform semantic search on your data. The following example -uses OpenAI's `text-embedding-ada-002` second generation embedding model. You -can use any OpenAI models, they are all supported by the {infer} API. +The instructions in this tutorial shows you how to use the {infer} API with +various services to perform semantic search on your data. The following examples +use Cohere's `embed-english-v3.0` model and OpenAI's `text-embedding-ada-002` +second generation embedding model. You can use any Cohere and OpenAI models, +they are all supported by the {infer} API. + +Click the name of the service you want to use on any of the widgets below to +review the corresponding instructions. [discrete] -[[infer-openai-requirements]] +[[infer-service-requirements]] ==== Requirements -An https://openai.com/[OpenAI account] is required to use the {infer} API with -the OpenAI service. +include::{es-repo-dir}/tab-widgets/inference-api/infer-api-requirements-widget.asciidoc[] [discrete] @@ -24,113 +27,30 @@ the OpenAI service. Create the {infer} task by using the <>: -[source,console] ------------------------------------------------------------- -PUT _inference/text_embedding/openai_embeddings <1> -{ - "service": "openai", - "service_settings": { - "api_key": "" <2> - }, - "task_settings": { - "model": "text-embedding-ada-002" <3> - } -} ------------------------------------------------------------- -// TEST[skip:TBD] -<1> The task type is `text_embedding` in the path. -<2> The API key of your OpenAI account. You can find your OpenAI API keys in -your OpenAI account under the -https://platform.openai.com/api-keys[API keys section]. You need to provide -your API key only once. The <> does not return your API -key. -<3> The name of the embedding model to use. You can find the list of OpenAI -embedding models -https://platform.openai.com/docs/guides/embeddings/embedding-models[here]. 
+include::{es-repo-dir}/tab-widgets/inference-api/infer-api-task-widget.asciidoc[] [discrete] -[[infer-openai-mappings]] +[[infer-service-mappings]] ==== Create the index mapping The mapping of the destination index - the index that contains the embeddings that the model will create based on your input text - must be created. The destination index must have a field with the <> -field type to index the output of the OpenAI model. +field type to index the output of the used model. -[source,console] --------------------------------------------------- -PUT openai-embeddings -{ - "mappings": { - "properties": { - "content_embedding": { <1> - "type": "dense_vector", <2> - "dims": 1536, <3> - "element_type": "float", - "similarity": "dot_product" <4> - }, - "content": { <5> - "type": "text" <6> - } - } - } -} --------------------------------------------------- -<1> The name of the field to contain the generated tokens. It must be refrenced -in the {infer} pipeline configuration in the next step. -<2> The field to contain the tokens is a `dense_vector` field. -<3> The output dimensions of the model. Find this value in the -https://platform.openai.com/docs/guides/embeddings/embedding-models[OpenAI documentation] -of the model you use. -<4> The faster` dot_product` function can be used to calculate similarity -because OpenAI embeddings are normalised to unit length. You can check the -https://platform.openai.com/docs/guides/embeddings/which-distance-function-should-i-use[OpenAI docs] -about which similarity function to use. -<5> The name of the field from which to create the sparse vector representation. -In this example, the name of the field is `content`. It must be referenced in -the {infer} pipeline configuration in the next step. -<6> The field type which is text in this example. +include::{es-repo-dir}/tab-widgets/inference-api/infer-api-mapping-widget.asciidoc[] [discrete] -[[infer-openai-inference-ingest-pipeline]] +[[infer-service-inference-ingest-pipeline]] ==== Create an ingest pipeline with an inference processor Create an <> with an -<> and use the OpenAI model you created -above to infer against the data that is being ingested in the -pipeline. +<> and use the model you created above to +infer against the data that is being ingested in the pipeline. -[source,console] --------------------------------------------------- -PUT _ingest/pipeline/openai_embeddings -{ - "processors": [ - { - "inference": { - "model_id": "openai_embeddings", <1> - "input_output": { <2> - "input_field": "content", - "output_field": "content_embedding" - } - } - } - ] -} --------------------------------------------------- -<1> The name of the inference model you created by using the -<>. -<2> Configuration object that defines the `input_field` for the {infer} process -and the `output_field` that will contain the {infer} results. - -//// -[source,console] ----- -DELETE _ingest/pipeline/openai_embeddings ----- -// TEST[continued] -//// +include::{es-repo-dir}/tab-widgets/inference-api/infer-api-ingest-pipeline-widget.asciidoc[] [discrete] @@ -157,32 +77,10 @@ you can see an index named `test-data` with 182469 documents. [[reindexing-data-infer]] ==== Ingest the data through the {infer} ingest pipeline -Create the embeddings from the text by reindexing the data throught the {infer} -pipeline that uses the OpenAI model as the inference model. +Create the embeddings from the text by reindexing the data through the {infer} +pipeline that uses the chosen model as the inference model. 
-[source,console] ----- -POST _reindex?wait_for_completion=false -{ - "source": { - "index": "test-data", - "size": 50 <1> - }, - "dest": { - "index": "openai-embeddings", - "pipeline": "openai_embeddings" - } -} ----- -// TEST[skip:TBD] -<1> The default batch size for reindexing is 1000. Reducing `size` to a smaller -number makes the update of the reindexing process quicker which enables you to -follow the progress closely and detect errors early. - -NOTE: The -https://platform.openai.com/account/limits[rate limit of your OpenAI account] -may affect the throughput of the reindexing process. If this happens, change -`size` to `3` or a similar value in magnitude. +include::{es-repo-dir}/tab-widgets/inference-api/infer-api-reindex-widget.asciidoc[] The call returns a task ID to monitor the progress: @@ -214,63 +112,4 @@ provide the query text and the model you have used to create the embeddings. NOTE: If you cancelled the reindexing process, you run the query only a part of the data which affects the quality of your results. -[source,console] --------------------------------------------------- -GET openai-embeddings/_search -{ - "knn": { - "field": "content_embedding", - "query_vector_builder": { - "text_embedding": { - "model_id": "openai_embeddings", - "model_text": "Calculate fuel cost" - } - }, - "k": 10, - "num_candidates": 100 - }, - "_source": [ - "id", - "content" - ] -} --------------------------------------------------- -// TEST[skip:TBD] - -As a result, you receive the top 10 documents that are closest in meaning to the -query from the `openai-embeddings` index sorted by their proximity to the query: - -[source,consol-result] --------------------------------------------------- -"hits": [ - { - "_index": "openai-embeddings", - "_id": "DDd5OowBHxQKHyc3TDSC", - "_score": 0.83704096, - "_source": { - "id": 862114, - "body": "How to calculate fuel cost for a road trip. By Tara Baukus Mello • Bankrate.com. Dear Driving for Dollars, My family is considering taking a long road trip to finish off the end of the summer, but I'm a little worried about gas prices and our overall fuel cost.It doesn't seem easy to calculate since we'll be traveling through many states and we are considering several routes.y family is considering taking a long road trip to finish off the end of the summer, but I'm a little worried about gas prices and our overall fuel cost. It doesn't seem easy to calculate since we'll be traveling through many states and we are considering several routes." - } - }, - { - "_index": "openai-embeddings", - "_id": "ajd5OowBHxQKHyc3TDSC", - "_score": 0.8345704, - "_source": { - "id": 820622, - "body": "Home Heating Calculator. Typically, approximately 50% of the energy consumed in a home annually is for space heating. When deciding on a heating system, many factors will come into play: cost of fuel, installation cost, convenience and life style are all important.This calculator can help you estimate the cost of fuel for different heating appliances.hen deciding on a heating system, many factors will come into play: cost of fuel, installation cost, convenience and life style are all important. This calculator can help you estimate the cost of fuel for different heating appliances." - } - }, - { - "_index": "openai-embeddings", - "_id": "Djd5OowBHxQKHyc3TDSC", - "_score": 0.8327426, - "_source": { - "id": 8202683, - "body": "Fuel is another important cost. This cost will depend on your boat, how far you travel, and how fast you travel. 
A 33-foot sailboat traveling at 7 knots should be able to travel 300 miles on 50 gallons of diesel fuel.If you are paying $4 per gallon, the trip would cost you $200.Most boats have much larger gas tanks than cars.uel is another important cost. This cost will depend on your boat, how far you travel, and how fast you travel. A 33-foot sailboat traveling at 7 knots should be able to travel 300 miles on 50 gallons of diesel fuel." - } - }, - (...) - ] --------------------------------------------------- -// NOTCONSOLE +include::{es-repo-dir}/tab-widgets/inference-api/infer-api-search-widget.asciidoc[] \ No newline at end of file diff --git a/docs/reference/security/authentication/oidc-guide.asciidoc b/docs/reference/security/authentication/oidc-guide.asciidoc index 41cead20789b6..c2112b949c540 100644 --- a/docs/reference/security/authentication/oidc-guide.asciidoc +++ b/docs/reference/security/authentication/oidc-guide.asciidoc @@ -198,6 +198,7 @@ For instance bin/elasticsearch-keystore add xpack.security.authc.realms.oidc.oidc1.rp.client_secret ---- +NOTE: Changes to the `client_secret` requires a restart of the {es} nodes to pick up the change. NOTE: According to the OpenID Connect specification, the OP should also make their configuration available at a well known URL, which is the concatenation of their `Issuer` value with the diff --git a/docs/reference/sql/endpoints/rest.asciidoc b/docs/reference/sql/endpoints/rest.asciidoc index 6c83b67a94385..258df3c8afc97 100644 --- a/docs/reference/sql/endpoints/rest.asciidoc +++ b/docs/reference/sql/endpoints/rest.asciidoc @@ -579,7 +579,7 @@ POST _sql?format=json "fetch_size": 5 } ---- -// TEST[skip:waiting on https://github.com/elastic/elasticsearch/issues/75069] +// TEST[skip:waiting on https://github.com/elastic/elasticsearch/issues/106158] // TEST[setup:library] // TEST[s/"wait_for_completion_timeout": "2s"/"wait_for_completion_timeout": "0"/] @@ -603,7 +603,7 @@ For CSV, TSV, and TXT responses, the API returns these values in the respective "rows": [ ] } ---- -// TESTRESPONSE[skip:waiting on https://github.com/elastic/elasticsearch/issues/75069] +// TESTRESPONSE[skip:waiting on https://github.com/elastic/elasticsearch/issues/106158] // TESTRESPONSE[s/FnR0TDhyWUVmUmVtWXRWZER4MXZiNFEad2F5UDk2ZVdTVHV1S0xDUy00SklUdzozMTU=/$body.id/] // TESTRESPONSE[s/"is_partial": true/"is_partial": $body.is_partial/] // TESTRESPONSE[s/"is_running": true/"is_running": $body.is_running/] @@ -630,7 +630,7 @@ complete results. 
"completion_status": 200 } ---- -// TESTRESPONSE[skip:waiting on https://github.com/elastic/elasticsearch/issues/75069] +// TESTRESPONSE[skip:waiting on https://github.com/elastic/elasticsearch/issues/106158] // TESTRESPONSE[s/FnR0TDhyWUVmUmVtWXRWZER4MXZiNFEad2F5UDk2ZVdTVHV1S0xDUy00SklUdzozMTU=/$body.id/] // TESTRESPONSE[s/"expiration_time_in_millis": 1611690295000/"expiration_time_in_millis": $body.expiration_time_in_millis/] @@ -663,7 +663,7 @@ POST _sql?format=json "fetch_size": 5 } ---- -// TEST[skip:waiting on https://github.com/elastic/elasticsearch/issues/75069] +// TEST[skip:waiting on https://github.com/elastic/elasticsearch/issues/106158] // TEST[setup:library] You can use the get async SQL search API's `keep_alive` parameter to later @@ -702,7 +702,7 @@ POST _sql?format=json "fetch_size": 5 } ---- -// TEST[skip:waiting on https://github.com/elastic/elasticsearch/issues/75069] +// TEST[skip:waiting on https://github.com/elastic/elasticsearch/issues/106158] // TEST[setup:library] If `is_partial` and `is_running` are `false`, the search was synchronous and @@ -719,7 +719,7 @@ returned complete results. "cursor": ... } ---- -// TESTRESPONSE[skip:waiting on https://github.com/elastic/elasticsearch/issues/75069] +// TESTRESPONSE[skip:waiting on https://github.com/elastic/elasticsearch/issues/106158] // TESTRESPONSE[s/Fnc5UllQdUVWU0NxRFNMbWxNYXplaFEaMUpYQ05oSkpTc3kwZ21EdC1tbFJXQTo0NzA=/$body.id/] // TESTRESPONSE[s/"rows": \.\.\./"rows": $body.rows/] // TESTRESPONSE[s/"columns": \.\.\./"columns": $body.columns/] diff --git a/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline-widget.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline-widget.asciidoc new file mode 100644 index 0000000000000..44d2f60966caa --- /dev/null +++ b/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline-widget.asciidoc @@ -0,0 +1,39 @@ +++++ +
+
+ + +
+
+++++ + +include::infer-api-ingest-pipeline.asciidoc[tag=cohere] + +++++ +
+ +
+++++ \ No newline at end of file diff --git a/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline.asciidoc new file mode 100644 index 0000000000000..a5a1910e8f8ef --- /dev/null +++ b/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline.asciidoc @@ -0,0 +1,63 @@ +//// + +[source,console] +---- +DELETE _ingest/pipeline/*_embeddings +---- +// TEST +// TEARDOWN + +//// + +// tag::cohere[] + +[source,console] +-------------------------------------------------- +PUT _ingest/pipeline/cohere_embeddings +{ + "processors": [ + { + "inference": { + "model_id": "cohere_embeddings", <1> + "input_output": { <2> + "input_field": "content", + "output_field": "content_embedding" + } + } + } + ] +} +-------------------------------------------------- +<1> The name of the inference configuration you created by using the +<>. +<2> Configuration object that defines the `input_field` for the {infer} process +and the `output_field` that will contain the {infer} results. + +// end::cohere[] + + +// tag::openai[] + +[source,console] +-------------------------------------------------- +PUT _ingest/pipeline/openai_embeddings +{ + "processors": [ + { + "inference": { + "model_id": "openai_embeddings", <1> + "input_output": { <2> + "input_field": "content", + "output_field": "content_embedding" + } + } + } + ] +} +-------------------------------------------------- +<1> The name of the inference configuration you created by using the +<>. +<2> Configuration object that defines the `input_field` for the {infer} process +and the `output_field` that will contain the {infer} results. + +// end::openai[] \ No newline at end of file diff --git a/docs/reference/tab-widgets/inference-api/infer-api-mapping-widget.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-mapping-widget.asciidoc new file mode 100644 index 0000000000000..336c8052c282f --- /dev/null +++ b/docs/reference/tab-widgets/inference-api/infer-api-mapping-widget.asciidoc @@ -0,0 +1,39 @@ +++++ +
+++++
+
+include::infer-api-mapping.asciidoc[tag=cohere]
+
+++++
+++++ \ No newline at end of file diff --git a/docs/reference/tab-widgets/inference-api/infer-api-mapping.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-mapping.asciidoc new file mode 100644 index 0000000000000..5ca5e0b7bf139 --- /dev/null +++ b/docs/reference/tab-widgets/inference-api/infer-api-mapping.asciidoc @@ -0,0 +1,71 @@ +// tag::cohere[] + +[source,console] +-------------------------------------------------- +PUT cohere-embeddings +{ + "mappings": { + "properties": { + "content_embedding": { <1> + "type": "dense_vector", <2> + "dims": 1024, <3> + "element_type": "byte" + }, + "content": { <4> + "type": "text" <5> + } + } + } +} +-------------------------------------------------- +<1> The name of the field to contain the generated tokens. It must be refrenced +in the {infer} pipeline configuration in the next step. +<2> The field to contain the tokens is a `dense_vector` field. +<3> The output dimensions of the model. Find this value in the +https://docs.cohere.com/reference/embed[Cohere documentation] of the model you +use. +<4> The name of the field from which to create the dense vector representation. +In this example, the name of the field is `content`. It must be referenced in +the {infer} pipeline configuration in the next step. +<5> The field type which is text in this example. + +// end::cohere[] + + +// tag::openai[] + +[source,console] +-------------------------------------------------- +PUT openai-embeddings +{ + "mappings": { + "properties": { + "content_embedding": { <1> + "type": "dense_vector", <2> + "dims": 1536, <3> + "element_type": "float", + "similarity": "dot_product" <4> + }, + "content": { <5> + "type": "text" <6> + } + } + } +} +-------------------------------------------------- +<1> The name of the field to contain the generated tokens. It must be refrenced +in the {infer} pipeline configuration in the next step. +<2> The field to contain the tokens is a `dense_vector` field. +<3> The output dimensions of the model. Find this value in the +https://platform.openai.com/docs/guides/embeddings/embedding-models[OpenAI documentation] +of the model you use. +<4> The faster` dot_product` function can be used to calculate similarity +because OpenAI embeddings are normalised to unit length. You can check the +https://platform.openai.com/docs/guides/embeddings/which-distance-function-should-i-use[OpenAI docs] +about which similarity function to use. +<5> The name of the field from which to create the dense vector representation. +In this example, the name of the field is `content`. It must be referenced in +the {infer} pipeline configuration in the next step. +<6> The field type which is text in this example. + +// end::openai[] \ No newline at end of file diff --git a/docs/reference/tab-widgets/inference-api/infer-api-reindex-widget.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-reindex-widget.asciidoc new file mode 100644 index 0000000000000..a73e4d7d76fc1 --- /dev/null +++ b/docs/reference/tab-widgets/inference-api/infer-api-reindex-widget.asciidoc @@ -0,0 +1,39 @@ +++++ +
+++++
+
+include::infer-api-reindex.asciidoc[tag=cohere]
+
+++++
+++++ \ No newline at end of file diff --git a/docs/reference/tab-widgets/inference-api/infer-api-reindex.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-reindex.asciidoc new file mode 100644 index 0000000000000..92e781f8b5a8a --- /dev/null +++ b/docs/reference/tab-widgets/inference-api/infer-api-reindex.asciidoc @@ -0,0 +1,55 @@ +// tag::cohere[] + +[source,console] +---- +POST _reindex?wait_for_completion=false +{ + "source": { + "index": "test-data", + "size": 50 <1> + }, + "dest": { + "index": "cohere-embeddings", + "pipeline": "cohere_embeddings" + } +} +---- +// TEST[skip:TBD] +<1> The default batch size for reindexing is 1000. Reducing `size` to a smaller +number makes the update of the reindexing process quicker which enables you to +follow the progress closely and detect errors early. + +NOTE: The +https://dashboard.cohere.com/billing[rate limit of your Cohere account] +may affect the throughput of the reindexing process. + +// end::cohere[] + + +// tag::openai[] + +[source,console] +---- +POST _reindex?wait_for_completion=false +{ + "source": { + "index": "test-data", + "size": 50 <1> + }, + "dest": { + "index": "openai-embeddings", + "pipeline": "openai_embeddings" + } +} +---- +// TEST[skip:TBD] +<1> The default batch size for reindexing is 1000. Reducing `size` to a smaller +number makes the update of the reindexing process quicker which enables you to +follow the progress closely and detect errors early. + +NOTE: The +https://platform.openai.com/account/limits[rate limit of your OpenAI account] +may affect the throughput of the reindexing process. If this happens, change +`size` to `3` or a similar value in magnitude. + +// end::openai[] \ No newline at end of file diff --git a/docs/reference/tab-widgets/inference-api/infer-api-requirements-widget.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-requirements-widget.asciidoc new file mode 100644 index 0000000000000..d1b981158c11b --- /dev/null +++ b/docs/reference/tab-widgets/inference-api/infer-api-requirements-widget.asciidoc @@ -0,0 +1,39 @@ +++++ +
+++++
+
+include::infer-api-requirements.asciidoc[tag=cohere]
+
+++++
+++++ \ No newline at end of file diff --git a/docs/reference/tab-widgets/inference-api/infer-api-requirements.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-requirements.asciidoc new file mode 100644 index 0000000000000..f0bed750b69c9 --- /dev/null +++ b/docs/reference/tab-widgets/inference-api/infer-api-requirements.asciidoc @@ -0,0 +1,14 @@ +// tag::cohere[] + +A https://cohere.com/[Cohere account] is required to use the {infer} API with +the Cohere service. + +// end::cohere[] + + +// tag::openai[] + +An https://openai.com/[OpenAI account] is required to use the {infer} API with +the OpenAI service. + +// end::openai[] \ No newline at end of file diff --git a/docs/reference/tab-widgets/inference-api/infer-api-search-widget.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-search-widget.asciidoc new file mode 100644 index 0000000000000..4433f2da067f1 --- /dev/null +++ b/docs/reference/tab-widgets/inference-api/infer-api-search-widget.asciidoc @@ -0,0 +1,39 @@ +++++ +
+++++
+
+include::infer-api-search.asciidoc[tag=cohere]
+
+++++
+++++ \ No newline at end of file diff --git a/docs/reference/tab-widgets/inference-api/infer-api-search.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-search.asciidoc new file mode 100644 index 0000000000000..1e8470471491f --- /dev/null +++ b/docs/reference/tab-widgets/inference-api/infer-api-search.asciidoc @@ -0,0 +1,139 @@ +// tag::cohere[] + +[source,console] +-------------------------------------------------- +GET cohere-embeddings/_search +{ + "knn": { + "field": "content_embedding", + "query_vector_builder": { + "text_embedding": { + "model_id": "cohere_embeddings", + "model_text": "Muscles in human body" + } + }, + "k": 10, + "num_candidates": 100 + }, + "_source": [ + "id", + "content" + ] +} +-------------------------------------------------- +// TEST[skip:TBD] + +As a result, you receive the top 10 documents that are closest in meaning to the +query from the `cohere-embeddings` index sorted by their proximity to the query: + +[source,consol-result] +-------------------------------------------------- +"hits": [ + { + "_index": "cohere-embeddings", + "_id": "-eFWCY4BECzWLnMZuI78", + "_score": 0.737484, + "_source": { + "id": 1690948, + "content": "Oxygen is supplied to the muscles via red blood cells. Red blood cells carry hemoglobin which oxygen bonds with as the hemoglobin rich blood cells pass through the blood vessels of the lungs.The now oxygen rich blood cells carry that oxygen to the cells that are demanding it, in this case skeletal muscle cells.ther ways in which muscles are supplied with oxygen include: 1 Blood flow from the heart is increased. 2 Blood flow to your muscles in increased. 3 Blood flow from nonessential organs is transported to working muscles." + } + }, + { + "_index": "cohere-embeddings", + "_id": "HuFWCY4BECzWLnMZuI_8", + "_score": 0.7176013, + "_source": { + "id": 1692482, + "content": "The thoracic cavity is separated from the abdominal cavity by the diaphragm. This is a broad flat muscle. (muscular) diaphragm The diaphragm is a muscle that separat…e the thoracic from the abdominal cavity. The pelvis is the lowest part of the abdominal cavity and it has no physical separation from it Diaphragm." + } + }, + { + "_index": "cohere-embeddings", + "_id": "IOFWCY4BECzWLnMZuI_8", + "_score": 0.7154432, + "_source": { + "id": 1692489, + "content": "Muscular Wall Separating the Abdominal and Thoracic Cavities; Thoracic Cavity of a Fetal Pig; In Mammals the Diaphragm Separates the Abdominal Cavity from the" + } + }, + { + "_index": "cohere-embeddings", + "_id": "C-FWCY4BECzWLnMZuI_8", + "_score": 0.695313, + "_source": { + "id": 1691493, + "content": "Burning, aching, tenderness and stiffness are just some descriptors of the discomfort you may feel in the muscles you exercised one to two days ago.For the most part, these sensations you experience after exercise are collectively known as delayed onset muscle soreness.urning, aching, tenderness and stiffness are just some descriptors of the discomfort you may feel in the muscles you exercised one to two days ago." + } + }, + (...) 
+ ] +-------------------------------------------------- +// NOTCONSOLE + +// end::cohere[] + + +// tag::openai[] + +[source,console] +-------------------------------------------------- +GET openai-embeddings/_search +{ + "knn": { + "field": "content_embedding", + "query_vector_builder": { + "text_embedding": { + "model_id": "openai_embeddings", + "model_text": "Calculate fuel cost" + } + }, + "k": 10, + "num_candidates": 100 + }, + "_source": [ + "id", + "content" + ] +} +-------------------------------------------------- +// TEST[skip:TBD] + +As a result, you receive the top 10 documents that are closest in meaning to the +query from the `openai-embeddings` index sorted by their proximity to the query: + +[source,consol-result] +-------------------------------------------------- +"hits": [ + { + "_index": "openai-embeddings", + "_id": "DDd5OowBHxQKHyc3TDSC", + "_score": 0.83704096, + "_source": { + "id": 862114, + "body": "How to calculate fuel cost for a road trip. By Tara Baukus Mello • Bankrate.com. Dear Driving for Dollars, My family is considering taking a long road trip to finish off the end of the summer, but I'm a little worried about gas prices and our overall fuel cost.It doesn't seem easy to calculate since we'll be traveling through many states and we are considering several routes.y family is considering taking a long road trip to finish off the end of the summer, but I'm a little worried about gas prices and our overall fuel cost. It doesn't seem easy to calculate since we'll be traveling through many states and we are considering several routes." + } + }, + { + "_index": "openai-embeddings", + "_id": "ajd5OowBHxQKHyc3TDSC", + "_score": 0.8345704, + "_source": { + "id": 820622, + "body": "Home Heating Calculator. Typically, approximately 50% of the energy consumed in a home annually is for space heating. When deciding on a heating system, many factors will come into play: cost of fuel, installation cost, convenience and life style are all important.This calculator can help you estimate the cost of fuel for different heating appliances.hen deciding on a heating system, many factors will come into play: cost of fuel, installation cost, convenience and life style are all important. This calculator can help you estimate the cost of fuel for different heating appliances." + } + }, + { + "_index": "openai-embeddings", + "_id": "Djd5OowBHxQKHyc3TDSC", + "_score": 0.8327426, + "_source": { + "id": 8202683, + "body": "Fuel is another important cost. This cost will depend on your boat, how far you travel, and how fast you travel. A 33-foot sailboat traveling at 7 knots should be able to travel 300 miles on 50 gallons of diesel fuel.If you are paying $4 per gallon, the trip would cost you $200.Most boats have much larger gas tanks than cars.uel is another important cost. This cost will depend on your boat, how far you travel, and how fast you travel. A 33-foot sailboat traveling at 7 knots should be able to travel 300 miles on 50 gallons of diesel fuel." + } + }, + (...) + ] +-------------------------------------------------- +// NOTCONSOLE + +// end::openai[] \ No newline at end of file diff --git a/docs/reference/tab-widgets/inference-api/infer-api-task-widget.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-task-widget.asciidoc new file mode 100644 index 0000000000000..bc54bf6b14ddf --- /dev/null +++ b/docs/reference/tab-widgets/inference-api/infer-api-task-widget.asciidoc @@ -0,0 +1,39 @@ +++++ +
+++++
+
+include::infer-api-task.asciidoc[tag=cohere]
+
+++++
+++++ \ No newline at end of file diff --git a/docs/reference/tab-widgets/inference-api/infer-api-task.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-task.asciidoc new file mode 100644 index 0000000000000..be0319fcf1ec1 --- /dev/null +++ b/docs/reference/tab-widgets/inference-api/infer-api-task.asciidoc @@ -0,0 +1,56 @@ +// tag::cohere[] + +[source,console] +------------------------------------------------------------ +PUT _inference/text_embedding/cohere_embeddings <1> +{ + "service": "cohere", + "service_settings": { + "api_key": "", <2> + "model_id": "embed-english-v3.0", <3> + "embedding_type": "int8" + }, + "task_settings": { + } +} +------------------------------------------------------------ +// TEST[skip:TBD] +<1> The task type is `text_embedding` in the path. +<2> The API key of your Cohere account. You can find your API keys in your +Cohere dashboard under the +https://dashboard.cohere.com/api-keys[API keys section]. You need to provide +your API key only once. The <> does not return your API +key. +<3> The name of the embedding model to use. You can find the list of Cohere +embedding models https://docs.cohere.com/reference/embed[here]. + +// end::cohere[] + + +// tag::openai[] + +[source,console] +------------------------------------------------------------ +PUT _inference/text_embedding/openai_embeddings <1> +{ + "service": "openai", + "service_settings": { + "api_key": "", <2> + "model_id": "text-embedding-ada-002" <3> + }, + "task_settings": { + } +} +------------------------------------------------------------ +// TEST[skip:TBD] +<1> The task type is `text_embedding` in the path. +<2> The API key of your OpenAI account. You can find your OpenAI API keys in +your OpenAI account under the +https://platform.openai.com/api-keys[API keys section]. You need to provide +your API key only once. The <> does not return your API +key. +<3> The name of the embedding model to use. You can find the list of OpenAI +embedding models +https://platform.openai.com/docs/guides/embeddings/embedding-models[here]. + +// end::openai[] \ No newline at end of file diff --git a/docs/reference/text-structure/apis/find-field-structure.asciidoc b/docs/reference/text-structure/apis/find-field-structure.asciidoc new file mode 100644 index 0000000000000..6788ddf7f42be --- /dev/null +++ b/docs/reference/text-structure/apis/find-field-structure.asciidoc @@ -0,0 +1,316 @@ +[role="xpack"] +[[find-field-structure]] += Find field structure API + +Finds the structure of a field in an Elasticsearch index. + +[discrete] +[[find-field-structure-request]] +== {api-request-title} + +`GET _text_structure/find_field_structure` + +[discrete] +[[find-field-structure-prereqs]] +== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `monitor_text_structure` or +`monitor` cluster privileges to use this API. See +<>. + +[discrete] +[[find-field-structure-desc]] +== {api-description-title} + +This API provides a starting point for extracting further information from log messages +already ingested into {es}. For example, if you have ingested data into a very simple +index that has just `@timestamp` and `message` fields, you can use this API to +see what common structure exists in the `message` field. + +The response from the API contains: + +* Sample messages. +* Statistics that reveal the most common values for all fields detected within +the text and basic numeric statistics for numeric fields. 
+* Information about the structure of the text, which is useful when you write +ingest configurations to index it or similarly formatted text. +* Appropriate mappings for an {es} index, which you could use to ingest the text. + +All this information can be calculated by the structure finder with no guidance. +However, you can optionally override some of the decisions about the text +structure by specifying one or more query parameters. + +Details of the output can be seen in the <>. + +If the structure finder produces unexpected results, +specify the `explain` query parameter and an `explanation` will appear in +the response. It helps determine why the returned structure was +chosen. + +[discrete] +[[find-field-structure-query-parms]] +== {api-query-parms-title} + +`index`:: +(Required, string) The name of the index containing the field. + +`field`:: +(Required, string) The name of the field that's analyzed. + +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-column-names] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-delimiter] + +`documents_to_sample`:: +(Optional, unsigned integer) The number of documents to include in the structural +analysis. The minimum is 2; the default is 1000. + +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-explain] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-format] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-grok-pattern] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-ecs-compatibility] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-quote] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-should-trim-fields] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-timeout] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-timestamp-field] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-timestamp-format] + +[discrete] +[[find-field-structure-examples]] +== {api-examples-title} + +[discrete] +[[find-field-structure-example]] +=== Analyzing Elasticsearch log files + +Suppose you have a list of {es} log messages in an index. 
+You can analyze them with the `find_field_structure` endpoint as follows: + +[source,console] +---- +POST _bulk?refresh=true +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:41,038][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-url]"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:41,042][INFO ][o.e.p.PluginsService ] [laptop] loaded module [rest-root]"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-core]"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-redact]"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [ingest-user-agent]"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-monitoring]"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-s3]"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-analytics]"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-ent-search]"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-autoscaling]"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-painless]]"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-expression]"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-eql]"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:43,291][INFO ][o.e.e.NodeEnvironment ] [laptop] heap size [16gb], compressed ordinary object pointers [true]"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:46,098][INFO ][o.e.x.s.Security ] [laptop] Security is enabled"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:47,227][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] Profiling is enabled"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:47,259][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] profiling index templates will not be installed or reinstalled"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:47,755][INFO ][o.e.i.r.RecoverySettings ] [laptop] using rate limit [40mb] with [default=40mb, read=0b, write=0b, max=0b]"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:47,787][INFO ][o.e.d.DiscoveryModule ] [laptop] using discovery type [multi-node] and seed hosts providers [settings]"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:49,188][INFO ][o.e.n.Node ] [laptop] initialized"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:49,199][INFO ][o.e.n.Node ] [laptop] starting ..."} + +GET _text_structure/find_field_structure?index=test-logs&field=message +---- +// TEST + +If the request does not 
encounter errors, you receive the following result: + +[source,console-result] +---- +{ + "num_lines_analyzed" : 22, + "num_messages_analyzed" : 22, + "sample_start" : "[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128\n[2024-03-05T10:52:41,038][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-url]\n", <3> + "charset" : "UTF-8", + "format" : "semi_structured_text", + "multiline_start_pattern" : "^\\[\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "grok_pattern" : "\\[%{TIMESTAMP_ISO8601:timestamp}\\]\\[%{LOGLEVEL:loglevel} \\]\\[.*", + "ecs_compatibility" : "disabled", + "timestamp_field" : "timestamp", + "joda_timestamp_formats" : [ + "ISO8601" + ], + "java_timestamp_formats" : [ + "ISO8601" + ], + "need_client_timezone" : true, + "mappings" : { + "properties" : { + "@timestamp" : { + "type" : "date" + }, + "loglevel" : { + "type" : "keyword" + }, + "message" : { + "type" : "text" + } + } + }, + "ingest_pipeline" : { + "description" : "Ingest pipeline created by text structure finder", + "processors" : [ + { + "grok" : { + "field" : "message", + "patterns" : [ + "\\[%{TIMESTAMP_ISO8601:timestamp}\\]\\[%{LOGLEVEL:loglevel} \\]\\[.*" + ], + "ecs_compatibility" : "disabled" + } + }, + { + "date" : { + "field" : "timestamp", + "timezone" : "{{ event.timezone }}", + "formats" : [ + "ISO8601" + ] + } + }, + { + "remove" : { + "field" : "timestamp" + } + } + ] + }, + "field_stats" : { + "loglevel" : { + "count" : 22, + "cardinality" : 1, + "top_hits" : [ + { + "value" : "INFO", + "count" : 22 + } + ] + }, + "message" : { + "count" : 22, + "cardinality" : 22, + "top_hits" : [ + { + "value" : "[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,038][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-url]", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,042][INFO ][o.e.p.PluginsService ] [laptop] loaded module [rest-root]", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [ingest-user-agent]", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-core]", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-redact]", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-painless]]", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-s3]", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-analytics]", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-autoscaling]", + "count" : 1 + } + ] + }, + "timestamp" : { + "count" : 22, + "cardinality" : 14, + "earliest" : "2024-03-05T10:52:36,256", + "latest" : "2024-03-05T10:52:49,199", + "top_hits" : [ + { + "value" : "2024-03-05T10:52:41,044", + "count" : 6 + }, + { + "value" : "2024-03-05T10:52:41,043", + "count" : 3 + }, + { + "value" : "2024-03-05T10:52:41,059", + "count" : 2 + }, + { + "value" : "2024-03-05T10:52:36,256", + "count" : 1 + }, + { + "value" : "2024-03-05T10:52:41,038", + "count" : 1 + }, + { + 
"value" : "2024-03-05T10:52:41,042", + "count" : 1 + }, + { + "value" : "2024-03-05T10:52:43,291", + "count" : 1 + }, + { + "value" : "2024-03-05T10:52:46,098", + "count" : 1 + }, + { + "value" : "2024-03-05T10:52:47,227", + "count" : 1 + }, + { + "value" : "2024-03-05T10:52:47,259", + "count" : 1 + } + ] + } + } +} +---- +// TESTRESPONSE[s/"sample_start" : ".*",/"sample_start" : "$body.sample_start",/] +// The substitution is because the text is pre-processed by the test harness, +// so the fields may get reordered in the JSON the endpoint sees + +For a detailed description of the response format, or for additional examples +on ingesting delimited text (such as CSV) or newline-delimited JSON, refer to the +<>. diff --git a/docs/reference/text-structure/apis/find-message-structure.asciidoc b/docs/reference/text-structure/apis/find-message-structure.asciidoc new file mode 100644 index 0000000000000..085f65b852126 --- /dev/null +++ b/docs/reference/text-structure/apis/find-message-structure.asciidoc @@ -0,0 +1,292 @@ +[role="xpack"] +[[find-message-structure]] += Find messages structure API + +Finds the structure of a list of text messages. + +[discrete] +[[find-message-structure-request]] +== {api-request-title} + +`GET _text_structure/find_message_structure` + +`POST _text_structure/find_message_structure` + +[discrete] +[[find-message-structure-prereqs]] +== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `monitor_text_structure` or +`monitor` cluster privileges to use this API. See +<>. + +[discrete] +[[find-message-structure-desc]] +== {api-description-title} + +This API provides a starting point for ingesting data into {es} in a format that +is suitable for subsequent use with other {stack} functionality. Use this +API in preference to `find_structure` when your input text has already been +split up into separate messages by some other process. + +The response from the API contains: + +* Sample messages. +* Statistics that reveal the most common values for all fields detected within +the text and basic numeric statistics for numeric fields. +* Information about the structure of the text, which is useful when you write +ingest configurations to index it or similarly formatted text. +* Appropriate mappings for an {es} index, which you could use to ingest the text. + +All this information can be calculated by the structure finder with no guidance. +However, you can optionally override some of the decisions about the text +structure by specifying one or more query parameters. + +Details of the output can be seen in the <>. + +If the structure finder produces unexpected results, +specify the `explain` query parameter and an `explanation` will appear in +the response. It helps determine why the returned structure was +chosen. 
+ +[discrete] +[[find-message-structure-query-parms]] +== {api-query-parms-title} + +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-column-names] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-delimiter] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-explain] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-format] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-grok-pattern] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-ecs-compatibility] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-quote] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-should-trim-fields] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-timeout] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-timestamp-field] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-timestamp-format] + +[discrete] +[[find-message-structure-request-body]] +== {api-request-body-title} + +`messages`:: +(Required, array of strings) +The list of messages you want to analyze. + +[discrete] +[[find-message-structure-examples]] +== {api-examples-title} + +[discrete] +[[find-message-structure-example]] +=== Analyzing Elasticsearch log files + +Suppose you have a list of {es} logs messages. +You can send it to the `find_message_structure` endpoint as follows: + +[source,console] +---- +POST _text_structure/find_message_structure +{ + "messages": [ + "[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128", + "[2024-03-05T10:52:41,038][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-url]", + "[2024-03-05T10:52:41,042][INFO ][o.e.p.PluginsService ] [laptop] loaded module [rest-root]", + "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-core]", + "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-redact]", + "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [ingest-user-agent]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-monitoring]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-s3]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-analytics]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-ent-search]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-autoscaling]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-painless]]", + "[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-expression]", + "[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-eql]", + "[2024-03-05T10:52:43,291][INFO ][o.e.e.NodeEnvironment ] [laptop] heap size [16gb], compressed ordinary object pointers [true]", + "[2024-03-05T10:52:46,098][INFO ][o.e.x.s.Security ] [laptop] Security is enabled", + "[2024-03-05T10:52:47,227][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] Profiling is enabled", + "[2024-03-05T10:52:47,259][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] 
profiling index templates will not be installed or reinstalled", + "[2024-03-05T10:52:47,755][INFO ][o.e.i.r.RecoverySettings ] [laptop] using rate limit [40mb] with [default=40mb, read=0b, write=0b, max=0b]", + "[2024-03-05T10:52:47,787][INFO ][o.e.d.DiscoveryModule ] [laptop] using discovery type [multi-node] and seed hosts providers [settings]", + "[2024-03-05T10:52:49,188][INFO ][o.e.n.Node ] [laptop] initialized", + "[2024-03-05T10:52:49,199][INFO ][o.e.n.Node ] [laptop] starting ..." + ] +} +---- +// TEST + +If the request does not encounter errors, you receive the following result: + +[source,console-result] +---- +{ + "num_lines_analyzed" : 22, + "num_messages_analyzed" : 22, + "sample_start" : "[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128\n[2024-03-05T10:52:41,038][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-url]\n", <3> + "charset" : "UTF-8", + "format" : "semi_structured_text", + "multiline_start_pattern" : "^\\[\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "grok_pattern" : "\\[%{TIMESTAMP_ISO8601:timestamp}\\]\\[%{LOGLEVEL:loglevel} \\]\\[.*", + "ecs_compatibility" : "disabled", + "timestamp_field" : "timestamp", + "joda_timestamp_formats" : [ + "ISO8601" + ], + "java_timestamp_formats" : [ + "ISO8601" + ], + "need_client_timezone" : true, + "mappings" : { + "properties" : { + "@timestamp" : { + "type" : "date" + }, + "loglevel" : { + "type" : "keyword" + }, + "message" : { + "type" : "text" + } + } + }, + "ingest_pipeline" : { + "description" : "Ingest pipeline created by text structure finder", + "processors" : [ + { + "grok" : { + "field" : "message", + "patterns" : [ + "\\[%{TIMESTAMP_ISO8601:timestamp}\\]\\[%{LOGLEVEL:loglevel} \\]\\[.*" + ], + "ecs_compatibility" : "disabled" + } + }, + { + "date" : { + "field" : "timestamp", + "timezone" : "{{ event.timezone }}", + "formats" : [ + "ISO8601" + ] + } + }, + { + "remove" : { + "field" : "timestamp" + } + } + ] + }, + "field_stats" : { + "loglevel" : { + "count" : 22, + "cardinality" : 1, + "top_hits" : [ + { + "value" : "INFO", + "count" : 22 + } + ] + }, + "message" : { + "count" : 22, + "cardinality" : 22, + "top_hits" : [ + { + "value" : "[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,038][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-url]", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,042][INFO ][o.e.p.PluginsService ] [laptop] loaded module [rest-root]", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [ingest-user-agent]", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-core]", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-redact]", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-painless]]", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-s3]", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-analytics]", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] 
[laptop] loaded module [x-pack-autoscaling]", + "count" : 1 + } + ] + }, + "timestamp" : { + "count" : 22, + "cardinality" : 14, + "earliest" : "2024-03-05T10:52:36,256", + "latest" : "2024-03-05T10:52:49,199", + "top_hits" : [ + { + "value" : "2024-03-05T10:52:41,044", + "count" : 6 + }, + { + "value" : "2024-03-05T10:52:41,043", + "count" : 3 + }, + { + "value" : "2024-03-05T10:52:41,059", + "count" : 2 + }, + { + "value" : "2024-03-05T10:52:36,256", + "count" : 1 + }, + { + "value" : "2024-03-05T10:52:41,038", + "count" : 1 + }, + { + "value" : "2024-03-05T10:52:41,042", + "count" : 1 + }, + { + "value" : "2024-03-05T10:52:43,291", + "count" : 1 + }, + { + "value" : "2024-03-05T10:52:46,098", + "count" : 1 + }, + { + "value" : "2024-03-05T10:52:47,227", + "count" : 1 + }, + { + "value" : "2024-03-05T10:52:47,259", + "count" : 1 + } + ] + } + } +} +---- +// TESTRESPONSE + +For a detailed description of the response format, or for additional examples +on ingesting delimited text (such as CSV) or newline-delimited JSON, refer to the +<>. diff --git a/docs/reference/text-structure/apis/find-structure-shared.asciidoc b/docs/reference/text-structure/apis/find-structure-shared.asciidoc new file mode 100644 index 0000000000000..67a85dd072a9a --- /dev/null +++ b/docs/reference/text-structure/apis/find-structure-shared.asciidoc @@ -0,0 +1,215 @@ +tag::param-charset[] +`charset`:: +(Optional, string) The text's character set. It must be a character set that is +supported by the JVM that {es} uses. For example, `UTF-8`, `UTF-16LE`, +`windows-1252`, or `EUC-JP`. If this parameter is not specified, the structure +finder chooses an appropriate character set. +end::param-charset[] + +tag::param-column-names[] +`column_names`:: +(Optional, string) If you have set `format` to `delimited`, you can specify the +column names in a comma-separated list. If this parameter is not specified, the +structure finder uses the column names from the header row of the text. If the +text does not have a header row, columns are named "column1", "column2", +"column3", etc. +end::param-column-names[] + +tag::param-delimiter[] +`delimiter`:: +(Optional, string) If you have set `format` to `delimited`, you can specify the +character used to delimit the values in each row. Only a single character is +supported; the delimiter cannot have multiple characters. By default, the API +considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). +In this default scenario, all rows must have the same number of fields for the +delimited format to be detected. If you specify a delimiter, up to 10% of the +rows can have a different number of columns than the first row. +end::param-delimiter[] + +tag::param-explain[] +`explain`:: +(Optional, Boolean) If `true`, the response includes a +field named `explanation`, which is an array of strings that indicate how the +structure finder produced its result. The default value is `false`. +end::param-explain[] + +tag::param-format[] +`format`:: +(Optional, string) The high level structure of the text. Valid values are +`ndjson`, `xml`, `delimited`, and `semi_structured_text`. By default, the API +chooses the format. In this default scenario, all rows must have the same number +of fields for a delimited format to be detected. If the `format` is set to +`delimited` and the `delimiter` is not set, however, the API tolerates up to 5% +of rows that have a different number of columns than the first row. 
+end::param-format[] + +tag::param-grok-pattern[] +`grok_pattern`:: +(Optional, string) If you have set `format` to `semi_structured_text`, you can +specify a Grok pattern that is used to extract fields from every message in the +text. The name of the timestamp field in the Grok pattern must match what is +specified in the `timestamp_field` parameter. If that parameter is not +specified, the name of the timestamp field in the Grok pattern must match +"timestamp". If `grok_pattern` is not specified, the structure finder creates a +Grok pattern. +end::param-grok-pattern[] + +tag::param-ecs-compatibility[] +`ecs_compatibility`:: +(Optional, string) The mode of compatibility with ECS compliant Grok patterns. +Use this parameter to specify whether to use ECS Grok patterns instead of +legacy ones when the structure finder creates a Grok pattern. Valid values +are `disabled` and `v1`. The default value is `disabled`. This setting primarily +has an impact when a whole message Grok pattern such as `%{CATALINALOG}` +matches the input. If the structure finder identifies a common structure but +has no idea of meaning then generic field names such as `path`, `ipaddress`, +`field1` and `field2` are used in the `grok_pattern` output, with the intention +that a user who knows the meanings rename these fields before using it. +end::param-ecs-compatibility[] + +tag::param-has-header-row[] +`has_header_row`:: +(Optional, Boolean) If you have set `format` to `delimited`, you can use this +parameter to indicate whether the column names are in the first row of the text. +If this parameter is not specified, the structure finder guesses based on the +similarity of the first row of the text to other rows. +end::param-has-header-row[] + +tag::param-line-merge-size-limit[] +`line_merge_size_limit`:: +(Optional, unsigned integer) The maximum number of characters in a message when +lines are merged to form messages while analyzing semi-structured text. The +default is `10000`. If you have extremely long messages you may need to increase +this, but be aware that this may lead to very long processing times if the way +to group lines into messages is misdetected. +end::param-line-merge-size-limit[] + +tag::param-lines-to-sample[] +`lines_to_sample`:: +(Optional, unsigned integer) The number of lines to include in the structural +analysis, starting from the beginning of the text. The minimum is 2; the default +is `1000`. If the value of this parameter is greater than the number of lines in +the text, the analysis proceeds (as long as there are at least two lines in the +text) for all of the lines. ++ +-- +NOTE: The number of lines and the variation of the lines affects the speed of +the analysis. For example, if you upload text where the first 1000 lines +are all variations on the same message, the analysis will find more commonality +than would be seen with a bigger sample. If possible, however, it is more +efficient to upload sample text with more variety in the first 1000 lines than +to request analysis of 100000 lines to achieve some variety. + +-- +end::param-lines-to-sample[] + +tag::param-quote[] +`quote`:: +(Optional, string) If you have set `format` to `delimited`, you can specify the +character used to quote the values in each row if they contain newlines or the +delimiter character. Only a single character is supported. If this parameter is +not specified, the default value is a double quote (`"`). 
If your delimited text +format does not use quoting, a workaround is to set this argument to a character +that does not appear anywhere in the sample. +end::param-quote[] + +tag::param-should-trim-fields[] +`should_trim_fields`:: +(Optional, Boolean) If you have set `format` to `delimited`, you can specify +whether values between delimiters should have whitespace trimmed from them. If +this parameter is not specified and the delimiter is pipe (`|`), the default +value is `true`. Otherwise, the default value is `false`. +end::param-should-trim-fields[] + +tag::param-timeout[] +`timeout`:: +(Optional, <>) Sets the maximum amount of time that the +structure analysis may take. If the analysis is still running when the timeout +expires then it will be stopped. The default value is 25 seconds. +end::param-timeout[] + +tag::param-timestamp-field[] +`timestamp_field`:: +(Optional, string) The name of the field that contains the primary timestamp of +each record in the text. In particular, if the text were ingested into an index, +this is the field that would be used to populate the `@timestamp` field. ++ +-- +If the `format` is `semi_structured_text`, this field must match the name of the +appropriate extraction in the `grok_pattern`. Therefore, for semi-structured +text, it is best not to specify this parameter unless `grok_pattern` is +also specified. + +For structured text, if you specify this parameter, the field must exist +within the text. + +If this parameter is not specified, the structure finder makes a decision about +which field (if any) is the primary timestamp field. For structured text, +it is not compulsory to have a timestamp in the text. +-- +end::param-timestamp-field[] + +tag::param-timestamp-format[] +`timestamp_format`:: +(Optional, string) The Java time format of the timestamp field in the text. ++ +-- +Only a subset of Java time format letter groups are supported: + +* `a` +* `d` +* `dd` +* `EEE` +* `EEEE` +* `H` +* `HH` +* `h` +* `M` +* `MM` +* `MMM` +* `MMMM` +* `mm` +* `ss` +* `XX` +* `XXX` +* `yy` +* `yyyy` +* `zzz` + +Additionally `S` letter groups (fractional seconds) of length one to nine are +supported providing they occur after `ss` and separated from the `ss` by a `.`, +`,` or `:`. Spacing and punctuation is also permitted with the exception of `?`, +newline and carriage return, together with literal text enclosed in single +quotes. For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override +format. + +One valuable use case for this parameter is when the format is semi-structured +text, there are multiple timestamp formats in the text, and you know which +format corresponds to the primary timestamp, but you do not want to specify the +full `grok_pattern`. Another is when the timestamp format is one that the +structure finder does not consider by default. + +If this parameter is not specified, the structure finder chooses the best +format from a built-in set. + +If the special value `null` is specified the structure finder will not look +for a primary timestamp in the text. When the format is semi-structured text +this will result in the structure finder treating the text as single-line +messages. 
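+
+As an illustrative sketch (shown here with the `find_message_structure`
+endpoint, which also accepts this parameter), the literal `null` override is
+passed as a query parameter; the two sample messages are placeholders that
+contain no timestamps:
+
+[source,console]
+----
+POST _text_structure/find_message_structure?timestamp_format=null
+{
+  "messages": [
+    "user=alice action=login status=ok",
+    "user=bob action=logout status=ok"
+  ]
+}
+----
+// TEST[skip:illustrative sketch only]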
+ +The following table provides the appropriate `timeformat` values for some example timestamps: + +|=== +| Timeformat | Presentation + +| yyyy-MM-dd HH:mm:ssZ | 2019-04-20 13:15:22+0000 +| EEE, d MMM yyyy HH:mm:ss Z | Sat, 20 Apr 2019 13:15:22 +0000 +| dd.MM.yy HH:mm:ss.SSS | 20.04.19 13:15:22.285 +|=== + +Refer to +https://docs.oracle.com/javase/8/docs/api/java/time/format/DateTimeFormatter.html[the Java date/time format documentation] +for more information about date and time format syntax. + +-- +end::param-timestamp-format[] diff --git a/docs/reference/text-structure/apis/find-structure.asciidoc b/docs/reference/text-structure/apis/find-structure.asciidoc index a65f87290b0a8..b49b0f3526689 100644 --- a/docs/reference/text-structure/apis/find-structure.asciidoc +++ b/docs/reference/text-structure/apis/find-structure.asciidoc @@ -1,6 +1,6 @@ [role="xpack"] [[find-structure]] -= Find structure API += Find text structure API Finds the structure of text. The text must contain data that is suitable to be ingested into the @@ -55,190 +55,21 @@ chosen. [[find-structure-query-parms]] == {api-query-parms-title} -`charset`:: -(Optional, string) The text's character set. It must be a character set that is -supported by the JVM that {es} uses. For example, `UTF-8`, `UTF-16LE`, -`windows-1252`, or `EUC-JP`. If this parameter is not specified, the structure -finder chooses an appropriate character set. - -`column_names`:: -(Optional, string) If you have set `format` to `delimited`, you can specify the -column names in a comma-separated list. If this parameter is not specified, the -structure finder uses the column names from the header row of the text. If the -text does not have a header role, columns are named "column1", "column2", -"column3", etc. - -`delimiter`:: -(Optional, string) If you have set `format` to `delimited`, you can specify the -character used to delimit the values in each row. Only a single character is -supported; the delimiter cannot have multiple characters. By default, the API -considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). -In this default scenario, all rows must have the same number of fields for the -delimited format to be detected. If you specify a delimiter, up to 10% of the -rows can have a different number of columns than the first row. - -`explain`:: -(Optional, Boolean) If this parameter is set to `true`, the response includes a -field named `explanation`, which is an array of strings that indicate how the -structure finder produced its result. The default value is `false`. - -`format`:: -(Optional, string) The high level structure of the text. Valid values are -`ndjson`, `xml`, `delimited`, and `semi_structured_text`. By default, the API -chooses the format. In this default scenario, all rows must have the same number -of fields for a delimited format to be detected. If the `format` is set to -`delimited` and the `delimiter` is not set, however, the API tolerates up to 5% -of rows that have a different number of columns than the first row. - -`grok_pattern`:: -(Optional, string) If you have set `format` to `semi_structured_text`, you can -specify a Grok pattern that is used to extract fields from every message in the -text. The name of the timestamp field in the Grok pattern must match what is -specified in the `timestamp_field` parameter. If that parameter is not -specified, the name of the timestamp field in the Grok pattern must match -"timestamp". If `grok_pattern` is not specified, the structure finder creates a -Grok pattern. 
- -`ecs_compatibility`:: -(Optional, string) The mode of compatibility with ECS compliant Grok patterns. -Use this parameter to specify whether to use ECS Grok patterns instead of -legacy ones when the structure finder creates a Grok pattern. Valid values -are `disabled` and `v1`. The default value is `disabled`. This setting primarily -has an impact when a whole message Grok pattern such as `%{CATALINALOG}` -matches the input. If the structure finder identifies a common structure but -has no idea of meaning then generic field names such as `path`, `ipaddress`, -`field1` and `field2` are used in the `grok_pattern` output, with the intention -that a user who knows the meanings rename these fields before using it. -`has_header_row`:: -(Optional, Boolean) If you have set `format` to `delimited`, you can use this -parameter to indicate whether the column names are in the first row of the text. -If this parameter is not specified, the structure finder guesses based on the -similarity of the first row of the text to other rows. - -`line_merge_size_limit`:: -(Optional, unsigned integer) The maximum number of characters in a message when -lines are merged to form messages while analyzing semi-structured text. The -default is `10000`. If you have extremely long messages you may need to increase -this, but be aware that this may lead to very long processing times if the way -to group lines into messages is misdetected. - -`lines_to_sample`:: -(Optional, unsigned integer) The number of lines to include in the structural -analysis, starting from the beginning of the text. The minimum is 2; the default -is `1000`. If the value of this parameter is greater than the number of lines in -the text, the analysis proceeds (as long as there are at least two lines in the -text) for all of the lines. -+ --- -NOTE: The number of lines and the variation of the lines affects the speed of -the analysis. For example, if you upload text where the first 1000 lines -are all variations on the same message, the analysis will find more commonality -than would be seen with a bigger sample. If possible, however, it is more -efficient to upload sample text with more variety in the first 1000 lines than -to request analysis of 100000 lines to achieve some variety. - --- - -`quote`:: -(Optional, string) If you have set `format` to `delimited`, you can specify the -character used to quote the values in each row if they contain newlines or the -delimiter character. Only a single character is supported. If this parameter is -not specified, the default value is a double quote (`"`). If your delimited text -format does not use quoting, a workaround is to set this argument to a character -that does not appear anywhere in the sample. - -`should_trim_fields`:: -(Optional, Boolean) If you have set `format` to `delimited`, you can specify -whether values between delimiters should have whitespace trimmed from them. If -this parameter is not specified and the delimiter is pipe (`|`), the default -value is `true`. Otherwise, the default value is `false`. - -`timeout`:: -(Optional, <>) Sets the maximum amount of time that the -structure analysis make take. If the analysis is still running when the timeout -expires then it will be aborted. The default value is 25 seconds. - -`timestamp_field`:: -(Optional, string) The name of the field that contains the primary timestamp of -each record in the text. In particular, if the text were ingested into an index, -this is the field that would be used to populate the `@timestamp` field. 
-+ --- -If the `format` is `semi_structured_text`, this field must match the name of the -appropriate extraction in the `grok_pattern`. Therefore, for semi-structured -text, it is best not to specify this parameter unless `grok_pattern` is -also specified. - -For structured text, if you specify this parameter, the field must exist -within the text. - -If this parameter is not specified, the structure finder makes a decision about -which field (if any) is the primary timestamp field. For structured text, -it is not compulsory to have a timestamp in the text. --- - -`timestamp_format`:: -(Optional, string) The Java time format of the timestamp field in the text. -+ --- -Only a subset of Java time format letter groups are supported: - -* `a` -* `d` -* `dd` -* `EEE` -* `EEEE` -* `H` -* `HH` -* `h` -* `M` -* `MM` -* `MMM` -* `MMMM` -* `mm` -* `ss` -* `XX` -* `XXX` -* `yy` -* `yyyy` -* `zzz` - -Additionally `S` letter groups (fractional seconds) of length one to nine are -supported providing they occur after `ss` and separated from the `ss` by a `.`, -`,` or `:`. Spacing and punctuation is also permitted with the exception of `?`, -newline and carriage return, together with literal text enclosed in single -quotes. For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override -format. - -One valuable use case for this parameter is when the format is semi-structured -text, there are multiple timestamp formats in the text, and you know which -format corresponds to the primary timestamp, but you do not want to specify the -full `grok_pattern`. Another is when the timestamp format is one that the -structure finder does not consider by default. - -If this parameter is not specified, the structure finder chooses the best -format from a built-in set. - -If the special value `null` is specified the structure finder will not look -for a primary timestamp in the text. When the format is semi-structured text -this will result in the structure finder treating the text as single-line -messages. - -The following table provides the appropriate `timeformat` values for some example timestamps: - -|=== -| Timeformat | Presentation - -| yyyy-MM-dd HH:mm:ssZ | 2019-04-20 13:15:22+0000 -| EEE, d MMM yyyy HH:mm:ss Z | Sat, 20 Apr 2019 13:15:22 +0000 -| dd.MM.yy HH:mm:ss.SSS | 20.04.19 13:15:22.285 -|=== - -See -https://docs.oracle.com/javase/8/docs/api/java/time/format/DateTimeFormatter.html[the Java date/time format documentation] -for more information about date and time format syntax. 
- --- +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-charset] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-column-names] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-delimiter] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-explain] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-format] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-grok-pattern] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-ecs-compatibility] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-has-header-row] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-line-merge-size-limit] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-lines-to-sample] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-quote] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-should-trim-fields] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-timeout] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-timestamp-field] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-timestamp-format] [discrete] [[find-structure-request-body]] diff --git a/docs/reference/text-structure/apis/index.asciidoc b/docs/reference/text-structure/apis/index.asciidoc index 8628badba7e78..9f4af120690f7 100644 --- a/docs/reference/text-structure/apis/index.asciidoc +++ b/docs/reference/text-structure/apis/index.asciidoc @@ -4,8 +4,12 @@ You can use the following APIs to find text structures: +* <> +* <> * <> * <> +include::find-field-structure.asciidoc[leveloffset=+2] +include::find-message-structure.asciidoc[leveloffset=+2] include::find-structure.asciidoc[leveloffset=+2] include::test-grok-pattern.asciidoc[leveloffset=+2] diff --git a/docs/reference/transform/images/transform-alert-actions.png b/docs/reference/transform/images/transform-alert-actions.png index ee3328ebd9907..a78c02fa305cd 100644 Binary files a/docs/reference/transform/images/transform-alert-actions.png and b/docs/reference/transform/images/transform-alert-actions.png differ diff --git a/docs/reference/transform/images/transform-alert-summary-actions.png b/docs/reference/transform/images/transform-alert-summary-actions.png new file mode 100644 index 0000000000000..3ec2bd86f3735 Binary files /dev/null and b/docs/reference/transform/images/transform-alert-summary-actions.png differ diff --git a/docs/reference/transform/transform-alerts.asciidoc b/docs/reference/transform/transform-alerts.asciidoc index 5c3604bf6bc3e..953018b3b6b02 100644 --- a/docs/reference/transform/transform-alerts.asciidoc +++ b/docs/reference/transform/transform-alerts.asciidoc @@ -1,23 +1,18 @@ -[role="xpack"] [[transform-alerts]] = Generating alerts for {transforms} - -beta::[] - -{kib} {alert-features} include support for {transform} rules, which check the -health of {ctransforms} with certain conditions. If the conditions of the rule -are met, an alert is created and the associated action is triggered. For +:frontmatter-description: Create {transform} health rules. 
+:frontmatter-tags-products: [alerting] +:frontmatter-tags-content-type: [how-to] +:frontmatter-tags-user-goals: [configure] + +{kib} {alert-features} include support for {transform} health rules, which +check the health of {ctransforms} with certain conditions. If the conditions of +the rule are met, an alert is created and the associated actions run. For example, you can create a rule to check if a {ctransform} is started and to notify you in an email if it is not. To learn more about {kib} {alert-features}, refer to {kibana-ref}/alerting-getting-started.html#alerting-getting-started[Alerting]. -The following {transform} rules are available: - -{transform-cap} health:: - Monitors {transforms} health and alerts if an operational issue occurred. - - [[creating-transform-rules]] == Creating a rule @@ -30,14 +25,11 @@ tags. Select the {transform} health rule type: image::images/transform-rule.png["Creating a transform health rule",500] // NOTE: This is screenshot is automatically generated. Do not edit it directly. -[[creating-transform-health-rules]] -=== {transform-cap} health - Select the {transform} or {transforms} to include. You can also use a special character (`*`) to apply the rule to all your {transforms}. {transforms-cap} created after the rule are automatically included. -The following health check is available and enabled by default: +The following health checks are available and enabled by default: _{transform-cap} is not started_:: Notifies if the corresponding {transforms} is not started or it does not index @@ -53,35 +45,50 @@ image::images/transform-check-config.png["Selecting health check",500] As the last step in the rule creation process, define its actions. - [[defining-actions]] == Defining actions You can add one or more actions to your rule to generate notifications when its -conditions are met and when they are no longer met. +conditions are met and when they are no longer met. In particular, this rule +type supports: -Each action uses a connector, which stores connection information for a {kib} -service or supported third-party integration, depending on where you want to -send the notifications. For example, you can use a Slack connector to send a -message to a channel. Or you can use an index connector that writes an JSON -object to a specific index. For details about creating connectors, refer to +* alert summaries +* actions that run when an issue is detected +* recovery actions that run when the rule conditions are no longer met + +For each action, you must choose a connector, which provides connection +information for a {kib} service or third party integration. For more information +about all the supported connectors, go to {kibana-ref}/action-types.html[Connectors]. -You must set the action frequency, which involves choosing how often to run -the action (for example, at each check interval, only when the alert status -changes, or at a custom action interval). Each rule type also has a list of -valid action groups and you must choose one of these groups (for example, the -action runs when the issue is detected or when it is recovered). +After you select a connector, you must set the action frequency. You can choose +to create a summary of alerts on each check interval or on a custom interval. +For example, send notifications that summarize the new, ongoing, and recovered +alerts: + +[role="screenshot"] +image::images/transform-alert-summary-actions.png["Setting action frequency to summary of alerts",500] +// NOTE: This is screenshot is automatically generated. 
Do not edit it directly. TIP: If you choose a custom action interval, it cannot be shorter than the rule's check interval. -It's also possible to customize the notification messages for each action. A -list of variables is available to include in the message, like {transform} ID, -description, {transform} state, and so on. +Alternatively, you can set the action frequency such that actions run for each +alert. Choose how often the action runs (at each check interval, only when the +alert status changes, or at a custom action interval). You must also choose an +action group, which indicates whether the action runs when the issue is detected +or when it is recovered. + +You can further refine the conditions under which actions run by specifying that +actions only run when they match a KQL query or when an alert occurs within a +specific time frame. + +There is a set of variables that you can use to customize the notification +messages for each action. Click the icon above the message text box to get the +list of variables or refer to <>. [role="screenshot"] -image::images/transform-alert-actions.png["Selecting connector type",500] +image::images/transform-alert-actions.png["Selecting action variables",500] // NOTE: This is screenshot is automatically generated. Do not edit it directly. After you save the configurations, the rule appears in the *{rules-ui}* list @@ -92,3 +99,44 @@ The name of an alert is always the same as the {transform} ID of the associated {transform} that triggered it. You can mute the notifications for a particular {transform} on the page of the rule that lists the individual alerts. You can open it via *{rules-ui}* by selecting the rule name. + +[[transform-action-variables]] +== Action variables + +The following variables are specific to the {transform} health rule type. +You can also specify {kibana-ref}/rule-action-variables.html[variables common to all rules]. + +`context.message`:: +A preconstructed message for the rule. For example: `Transform test-1 is not started.` + +`context.results`:: +The most recent results, which you can iterate over by using the +https://mustache.github.io/[Mustache] template array syntax. For example, the +message in an email connector action might contain: ++ +-- +[source,sh] +-------------------------------------------------- +[{{rule.name}}] Transform health check result: +{{context.message}} +{{#context.results}} + Transform ID: {{transform_id}} + {{#description}}Transform description: {{description}} + {{/description}}{{#transform_state}}Transform state: {{transform_state}} + {{/transform_state}}{{#health_status}}Transform health status: {{health_status}} + {{/health_status}}{{#issues}}Issue: {{issue}} + Issue count: {{count}} + {{#details}}Issue details: {{details}} + {{/details}}{{#first_occurrence}}First occurrence: {{first_occurrence}} + {{/first_occurrence}} + {{/issues}}{{#failure_reason}}Failure reason: {{failure_reason}} + {{/failure_reason}}{{#notification_message}}Notification message: {{notification_message}} + {{/notification_message}}{{#node_name}}Node name: {{node_name}} + {{/node_name}}{{#timestamp}}Timestamp: {{timestamp}} + {{/timestamp}} +{{/context.results}} +-------------------------------------------------- +-- + +For more examples, refer to +{kibana-ref}/rule-action-variables.html[Rule action variables]. 
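
For a connector that writes a JSON document (an index connector, for example), a much
smaller template that relies only on the preconstructed message and the common rule
variables is often enough; the field names below are illustrative rather than a
required schema:

[source,sh]
----
{
  "rule": "{{rule.name}}",
  "detail": "{{context.message}}"
}
----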
\ No newline at end of file diff --git a/docs/reference/troubleshooting/common-issues/mapping-explosion.asciidoc b/docs/reference/troubleshooting/common-issues/mapping-explosion.asciidoc index 5ba18df3e6a6b..fd1a31228c95f 100644 --- a/docs/reference/troubleshooting/common-issues/mapping-explosion.asciidoc +++ b/docs/reference/troubleshooting/common-issues/mapping-explosion.asciidoc @@ -24,7 +24,8 @@ reporting that the coordinating node is waiting for all other nodes to confirm they are on mapping update request. * Discover's **Fields for wildcard** page-loading API command or {kibana-ref}/console-kibana.html[Dev Tools] page-refreshing Autocomplete API commands are taking a long time (more than 10 seconds) or -timing out in the browser's Developer Tools Network tab. +timing out in the browser's Developer Tools Network tab. For more +information, refer to our https://www.elastic.co/blog/troubleshooting-guide-common-issues-kibana-discover-load[walkthrough on troubleshooting Discover]. * Discover's **Available fields** taking a long time to compile Javascript in the browser's Developer Tools Performance tab. This may potentially escalate to temporary browser page unresponsiveness. diff --git a/docs/reference/watcher/actions/email.asciidoc b/docs/reference/watcher/actions/email.asciidoc index 71fdd95148d24..16b9cc4be0628 100644 --- a/docs/reference/watcher/actions/email.asciidoc +++ b/docs/reference/watcher/actions/email.asciidoc @@ -149,8 +149,10 @@ killed by firewalls or load balancers in-between. means, by default watcher tries to download a dashboard for 10 minutes, forty times fifteen seconds). The setting `xpack.notification.reporting.interval` can be configured globally to change the default. -| `request.auth` | Additional auth configuration for the request -| `request.proxy` | Additional proxy configuration for the request +| `auth` | Additional auth configuration for the request, see + {kibana-ref}/automating-report-generation.html#use-watcher[use watcher] for details +| `proxy` | Additional proxy configuration for the request. See <> + on how to configure the values. 
|====== diff --git a/gradle/build.versions.toml b/gradle/build.versions.toml index a17f77c6b4917..bbcad622cf5e5 100644 --- a/gradle/build.versions.toml +++ b/gradle/build.versions.toml @@ -17,7 +17,7 @@ commons-codec = "commons-codec:commons-codec:1.11" commmons-io = "commons-io:commons-io:2.2" docker-compose = "com.avast.gradle:gradle-docker-compose-plugin:0.17.5" forbiddenApis = "de.thetaphi:forbiddenapis:3.6" -gradle-enterprise = "com.gradle:gradle-enterprise-gradle-plugin:3.16.1" +gradle-enterprise = "com.gradle:gradle-enterprise-gradle-plugin:3.16.2" hamcrest = "org.hamcrest:hamcrest:2.1" httpcore = "org.apache.httpcomponents:httpcore:4.4.12" httpclient = "org.apache.httpcomponents:httpclient:4.5.14" diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index d7e4e1c723a24..a9d24d4d50b17 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -736,9 +736,9 @@ - - - + + + @@ -1594,6 +1594,11 @@ + + + + + @@ -2648,124 +2653,124 @@ - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + diff --git a/libs/build.gradle b/libs/build.gradle index a88618aea2fcc..afb82b5e63864 100644 --- a/libs/build.gradle +++ b/libs/build.gradle @@ -6,41 +6,10 @@ * Side Public License, v 1. */ -configure(subprojects - project('elasticsearch-log4j')) { +configure(childProjects.values() - project('elasticsearch-log4j')) { /* * All subprojects are java projects using Elasticsearch's standard build * tools. */ apply plugin: 'elasticsearch.build' - - /* - * Subprojects may depend on the "core" lib but may not depend on any - * other libs. This keeps our dependencies simpler. - * With the exception that specialised plugin apis can depend on "core" plugin api project - */ - project.afterEvaluate { - configurations.all { Configuration conf -> - dependencies.matching { it instanceof ProjectDependency }.all { ProjectDependency dep -> - Project depProject = dep.dependencyProject - if (depProject != null - && false == isPluginApi(project, depProject) - && false == depProject.path.equals(':libs:elasticsearch-x-content') - && false == depProject.path.equals(':libs:elasticsearch-core') - && false == depProject.path.equals(':libs:elasticsearch-plugin-api') - && false == depProject.path.equals(':libs:elasticsearch-logging') - && false == depProject.path.equals(':libs:elasticsearch-native') - && depProject.path.startsWith(':libs') - && depProject.name.startsWith('elasticsearch-')) { - throw new InvalidUserDataException("projects in :libs " - + "may not depend on other projects libs except " - + ":libs:elasticsearch-core but " - + "${project.path} depends on ${depProject.path}") - } - } - } - } -} - -boolean isPluginApi(Project project, Project depProject) { - return project.path.matches(".*elasticsearch-plugin-.*api") } diff --git a/libs/core/src/main/java/org/elasticsearch/core/Predicates.java b/libs/core/src/main/java/org/elasticsearch/core/Predicates.java new file mode 100644 index 0000000000000..47ac9ef258d68 --- /dev/null +++ b/libs/core/src/main/java/org/elasticsearch/core/Predicates.java @@ -0,0 +1,92 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.core; + +import java.util.function.Predicate; + +/** + * Utilities around predicates. + */ +public enum Predicates { + ; + + @SuppressWarnings("rawtypes") + private static final Predicate NEVER = new Predicate() { + @Override + public boolean test(Object o) { + return false; + } + + @Override + public Predicate and(Predicate other) { + return this; + } + + @Override + public Predicate negate() { + return ALWAYS; + } + + @Override + public Predicate or(Predicate other) { + return other; + } + + @Override + public String toString() { + return "Predicate[NEVER]"; + } + }; + + @SuppressWarnings("rawtypes") + private static final Predicate ALWAYS = new Predicate() { + @Override + public boolean test(Object o) { + return true; + } + + @Override + public Predicate and(Predicate other) { + return other; + } + + @Override + public Predicate negate() { + return NEVER; + } + + @Override + public Predicate or(Predicate other) { + return this; + } + + @Override + public String toString() { + return "Predicate[ALWAYS]"; + } + }; + + /** + * @return a predicate that accepts all input values + * @param type of the predicate + */ + @SuppressWarnings("unchecked") + public static Predicate always() { + return (Predicate) ALWAYS; + } + + /** + * @return a predicate that rejects all input values + * @param type of the predicate + */ + @SuppressWarnings("unchecked") + public static Predicate never() { + return (Predicate) NEVER; + } +} diff --git a/libs/geo/src/test/java/org/elasticsearch/geometry/simplify/Vector3DTests.java b/libs/geo/src/test/java/org/elasticsearch/geometry/simplify/Vector3DTests.java index a84fee8c892b7..5a88091cbcd4f 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geometry/simplify/Vector3DTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geometry/simplify/Vector3DTests.java @@ -9,9 +9,9 @@ package org.elasticsearch.geometry.simplify; import org.elasticsearch.test.ESTestCase; -import org.hamcrest.BaseMatcher; import org.hamcrest.Description; import org.hamcrest.Matcher; +import org.hamcrest.TypeSafeMatcher; import static java.lang.Math.toRadians; import static org.elasticsearch.geometry.simplify.SimplificationErrorCalculator.Point3D.from; @@ -238,7 +238,7 @@ private static Matcher samePoint(Simplifi return new TestPoint3DMatcher(expected, 1e-15); } - private static class TestPoint3DMatcher extends BaseMatcher { + private static class TestPoint3DMatcher extends TypeSafeMatcher { private final Matcher xMatcher; private final Matcher yMatcher; private final Matcher zMatcher; @@ -252,11 +252,8 @@ private static class TestPoint3DMatcher extends BaseMatcher p.resolve(libsystemd)).filter(Files::exists).findAny(); + if (foundpath.isPresent()) { + return foundpath.get().toAbsolutePath().toString(); + } + } catch (IOException e) { + throw new UncheckedIOException(e); + } + + } + throw new UnsatisfiedLinkError("Could not find " + libsystemd + " in java.library.path: " + libpath); + } + + private static final MethodHandle sd_notify$mh = downcallHandle("sd_notify", FunctionDescriptor.of(JAVA_INT, JAVA_INT, ADDRESS)); + + @Override + public int sd_notify(int unset_environment, String state) { + try (Arena arena = Arena.ofConfined()) { + MemorySegment nativeState = arena.allocateUtf8String(state); + return (int) 
sd_notify$mh.invokeExact(unset_environment, nativeState); + } catch (Throwable t) { + throw new AssertionError(t); + } + } +} diff --git a/libs/x-content/impl/build.gradle b/libs/x-content/impl/build.gradle index 4bf498b1b392e..41b65044735ca 100644 --- a/libs/x-content/impl/build.gradle +++ b/libs/x-content/impl/build.gradle @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -apply plugin: 'elasticsearch.java' +apply plugin: 'elasticsearch.build' base { archivesName = "x-content-impl" diff --git a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentGenerator.java b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentGenerator.java index 1046a09f53197..09cbdf2d571cd 100644 --- a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentGenerator.java +++ b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentGenerator.java @@ -496,6 +496,20 @@ public void writeRawValue(InputStream stream, XContentType xContentType) throws } } + @Override + public void writeRawValue(String value) throws IOException { + try { + if (supportsRawWrites()) { + generator.writeRawValue(value); + } else { + // fallback to a regular string for formats that don't allow writing the value as is + generator.writeString(value); + } + } catch (JsonGenerationException e) { + throw new XContentGenerationException(e); + } + } + private boolean mayWriteRawData(XContentType contentType) { // When the current generator is filtered (ie filter != null) // or the content is in a different format than the current generator, diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java index 41512af0f79d4..2143814565a51 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java @@ -1212,6 +1212,14 @@ public XContentBuilder rawValue(InputStream stream, XContentType contentType) th return this; } + /** + * Writes a value with the source coming directly from a pre-rendered string representation + */ + public XContentBuilder rawValue(String value) throws IOException { + generator.writeRawValue(value); + return this; + } + public XContentBuilder copyCurrentStructure(XContentParser parser) throws IOException { generator.copyCurrentStructure(parser); return this; diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentGenerator.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentGenerator.java index 97739635932a3..5037ed0b40664 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentGenerator.java +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentGenerator.java @@ -105,6 +105,11 @@ public interface XContentGenerator extends Closeable, Flushable { */ void writeRawValue(InputStream value, XContentType xContentType) throws IOException; + /** + * Writes a raw value taken from a pre-rendered string representation + */ + void writeRawValue(String value) throws IOException; + void copyCurrentStructure(XContentParser parser) throws IOException; /** diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java index 8802ffd41571d..6e70e9263df47 100644 
--- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java @@ -11,11 +11,13 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.Maps; +import org.elasticsearch.core.Releasables; import org.elasticsearch.search.aggregations.AggregationReduceContext; import org.elasticsearch.search.aggregations.AggregatorReducer; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; +import org.elasticsearch.search.aggregations.bucket.MultiBucketAggregatorsReducer; import org.elasticsearch.search.aggregations.support.SamplingContext; import org.elasticsearch.xcontent.XContentBuilder; @@ -177,30 +179,38 @@ public InternalBucket getBucketByKey(String key) { @Override protected AggregatorReducer getLeaderReducer(AggregationReduceContext reduceContext, int size) { - Map> bucketsMap = new HashMap<>(); return new AggregatorReducer() { + final Map bucketsReducer = new HashMap<>(getBuckets().size()); + @Override public void accept(InternalAggregation aggregation) { - InternalAdjacencyMatrix filters = (InternalAdjacencyMatrix) aggregation; + final InternalAdjacencyMatrix filters = (InternalAdjacencyMatrix) aggregation; for (InternalBucket bucket : filters.buckets) { - List sameRangeList = bucketsMap.computeIfAbsent(bucket.key, k -> new ArrayList<>(size)); - sameRangeList.add(bucket); + MultiBucketAggregatorsReducer reducer = bucketsReducer.computeIfAbsent( + bucket.key, + k -> new MultiBucketAggregatorsReducer(reduceContext, size) + ); + reducer.accept(bucket); } } @Override public InternalAggregation get() { - List reducedBuckets = new ArrayList<>(bucketsMap.size()); - for (List sameRangeList : bucketsMap.values()) { - InternalBucket reducedBucket = reduceBucket(sameRangeList, reduceContext); - if (reducedBucket.docCount >= 1) { - reducedBuckets.add(reducedBucket); + List reducedBuckets = new ArrayList<>(bucketsReducer.size()); + for (Map.Entry entry : bucketsReducer.entrySet()) { + if (entry.getValue().getDocCount() >= 1) { + reducedBuckets.add(new InternalBucket(entry.getKey(), entry.getValue().getDocCount(), entry.getValue().get())); } } reduceContext.consumeBucketsAndMaybeBreak(reducedBuckets.size()); reducedBuckets.sort(Comparator.comparing(InternalBucket::getKey)); return new InternalAdjacencyMatrix(name, reducedBuckets, getMetadata()); } + + @Override + public void close() { + Releasables.close(bucketsReducer.values()); + } }; } @@ -209,21 +219,6 @@ public InternalAggregation finalizeSampling(SamplingContext samplingContext) { return new InternalAdjacencyMatrix(name, buckets.stream().map(b -> b.finalizeSampling(samplingContext)).toList(), getMetadata()); } - private InternalBucket reduceBucket(List buckets, AggregationReduceContext context) { - assert buckets.isEmpty() == false; - InternalBucket reduced = null; - for (InternalBucket bucket : buckets) { - if (reduced == null) { - reduced = new InternalBucket(bucket.key, bucket.docCount, bucket.aggregations); - } else { - reduced.docCount += bucket.docCount; - } - } - final List aggregations = new BucketAggregationList<>(buckets); - reduced.aggregations = InternalAggregations.reduce(aggregations, context); - return reduced; - } - 
@Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { builder.startArray(CommonFields.BUCKETS.getPreferredName()); diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java index f0dfad88c87b4..f0f7984079d97 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java @@ -7,12 +7,13 @@ */ package org.elasticsearch.aggregations.bucket.histogram; -import org.apache.lucene.util.PriorityQueue; import org.elasticsearch.TransportVersions; import org.elasticsearch.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder.RoundingInfo; import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.util.LongObjectPagedHashMap; +import org.elasticsearch.core.Releasables; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.AggregationReduceContext; import org.elasticsearch.search.aggregations.AggregatorReducer; @@ -20,7 +21,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.KeyComparable; -import org.elasticsearch.search.aggregations.bucket.IteratorAndCurrent; +import org.elasticsearch.search.aggregations.bucket.MultiBucketAggregatorsReducer; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; @@ -286,78 +287,6 @@ public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) return new Bucket(prototype.key, prototype.docCount, prototype.format, aggregations); } - /** - * This method works almost exactly the same as - * InternalDateHistogram#reduceBuckets(List, ReduceContext), the different - * here is that we need to round all the keys we see using the highest level - * rounding returned across all the shards so the resolution of the buckets - * is the same and they can be reduced together. 
- */ - private BucketReduceResult reduceBuckets(List aggregations, AggregationReduceContext reduceContext) { - - // First we need to find the highest level rounding used across all the - // shards - int reduceRoundingIdx = 0; - long min = Long.MAX_VALUE; - long max = Long.MIN_VALUE; - for (InternalAutoDateHistogram agg : aggregations) { - reduceRoundingIdx = Math.max(agg.bucketInfo.roundingIdx, reduceRoundingIdx); - if (false == agg.buckets.isEmpty()) { - min = Math.min(min, agg.buckets.get(0).key); - max = Math.max(max, agg.buckets.get(agg.buckets.size() - 1).key); - } - } - Rounding.Prepared reduceRounding = prepare(reduceRoundingIdx, min, max); - - final PriorityQueue> pq = new PriorityQueue<>(aggregations.size()) { - @Override - protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurrent b) { - return a.current().key < b.current().key; - } - }; - for (InternalAutoDateHistogram histogram : aggregations) { - if (histogram.buckets.isEmpty() == false) { - pq.add(new IteratorAndCurrent<>(histogram.buckets.iterator())); - } - } - - List reducedBuckets = new ArrayList<>(); - if (pq.size() > 0) { - // list of buckets coming from different shards that have the same key - List currentBuckets = new ArrayList<>(); - long key = reduceRounding.round(pq.top().current().key); - - do { - final IteratorAndCurrent top = pq.top(); - - if (reduceRounding.round(top.current().key) != key) { - // the key changes, reduce what we already buffered and reset the buffer for current buckets - final Bucket reduced = reduceBucket(currentBuckets, reduceContext); - reducedBuckets.add(reduced); - currentBuckets.clear(); - key = reduceRounding.round(top.current().key); - } - - currentBuckets.add(top.current()); - - if (top.hasNext()) { - top.next(); - assert top.current().key > key : "shards must return data sorted by key"; - pq.updateTop(); - } else { - pq.pop(); - } - } while (pq.size() > 0); - - if (currentBuckets.isEmpty() == false) { - final Bucket reduced = reduceBucket(currentBuckets, reduceContext); - reducedBuckets.add(reduced); - } - } - - return mergeBucketsIfNeeded(new BucketReduceResult(reducedBuckets, reduceRoundingIdx, 1, reduceRounding, min, max), reduceContext); - } - private BucketReduceResult mergeBucketsIfNeeded(BucketReduceResult firstPassResult, AggregationReduceContext reduceContext) { int idx = firstPassResult.roundingIdx; RoundingInfo info = bucketInfo.roundingInfos[idx]; @@ -505,19 +434,87 @@ static int getAppropriateRounding(long minKey, long maxKey, int roundingIdx, Rou return currentRoundingIdx - 1; } + /** + * This method works almost exactly the same as + * InternalDateHistogram#reduceBuckets(List, ReduceContext), the different + * here is that we need to round all the keys we see using the highest level + * rounding returned across all the shards so the resolution of the buckets + * is the same and they can be reduced together. 
+ */ @Override protected AggregatorReducer getLeaderReducer(AggregationReduceContext reduceContext, int size) { return new AggregatorReducer() { - final List aggregations = new ArrayList<>(size); + private final LongObjectPagedHashMap bucketsReducer = new LongObjectPagedHashMap<>( + getBuckets().size(), + reduceContext.bigArrays() + ); + int reduceRoundingIdx = 0; + long min = Long.MAX_VALUE; + long max = Long.MIN_VALUE; @Override public void accept(InternalAggregation aggregation) { - aggregations.add((InternalAutoDateHistogram) aggregation); + final InternalAutoDateHistogram histogram = (InternalAutoDateHistogram) aggregation; + reduceRoundingIdx = Math.max(histogram.bucketInfo.roundingIdx, reduceRoundingIdx); + if (false == histogram.buckets.isEmpty()) { + min = Math.min(min, histogram.buckets.get(0).key); + max = Math.max(max, histogram.buckets.get(histogram.buckets.size() - 1).key); + for (Bucket bucket : histogram.buckets) { + MultiBucketAggregatorsReducer reducer = bucketsReducer.get(bucket.key); + if (reducer == null) { + reducer = new MultiBucketAggregatorsReducer(reduceContext, size); + bucketsReducer.put(bucket.key, reducer); + } + reducer.accept(bucket); + } + } } @Override public InternalAggregation get() { - BucketReduceResult reducedBucketsResult = reduceBuckets(aggregations, reduceContext); + // First we need to find the highest level rounding used across all the + // shards + final Rounding.Prepared reduceRounding = prepare(reduceRoundingIdx, min, max); + + final long[] keys = new long[(int) bucketsReducer.size()]; + { + // fill the array and sort it + final int[] index = new int[] { 0 }; + bucketsReducer.iterator().forEachRemaining(c -> keys[index[0]++] = c.key); + Arrays.sort(keys); + } + + final List reducedBuckets = new ArrayList<>(); + if (keys.length > 0) { + // list of buckets coming from different shards that have the same key + MultiBucketAggregatorsReducer currentReducer = null; + long key = reduceRounding.round(keys[0]); + for (long top : keys) { + if (reduceRounding.round(top) != key) { + assert currentReducer != null; + // the key changes, reduce what we already buffered and reset the buffer for current buckets + reducedBuckets.add(createBucket(key, currentReducer.getDocCount(), currentReducer.get())); + currentReducer = null; + key = reduceRounding.round(top); + } + + final MultiBucketAggregatorsReducer nextReducer = bucketsReducer.get(top); + if (currentReducer == null) { + currentReducer = nextReducer; + } else { + currentReducer.accept(createBucket(key, nextReducer.getDocCount(), nextReducer.get())); + } + } + + if (currentReducer != null) { + reducedBuckets.add(createBucket(key, currentReducer.getDocCount(), currentReducer.get())); + } + } + + BucketReduceResult reducedBucketsResult = mergeBucketsIfNeeded( + new BucketReduceResult(reducedBuckets, reduceRoundingIdx, 1, reduceRounding, min, max), + reduceContext + ); if (reduceContext.isFinalReduce()) { // adding empty buckets if needed @@ -546,6 +543,12 @@ public InternalAggregation get() { reducedBucketsResult.innerInterval ); } + + @Override + public void close() { + bucketsReducer.iterator().forEachRemaining(c -> Releasables.close(c.value)); + Releasables.close(bucketsReducer); + } }; } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSourceAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSourceAggregationBuilder.java index 0f277ecd6c478..4cfd55a240451 100644 --- 
a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSourceAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSourceAggregationBuilder.java @@ -10,7 +10,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; -import org.elasticsearch.search.aggregations.AggregationInitializationException; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -45,9 +44,7 @@ protected LeafOnly(String name) { protected LeafOnly(LeafOnly clone, Builder factoriesBuilder, Map metadata) { super(clone, factoriesBuilder, metadata); if (factoriesBuilder.count() > 0) { - throw new AggregationInitializationException( - "Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations" - ); + throw new IllegalArgumentException("Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations"); } } @@ -60,9 +57,7 @@ protected LeafOnly(StreamInput in) throws IOException { @Override public AB subAggregations(Builder subFactories) { - throw new AggregationInitializationException( - "Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations" - ); + throw new IllegalArgumentException("Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations"); } @Override diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java index 34f1701a595de..6c06511ccfbd1 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java @@ -1790,7 +1790,8 @@ public ClusterState execute(ClusterState currentState) throws Exception { original.getIndexMode(), original.getLifecycle(), original.isFailureStore(), - original.getFailureIndices() + original.getFailureIndices(), + null ); brokenDataStreamHolder.set(broken); return ClusterState.builder(currentState) diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudDataStreamLifecycleIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudDataStreamLifecycleIT.java index e33b1fdcfa57a..b772e0bb347e2 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudDataStreamLifecycleIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudDataStreamLifecycleIT.java @@ -164,7 +164,7 @@ public void testPutLifecycle() throws Exception { ).get(); assertThat(response.getDataStreamLifecycles().size(), equalTo(1)); assertThat(response.getDataStreamLifecycles().get(0).dataStreamName(), equalTo("my-data-stream")); - assertThat(response.getDataStreamLifecycles().get(0).lifecycle().getEffectiveDataRetention(), equalTo(dataRetention)); + assertThat(response.getDataStreamLifecycles().get(0).lifecycle().getDataStreamRetention(), equalTo(dataRetention)); assertThat(response.getDataStreamLifecycles().get(0).lifecycle().isEnabled(), equalTo(true)); } @@ -189,7 +189,7 @@ 
public void testPutLifecycle() throws Exception { ).get(); assertThat(response.getDataStreamLifecycles().size(), equalTo(1)); assertThat(response.getDataStreamLifecycles().get(0).dataStreamName(), equalTo("my-data-stream")); - assertThat(response.getDataStreamLifecycles().get(0).lifecycle().getEffectiveDataRetention(), equalTo(dataRetention)); + assertThat(response.getDataStreamLifecycles().get(0).lifecycle().getDataStreamRetention(), equalTo(dataRetention)); assertThat(response.getDataStreamLifecycles().get(0).lifecycle().isEnabled(), equalTo(false)); } } diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java index 471622489d9b2..a497eed121b0c 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java @@ -118,7 +118,7 @@ public void testExplainLifecycle() throws Exception { assertThat(explainIndex.isManagedByLifecycle(), is(true)); assertThat(explainIndex.getIndexCreationDate(), notNullValue()); assertThat(explainIndex.getLifecycle(), notNullValue()); - assertThat(explainIndex.getLifecycle().getEffectiveDataRetention(), nullValue()); + assertThat(explainIndex.getLifecycle().getDataStreamRetention(), nullValue()); if (internalCluster().numDataNodes() > 1) { // If the number of nodes is 1 then the cluster will be yellow so forcemerge will report an error if it has run assertThat(explainIndex.getError(), nullValue()); @@ -175,7 +175,7 @@ public void testExplainLifecycle() throws Exception { assertThat(explainIndex.isManagedByLifecycle(), is(true)); assertThat(explainIndex.getIndexCreationDate(), notNullValue()); assertThat(explainIndex.getLifecycle(), notNullValue()); - assertThat(explainIndex.getLifecycle().getEffectiveDataRetention(), nullValue()); + assertThat(explainIndex.getLifecycle().getDataStreamRetention(), nullValue()); if (explainIndex.getIndex().equals(DataStream.getDefaultBackingIndexName(dataStreamName, 1))) { // first generation index was rolled over @@ -243,7 +243,7 @@ public void testExplainLifecycleForIndicesWithErrors() throws Exception { assertThat(explainIndex.isManagedByLifecycle(), is(true)); assertThat(explainIndex.getIndexCreationDate(), notNullValue()); assertThat(explainIndex.getLifecycle(), notNullValue()); - assertThat(explainIndex.getLifecycle().getEffectiveDataRetention(), nullValue()); + assertThat(explainIndex.getLifecycle().getDataStreamRetention(), nullValue()); assertThat(explainIndex.getRolloverDate(), nullValue()); assertThat(explainIndex.getTimeSinceRollover(System::currentTimeMillis), nullValue()); // index has not been rolled over yet diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/FailureStoreQueryParamIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/FailureStoreQueryParamIT.java new file mode 100644 index 0000000000000..a6b235e8d566f --- /dev/null +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/FailureStoreQueryParamIT.java @@ -0,0 +1,216 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.datastreams; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +/** + * This should be a yaml test, but in order to write one we would need to expose the new parameter in the rest-api-spec. + * We do not want to do that until the feature flag is removed. For this reason, we temporarily, test the affected APIs here. + * Please convert this to a yaml test when the feature flag is removed. + */ +public class FailureStoreQueryParamIT extends DisabledSecurityDataStreamTestCase { + + private static final String DATA_STREAM_NAME = "failure-data-stream"; + private String backingIndex; + private String failureStoreIndex; + + @SuppressWarnings("unchecked") + @Before + public void setup() throws IOException { + Request putComposableIndexTemplateRequest = new Request("POST", "/_index_template/ds-template"); + putComposableIndexTemplateRequest.setJsonEntity(""" + { + "index_patterns": ["failure-data-stream"], + "template": { + "settings": { + "number_of_replicas": 0 + } + }, + "data_stream": { + "failure_store": true + } + } + """); + assertOK(client().performRequest(putComposableIndexTemplateRequest)); + + assertOK(client().performRequest(new Request("PUT", "/_data_stream/" + DATA_STREAM_NAME))); + ensureGreen(DATA_STREAM_NAME); + + final Response dataStreamResponse = client().performRequest(new Request("GET", "/_data_stream/" + DATA_STREAM_NAME)); + List dataStreams = (List) entityAsMap(dataStreamResponse).get("data_streams"); + assertThat(dataStreams.size(), is(1)); + Map dataStream = (Map) dataStreams.get(0); + assertThat(dataStream.get("name"), equalTo(DATA_STREAM_NAME)); + List backingIndices = getBackingIndices(dataStream); + assertThat(backingIndices.size(), is(1)); + List failureStore = getFailureStore(dataStream); + assertThat(failureStore.size(), is(1)); + backingIndex = backingIndices.get(0); + failureStoreIndex = failureStore.get(0); + } + + public void testGetIndexApi() throws IOException { + { + final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME)); + Map indices = entityAsMap(indicesResponse); + assertThat(indices.size(), is(2)); + assertThat(indices.containsKey(backingIndex), is(true)); + assertThat(indices.containsKey(failureStoreIndex), is(true)); + } + { + final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + "?failure_store=false")); + Map indices = entityAsMap(indicesResponse); + assertThat(indices.size(), is(1)); + assertThat(indices.containsKey(backingIndex), is(true)); + } + { + final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + "?failure_store=only")); + Map indices = entityAsMap(indicesResponse); + assertThat(indices.size(), is(1)); + assertThat(indices.containsKey(failureStoreIndex), is(true)); + } + } + + @SuppressWarnings("unchecked") + public void testGetIndexStatsApi() throws IOException { + { + final Response statsResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + 
"/_stats")); + Map indices = (Map) entityAsMap(statsResponse).get("indices"); + assertThat(indices.size(), is(1)); + assertThat(indices.containsKey(backingIndex), is(true)); + } + { + final Response statsResponse = client().performRequest( + new Request("GET", "/" + DATA_STREAM_NAME + "/_stats?failure_store=true") + ); + Map indices = (Map) entityAsMap(statsResponse).get("indices"); + assertThat(indices.size(), is(2)); + assertThat(indices.containsKey(backingIndex), is(true)); + assertThat(indices.containsKey(failureStoreIndex), is(true)); + } + { + final Response statsResponse = client().performRequest( + new Request("GET", "/" + DATA_STREAM_NAME + "/_stats?failure_store=only") + ); + Map indices = (Map) entityAsMap(statsResponse).get("indices"); + assertThat(indices.size(), is(1)); + assertThat(indices.containsKey(failureStoreIndex), is(true)); + } + } + + public void testGetIndexSettingsApi() throws IOException { + { + final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + "/_settings")); + Map indices = entityAsMap(indicesResponse); + assertThat(indices.size(), is(1)); + assertThat(indices.containsKey(backingIndex), is(true)); + } + { + final Response indicesResponse = client().performRequest( + new Request("GET", "/" + DATA_STREAM_NAME + "/_settings?failure_store=true") + ); + Map indices = entityAsMap(indicesResponse); + assertThat(indices.size(), is(2)); + assertThat(indices.containsKey(backingIndex), is(true)); + assertThat(indices.containsKey(failureStoreIndex), is(true)); + } + { + final Response indicesResponse = client().performRequest( + new Request("GET", "/" + DATA_STREAM_NAME + "/_settings?failure_store=only") + ); + Map indices = entityAsMap(indicesResponse); + assertThat(indices.size(), is(1)); + assertThat(indices.containsKey(failureStoreIndex), is(true)); + } + } + + public void testGetIndexMappingApi() throws IOException { + { + final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + "/_mapping")); + Map indices = entityAsMap(indicesResponse); + assertThat(indices.size(), is(1)); + assertThat(indices.containsKey(backingIndex), is(true)); + } + { + final Response indicesResponse = client().performRequest( + new Request("GET", "/" + DATA_STREAM_NAME + "/_mapping?failure_store=true") + ); + Map indices = entityAsMap(indicesResponse); + assertThat(indices.size(), is(2)); + assertThat(indices.containsKey(backingIndex), is(true)); + assertThat(indices.containsKey(failureStoreIndex), is(true)); + } + { + final Response indicesResponse = client().performRequest( + new Request("GET", "/" + DATA_STREAM_NAME + "/_mapping?failure_store=only") + ); + Map indices = entityAsMap(indicesResponse); + assertThat(indices.size(), is(1)); + assertThat(indices.containsKey(failureStoreIndex), is(true)); + } + } + + @SuppressWarnings("unchecked") + public void testPutIndexMappingApi() throws IOException { + { + final Request mappingRequest = new Request("PUT", "/" + DATA_STREAM_NAME + "/_mapping"); + mappingRequest.setJsonEntity(""" + { + "properties": { + "email": { + "type": "keyword" + } + } + } + """); + assertAcknowledged(client().performRequest(mappingRequest)); + } + { + final Request mappingRequest = new Request("PUT", "/" + DATA_STREAM_NAME + "/_mapping?failure_store=true"); + mappingRequest.setJsonEntity(""" + { + "properties": { + "email": { + "type": "keyword" + } + } + } + """); + ResponseException responseException = expectThrows(ResponseException.class, () -> 
client().performRequest(mappingRequest)); + Map response = entityAsMap(responseException.getResponse()); + assertThat(((Map) response.get("error")).get("reason"), is("failure index not supported")); + } + } + + private List getBackingIndices(Map response) { + return getIndices(response, "indices"); + } + + private List getFailureStore(Map response) { + return getIndices(response, "failure_indices"); + + } + + @SuppressWarnings("unchecked") + private List getIndices(Map response, String fieldName) { + List> indices = (List>) response.get(fieldName); + return indices.stream().map(index -> index.get("index_name")).toList(); + } +} diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java index 8b15d6a4b7bdf..1b875c28f7f43 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java @@ -822,38 +822,40 @@ private void maybeExecuteRollover(ClusterState state, DataStream dataStream) { * @return The set of indices that delete requests have been sent for */ private Set maybeExecuteRetention(ClusterState state, DataStream dataStream, Set indicesToExcludeForRemainingRun) { - TimeValue retention = getRetentionConfiguration(dataStream); + Metadata metadata = state.metadata(); + List backingIndicesOlderThanRetention = dataStream.getIndicesPastRetention(metadata::index, nowSupplier); + if (backingIndicesOlderThanRetention.isEmpty()) { + return Set.of(); + } Set indicesToBeRemoved = new HashSet<>(); - if (retention != null) { - Metadata metadata = state.metadata(); - List backingIndicesOlderThanRetention = dataStream.getIndicesPastRetention(metadata::index, nowSupplier); - - for (Index index : backingIndicesOlderThanRetention) { - if (indicesToExcludeForRemainingRun.contains(index) == false) { - IndexMetadata backingIndex = metadata.index(index); - assert backingIndex != null : "the data stream backing indices must exist"; - - IndexMetadata.DownsampleTaskStatus downsampleStatus = INDEX_DOWNSAMPLE_STATUS.get(backingIndex.getSettings()); - // we don't want to delete the source index if they have an in-progress downsampling operation because the - // target downsample index will remain in the system as a standalone index - if (downsampleStatus.equals(UNKNOWN)) { - indicesToBeRemoved.add(index); - - // there's an opportunity here to batch the delete requests (i.e. delete 100 indices / request) - // let's start simple and reevaluate - String indexName = backingIndex.getIndex().getName(); - deleteIndexOnce(indexName, "the lapsed [" + retention + "] retention period"); - } else { - // there's an opportunity here to cancel downsampling and delete the source index now - logger.trace( - "Data stream lifecycle skips deleting index [{}] even though its retention period [{}] has lapsed " - + "because there's a downsampling operation currently in progress for this index. Current downsampling " - + "status is [{}]. 
When downsampling completes, DSL will delete this index.", - index.getName(), - retention, - downsampleStatus - ); - } + // We know that there is lifecycle and retention because there are indices to be deleted + assert dataStream.getLifecycle() != null; + TimeValue effectiveDataRetention = dataStream.getLifecycle().getEffectiveDataRetention(); + for (Index index : backingIndicesOlderThanRetention) { + if (indicesToExcludeForRemainingRun.contains(index) == false) { + IndexMetadata backingIndex = metadata.index(index); + assert backingIndex != null : "the data stream backing indices must exist"; + + IndexMetadata.DownsampleTaskStatus downsampleStatus = INDEX_DOWNSAMPLE_STATUS.get(backingIndex.getSettings()); + // we don't want to delete the source index if they have an in-progress downsampling operation because the + // target downsample index will remain in the system as a standalone index + if (downsampleStatus.equals(UNKNOWN)) { + indicesToBeRemoved.add(index); + + // there's an opportunity here to batch the delete requests (i.e. delete 100 indices / request) + // let's start simple and reevaluate + String indexName = backingIndex.getIndex().getName(); + deleteIndexOnce(indexName, "the lapsed [" + effectiveDataRetention + "] retention period"); + } else { + // there's an opportunity here to cancel downsampling and delete the source index now + logger.trace( + "Data stream lifecycle skips deleting index [{}] even though its retention period [{}] has lapsed " + + "because there's a downsampling operation currently in progress for this index. Current downsampling " + + "status is [{}]. When downsampling completes, DSL will delete this index.", + index.getName(), + effectiveDataRetention, + downsampleStatus + ); } } } @@ -1222,14 +1224,6 @@ private static boolean isForceMergeComplete(IndexMetadata backingIndex) { return customMetadata != null && customMetadata.containsKey(FORCE_MERGE_COMPLETED_TIMESTAMP_METADATA_KEY); } - @Nullable - static TimeValue getRetentionConfiguration(DataStream dataStream) { - if (dataStream.getLifecycle() == null) { - return null; - } - return dataStream.getLifecycle().getEffectiveDataRetention(); - } - /** * @return the duration of the last run in millis or null if the service hasn't completed a run yet. 
*/ diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java index c65854903f7a9..01ad1bb09b20f 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java @@ -314,7 +314,8 @@ public void testGetAdditionalIndexSettingsDataStreamAlreadyCreatedTimeSettingsMi IndexMode.TIME_SERIES, ds.getLifecycle(), ds.isFailureStore(), - ds.getFailureIndices() + ds.getFailureIndices(), + null ) ); Metadata metadata = mb.build(); diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java index e7339cc3f334a..d1e07aacaddce 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java @@ -151,7 +151,7 @@ public void testLifecycleComposition() { DataStreamLifecycle result = composeDataLifecycles(lifecycles); // Defaults to true assertThat(result.isEnabled(), equalTo(true)); - assertThat(result.getEffectiveDataRetention(), equalTo(lifecycle.getEffectiveDataRetention())); + assertThat(result.getDataStreamRetention(), equalTo(lifecycle.getDataStreamRetention())); assertThat(result.getDownsamplingRounds(), equalTo(lifecycle.getDownsamplingRounds())); } // If the last lifecycle is missing a property (apart from enabled) we keep the latest from the previous ones @@ -165,7 +165,7 @@ public void testLifecycleComposition() { List lifecycles = List.of(lifecycle, new DataStreamLifecycle()); DataStreamLifecycle result = composeDataLifecycles(lifecycles); assertThat(result.isEnabled(), equalTo(true)); - assertThat(result.getEffectiveDataRetention(), equalTo(lifecycle.getEffectiveDataRetention())); + assertThat(result.getDataStreamRetention(), equalTo(lifecycle.getDataStreamRetention())); assertThat(result.getDownsamplingRounds(), equalTo(lifecycle.getDownsamplingRounds())); } // If both lifecycle have all properties, then the latest one overwrites all the others @@ -183,7 +183,7 @@ public void testLifecycleComposition() { List lifecycles = List.of(lifecycle1, lifecycle2); DataStreamLifecycle result = composeDataLifecycles(lifecycles); assertThat(result.isEnabled(), equalTo(lifecycle2.isEnabled())); - assertThat(result.getEffectiveDataRetention(), equalTo(lifecycle2.getEffectiveDataRetention())); + assertThat(result.getDataStreamRetention(), equalTo(lifecycle2.getDataStreamRetention())); assertThat(result.getDownsamplingRounds(), equalTo(lifecycle2.getDownsamplingRounds())); } } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeServiceTests.java index dbb48ea3ddc26..abd5132edde16 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeServiceTests.java @@ -153,7 +153,8 @@ public void testUpdateTimeSeriesTemporalRange_NoUpdateBecauseReplicated() { 
d.getIndexMode(), d.getLifecycle(), d.isFailureStore(), - d.getFailureIndices() + d.getFailureIndices(), + null ) ) .build(); diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java index 13054379dd666..e200ff7cba2e1 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java @@ -89,7 +89,8 @@ public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Excepti IndexMode.STANDARD, new DataStreamLifecycle(), true, - failureStores + failureStores, + null ); String ilmPolicyName = "rollover-30days"; @@ -198,7 +199,8 @@ public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Excepti IndexMode.STANDARD, new DataStreamLifecycle(null, null, false), true, - failureStores + failureStores, + null ); String ilmPolicyName = "rollover-30days"; diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java index 15f526d0a06d6..d0456d669663d 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java @@ -295,7 +295,8 @@ public void testRetentionNotExecutedForTSIndicesWithinTimeBounds() { dataStream.getIndexMode(), DataStreamLifecycle.newBuilder().dataRetention(0L).build(), dataStream.isFailureStore(), - dataStream.getFailureIndices() + dataStream.getFailureIndices(), + null ) ); clusterState = ClusterState.builder(clusterState).metadata(builder).build(); diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml index 114d968eb5e6c..20eb33ecefdee 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml @@ -196,9 +196,10 @@ index without timestamp with pipeline: dynamic templates: - skip: version: " - 8.12.99" - features: "default_shards" reason: "Support for dynamic fields was added in 8.13" - do: + allowed_warnings: + - "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation" indices.put_index_template: name: my-dynamic-template body: @@ -326,9 +327,10 @@ dynamic templates: dynamic templates - conflicting aliases: - skip: version: " - 8.12.99" - features: "default_shards" reason: "Support for dynamic fields was added in 8.13" - do: + allowed_warnings: + - "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation" indices.put_index_template: name: my-dynamic-template body: @@ -422,9 +424,10 @@ dynamic templates - conflicting aliases: dynamic templates with 
nesting: - skip: version: " - 8.12.99" - features: "default_shards" reason: "Support for dynamic fields was added in 8.13" - do: + allowed_warnings: + - "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation" indices.put_index_template: name: my-dynamic-template body: @@ -556,14 +559,15 @@ dynamic templates with nesting: - match: { aggregations.filterA.tsids.buckets.0.doc_count: 2 } --- -dynamic templates - subobject in passthrough object error: +subobject in passthrough object auto flatten: - skip: version: " - 8.12.99" - reason: "Support for dynamic fields was added in 8.13" + reason: "Support for passthrough fields was added in 8.13" - do: - catch: /Tried to add subobject \[subcategory\] to object \[attributes\] which does not support subobjects/ + allowed_warnings: + - "index template [my-passthrough-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-passthrough-template] will take precedence during new index creation" indices.put_index_template: - name: my-dynamic-template + name: my-passthrough-template body: index_patterns: [k9s*] data_stream: {} @@ -576,13 +580,34 @@ dynamic templates - subobject in passthrough object error: properties: attributes: type: passthrough + time_series_dimension: true properties: subcategory: type: object properties: dim: type: keyword + - do: + indices.create_data_stream: + name: k9s + - is_true: acknowledged + # save the backing index names for later use + - do: + indices.get_data_stream: + name: k9s + - set: { data_streams.0.indices.0.index_name: idx0name } + + - do: + indices.get_mapping: + index: $idx0name + expand_wildcards: hidden + - match: { .$idx0name.mappings.properties.attributes.properties.subcategory\.dim.type: 'keyword' } +--- +enable subobjects in passthrough object: + - skip: + version: " - 8.12.99" + reason: "Support for passthrough fields was added in 8.13" - do: catch: /Mapping definition for \[attributes\] has unsupported parameters:\ \[subobjects \:\ true\]/ indices.put_index_template: diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/KeyValueProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/KeyValueProcessor.java index 8c90beed4d01c..0c6e37f675e1d 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/KeyValueProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/KeyValueProcessor.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.core.Predicates; import org.elasticsearch.ingest.AbstractProcessor; import org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.ingest.IngestDocument; @@ -100,7 +101,7 @@ private static Consumer buildExecution( final Predicate keyFilter; if (includeKeys == null) { if (excludeKeys == null) { - keyFilter = key -> true; + keyFilter = Predicates.always(); } else { keyFilter = key -> excludeKeys.contains(key) == false; } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/UriPartsProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/UriPartsProcessor.java index 66e6df5fde58d..c476c6a9d3b9d 100644 --- 
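In the KeyValueProcessor hunk above, the ad-hoc `key -> true` lambda is replaced by the shared `Predicates.always()` constant, so every include/exclude combination resolves to a single key predicate. A minimal standalone sketch of that selection logic follows; the include+exclude branches are assumptions about surrounding code that this hunk does not show.

import java.util.Set;
import java.util.function.Predicate;

final class KeyFilterSketch {
    // Mirrors the shape of KeyValueProcessor.buildExecution: no include/exclude lists means
    // "accept every key", otherwise filter against the configured sets.
    static Predicate<String> keyFilter(Set<String> includeKeys, Set<String> excludeKeys) {
        if (includeKeys == null) {
            return excludeKeys == null ? key -> true : key -> excludeKeys.contains(key) == false;
        }
        if (excludeKeys == null) {
            return includeKeys::contains;
        }
        // assumed combination when both lists are configured
        return key -> includeKeys.contains(key) && excludeKeys.contains(key) == false;
    }
}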
a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/UriPartsProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/UriPartsProcessor.java @@ -140,9 +140,16 @@ private static Map getUriParts(URI uri, URL fallbackUrl) { } if (path != null) { uriParts.put("path", path); - if (path.contains(".")) { - int periodIndex = path.lastIndexOf('.'); - uriParts.put("extension", periodIndex < path.length() ? path.substring(periodIndex + 1) : ""); + // To avoid any issues with extracting the extension from a path that contains a dot, we explicitly extract the extension + // from the last segment in the path. + var lastSegmentIndex = path.lastIndexOf('/'); + if (lastSegmentIndex >= 0) { + var lastSegment = path.substring(lastSegmentIndex); + int periodIndex = lastSegment.lastIndexOf('.'); + if (periodIndex >= 0) { + // Don't include the dot in the extension field. + uriParts.put("extension", lastSegment.substring(periodIndex + 1)); + } } } if (port != -1) { diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/NetworkDirectionProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/NetworkDirectionProcessorTests.java index 7c53df0ca3f45..72e10e5ba3711 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/NetworkDirectionProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/NetworkDirectionProcessorTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -33,20 +34,10 @@ private Map buildEvent(String source) { } private Map buildEvent(String source, String destination) { - return new HashMap<>() { - { - put("source", new HashMap() { - { - put("ip", source); - } - }); - put("destination", new HashMap() { - { - put("ip", destination); - } - }); - } - }; + Map event = new HashMap<>(); + event.put("source", Collections.singletonMap("ip", source)); + event.put("destination", Collections.singletonMap("ip", destination)); + return event; } public void testNoInternalNetworks() throws Exception { diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RegisteredDomainProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RegisteredDomainProcessorTests.java index 3f706a8925810..7d6fa99f81580 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RegisteredDomainProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RegisteredDomainProcessorTests.java @@ -12,7 +12,6 @@ import org.elasticsearch.ingest.TestIngestDocument; import org.elasticsearch.test.ESTestCase; -import java.util.HashMap; import java.util.Map; import static org.hamcrest.Matchers.containsString; @@ -27,11 +26,7 @@ */ public class RegisteredDomainProcessorTests extends ESTestCase { private Map buildEvent(String domain) { - return new HashMap<>() { - { - put("domain", domain); - } - }; + return Map.of("domain", domain); } public void testBasic() throws Exception { diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/UriPartsProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/UriPartsProcessorTests.java index c7d3052eaa9f3..e7552d23d659a 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/UriPartsProcessorTests.java +++ 
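The UriPartsProcessor hunk above restricts extension extraction to the last path segment, so a dot in an intermediate directory no longer yields a spurious extension. A small illustrative re-implementation of just that rule (not the processor itself), consistent with the new tests further down:

final class UriExtensionSketch {
    // Only the segment after the final '/' is inspected, and the dot itself is excluded.
    static String extensionOf(String path) {
        int lastSegmentIndex = path.lastIndexOf('/');
        if (lastSegmentIndex < 0) {
            return null; // the processor leaves the extension field unset in this case
        }
        String lastSegment = path.substring(lastSegmentIndex);
        int periodIndex = lastSegment.lastIndexOf('.');
        return periodIndex >= 0 ? lastSegment.substring(periodIndex + 1) : null;
    }
}
// extensionOf("/path.withdot/filenamewithoutextension") -> null (no extension field is set)
// extensionOf("/foo/bar.txt") -> "txt"
// extensionOf("/foo/bar.")    -> ""   (the empty-extension case covered by testEmptyExtension)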
b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/UriPartsProcessorTests.java @@ -181,6 +181,31 @@ public void testUrlWithCharactersNotToleratedByUri() throws Exception { ); } + public void testDotPathWithoutExtension() throws Exception { + testUriParsing( + "https://www.google.com/path.withdot/filenamewithoutextension", + Map.of("scheme", "https", "domain", "www.google.com", "path", "/path.withdot/filenamewithoutextension") + ); + } + + public void testDotPathWithExtension() throws Exception { + testUriParsing( + "https://www.google.com/path.withdot/filenamewithextension.txt", + Map.of("scheme", "https", "domain", "www.google.com", "path", "/path.withdot/filenamewithextension.txt", "extension", "txt") + ); + } + + /** + * This test verifies that we return an empty extension instead of null if the URI ends with a period. This is probably + * not behaviour we necessarily want to keep forever, but this test ensures that we're conscious about changing that behaviour. + */ + public void testEmptyExtension() throws Exception { + testUriParsing( + "https://www.google.com/foo/bar.", + Map.of("scheme", "https", "domain", "www.google.com", "path", "/foo/bar.", "extension", "") + ); + } + public void testRemoveIfSuccessfulDoesNotRemoveTargetField() throws Exception { String field = "field"; UriPartsProcessor processor = new UriPartsProcessor(null, null, field, field, true, false, false); diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/320_uri_parts_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/320_uri_parts_processor.yml new file mode 100644 index 0000000000000..53512a4a505f2 --- /dev/null +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/320_uri_parts_processor.yml @@ -0,0 +1,49 @@ +--- +teardown: + - do: + ingest.delete_pipeline: + id: "uri-parts-pipeline" + ignore: 404 + +--- +"Test URI parts Processor": + - do: + ingest.put_pipeline: + id: "uri-parts-pipeline" + body: > + { + "processors": [ + { + "uri_parts" : { + "field" : "my_uri" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: "1" + pipeline: "uri-parts-pipeline" + body: { + my_uri: "https://user:pw@testing.google.com:8080/foo/bar.txt?foo1=bar1&foo2=bar2#anchorVal" + } + + - do: + get: + index: test + id: "1" + - match: { _source.my_uri: "https://user:pw@testing.google.com:8080/foo/bar.txt?foo1=bar1&foo2=bar2#anchorVal" } + - match: { _source.url.original: "https://user:pw@testing.google.com:8080/foo/bar.txt?foo1=bar1&foo2=bar2#anchorVal" } + - match: { _source.url.scheme: "https" } + - match: { _source.url.domain: "testing.google.com" } + - match: { _source.url.fragment: "anchorVal" } + - match: { _source.url.path: "/foo/bar.txt" } + - match: { _source.url.port: 8080 } + - match: { _source.url.username: "user" } + - match: { _source.url.password: "pw" } + - match: { _source.url.user_info: "user:pw" } + - match: { _source.url.query: "foo1=bar1&foo2=bar2" } + - match: { _source.url.extension: "txt" } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java index 615d1c37bf0cf..299e55d4d60a8 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java @@ -345,6 
+345,7 @@ private void startTask(Runnable onFailure) { GEOIP_DOWNLOADER, GEOIP_DOWNLOADER, new GeoIpTaskParams(), + null, ActionListener.wrap(r -> logger.debug("Started geoip downloader task"), e -> { Throwable t = e instanceof RemoteTransportException ? e.getCause() : e; if (t instanceof ResourceAlreadyExistsException == false) { @@ -366,7 +367,7 @@ private void stopTask(Runnable onFailure) { } } ); - persistentTasksService.sendRemoveRequest(GEOIP_DOWNLOADER, ActionListener.runAfter(listener, () -> { + persistentTasksService.sendRemoveRequest(GEOIP_DOWNLOADER, null, ActionListener.runAfter(listener, () -> { IndexAbstraction databasesAbstraction = clusterService.state().metadata().getIndicesLookup().get(DATABASES_INDEX); if (databasesAbstraction != null) { // regardless of whether DATABASES_INDEX is an alias, resolve it to a concrete index diff --git a/modules/lang-painless/src/doc/java/org/elasticsearch/painless/ContextDocGenerator.java b/modules/lang-painless/src/doc/java/org/elasticsearch/painless/ContextDocGenerator.java index af702bd2e2fe3..d99a085d784b5 100644 --- a/modules/lang-painless/src/doc/java/org/elasticsearch/painless/ContextDocGenerator.java +++ b/modules/lang-painless/src/doc/java/org/elasticsearch/painless/ContextDocGenerator.java @@ -27,6 +27,7 @@ import java.nio.file.StandardOpenOption; import java.util.ArrayList; import java.util.Collections; +import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -176,7 +177,7 @@ private static void printSharedIndexPage( PrintStream sharedIndexStream = new PrintStream( Files.newOutputStream(sharedIndexPath, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE), false, - StandardCharsets.UTF_8.name() + StandardCharsets.UTF_8 ) ) { @@ -205,7 +206,7 @@ private static void printContextIndexPage( PrintStream contextIndexStream = new PrintStream( Files.newOutputStream(contextIndexPath, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE), false, - StandardCharsets.UTF_8.name() + StandardCharsets.UTF_8 ) ) { @@ -306,7 +307,7 @@ private static void printSharedPackagesPages( PrintStream sharedPackagesStream = new PrintStream( Files.newOutputStream(sharedClassesPath, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE), false, - StandardCharsets.UTF_8.name() + StandardCharsets.UTF_8 ) ) { @@ -329,7 +330,7 @@ private static void printContextPackagesPages( PrintStream contextPackagesStream = new PrintStream( Files.newOutputStream(contextPackagesPath, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE), false, - StandardCharsets.UTF_8.name() + StandardCharsets.UTF_8 ) ) { @@ -413,7 +414,7 @@ private static void printRootIndexPage(Path rootDir, List c PrintStream rootIndexStream = new PrintStream( Files.newOutputStream(rootIndexPath, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE), false, - StandardCharsets.UTF_8.name() + StandardCharsets.UTF_8 ) ) { @@ -598,18 +599,7 @@ private static String getConstructorJavaDocLink(PainlessContextConstructorInfo c javaDocLink.append(constructorInfo.getDeclaring().replace('.', '/')); javaDocLink.append(".html#("); - for (int parameterIndex = 0; parameterIndex < constructorInfo.getParameters().size(); ++parameterIndex) { - - javaDocLink.append(getLinkType(constructorInfo.getParameters().get(parameterIndex))); - - if (parameterIndex + 1 < constructorInfo.getParameters().size()) { - javaDocLink.append(","); - } - } - - javaDocLink.append(")"); - - return javaDocLink.toString(); + return collectParameters(javaDocLink, 
constructorInfo.getParameters()); } private static String getMethodJavaDocLink(PainlessContextMethodInfo methodInfo) { @@ -621,11 +611,15 @@ private static String getMethodJavaDocLink(PainlessContextMethodInfo methodInfo) javaDocLink.append(methodInfo.getName()); javaDocLink.append("("); - for (int parameterIndex = 0; parameterIndex < methodInfo.getParameters().size(); ++parameterIndex) { + return collectParameters(javaDocLink, methodInfo.getParameters()); + } - javaDocLink.append(getLinkType(methodInfo.getParameters().get(parameterIndex))); + private static String collectParameters(StringBuilder javaDocLink, List parameters) { + for (int parameterIndex = 0; parameterIndex < parameters.size(); ++parameterIndex) { - if (parameterIndex + 1 < methodInfo.getParameters().size()) { + javaDocLink.append(getLinkType(parameters.get(parameterIndex))); + + if (parameterIndex + 1 < parameters.size()) { javaDocLink.append(","); } } @@ -708,32 +702,19 @@ private static List sortStaticInfos(Set staticExcludes, List(staticInfos); staticInfos.removeIf(staticExcludes::contains); - staticInfos.sort((si1, si2) -> { - String sv1; - String sv2; - - if (si1 instanceof PainlessContextMethodInfo) { - sv1 = ((PainlessContextMethodInfo) si1).getSortValue(); - } else if (si1 instanceof PainlessContextClassBindingInfo) { - sv1 = ((PainlessContextClassBindingInfo) si1).getSortValue(); - } else if (si1 instanceof PainlessContextInstanceBindingInfo) { - sv1 = ((PainlessContextInstanceBindingInfo) si1).getSortValue(); - } else { - throw new IllegalArgumentException("unexpected static info type"); - } - - if (si2 instanceof PainlessContextMethodInfo) { - sv2 = ((PainlessContextMethodInfo) si2).getSortValue(); - } else if (si2 instanceof PainlessContextClassBindingInfo) { - sv2 = ((PainlessContextClassBindingInfo) si2).getSortValue(); - } else if (si2 instanceof PainlessContextInstanceBindingInfo) { - sv2 = ((PainlessContextInstanceBindingInfo) si2).getSortValue(); + staticInfos.sort(Comparator.comparing(si -> { + String sv; + if (si instanceof PainlessContextMethodInfo) { + sv = ((PainlessContextMethodInfo) si).getSortValue(); + } else if (si instanceof PainlessContextClassBindingInfo) { + sv = ((PainlessContextClassBindingInfo) si).getSortValue(); + } else if (si instanceof PainlessContextInstanceBindingInfo) { + sv = ((PainlessContextInstanceBindingInfo) si).getSortValue(); } else { throw new IllegalArgumentException("unexpected static info type"); } - - return sv1.compareTo(sv2); - }); + return sv; + })); return staticInfos; } @@ -742,48 +723,9 @@ private static List sortClassInfos( Set classExcludes, List classInfos ) { - classInfos = new ArrayList<>(classInfos); - classInfos.removeIf( - v -> "void".equals(v.getName()) - || "boolean".equals(v.getName()) - || "byte".equals(v.getName()) - || "short".equals(v.getName()) - || "char".equals(v.getName()) - || "int".equals(v.getName()) - || "long".equals(v.getName()) - || "float".equals(v.getName()) - || "double".equals(v.getName()) - || "org.elasticsearch.painless.lookup.def".equals(v.getName()) - || isInternalClass(v.getName()) - || classExcludes.contains(v) - ); - - classInfos.sort((c1, c2) -> { - String n1 = c1.getName(); - String n2 = c2.getName(); - boolean i1 = c1.isImported(); - boolean i2 = c2.isImported(); - - String p1 = n1.substring(0, n1.lastIndexOf('.')); - String p2 = n2.substring(0, n2.lastIndexOf('.')); - - int compare = p1.compareTo(p2); - - if (compare == 0) { - if (i1 && i2) { - compare = n1.substring(n1.lastIndexOf('.') + 
1).compareTo(n2.substring(n2.lastIndexOf('.') + 1)); - } else if (i1 == false && i2 == false) { - compare = n1.compareTo(n2); - } else { - compare = Boolean.compare(i1, i2) * -1; - } - } - - return compare; - }); - - return classInfos; + classInfos.removeIf(v -> ContextGeneratorCommon.isExcludedClassInfo(v) || classExcludes.contains(v)); + return ContextGeneratorCommon.sortFilteredClassInfos(classInfos); } private static Map getDisplayNames(List classInfos) { @@ -802,19 +744,5 @@ private static Map getDisplayNames(List getDisplayNames(Collection sortClassInfos(Collection unsortedClassInfos) { - List classInfos = new ArrayList<>(unsortedClassInfos); - classInfos.removeIf( - v -> "void".equals(v.getName()) - || "boolean".equals(v.getName()) - || "byte".equals(v.getName()) - || "short".equals(v.getName()) - || "char".equals(v.getName()) - || "int".equals(v.getName()) - || "long".equals(v.getName()) - || "float".equals(v.getName()) - || "double".equals(v.getName()) - || "org.elasticsearch.painless.lookup.def".equals(v.getName()) - || isInternalClass(v.getName()) - ); + classInfos.removeIf(ContextGeneratorCommon::isExcludedClassInfo); + return sortFilteredClassInfos(classInfos); + } + + static boolean isExcludedClassInfo(PainlessContextClassInfo v) { + return "void".equals(v.getName()) + || "boolean".equals(v.getName()) + || "byte".equals(v.getName()) + || "short".equals(v.getName()) + || "char".equals(v.getName()) + || "int".equals(v.getName()) + || "long".equals(v.getName()) + || "float".equals(v.getName()) + || "double".equals(v.getName()) + || "org.elasticsearch.painless.lookup.def".equals(v.getName()) + || isInternalClass(v.getName()); + } + static List sortFilteredClassInfos(List classInfos) { classInfos.sort((c1, c2) -> { String n1 = c1.getName(); String n2 = c2.getName(); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/Augmentation.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/Augmentation.java index 9267a8e963045..21f940efda5ac 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/Augmentation.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/Augmentation.java @@ -673,6 +673,10 @@ public static String sha256(String source) { return MessageDigests.toHexString(MessageDigests.sha256().digest(source.getBytes(StandardCharsets.UTF_8))); } + public static String sha512(String source) { + return MessageDigests.toHexString(MessageDigests.sha512().digest(source.getBytes(StandardCharsets.UTF_8))); + } + public static final int UNLIMITED_PATTERN_FACTOR = 0; public static final int DISABLED_PATTERN_FACTOR = -1; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/CIDR.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/CIDR.java index 8ce32e182cb18..c3e39b5905cdc 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/CIDR.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/CIDR.java @@ -8,10 +8,10 @@ package org.elasticsearch.painless.api; +import org.elasticsearch.common.network.CIDRUtils; import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.core.Tuple; -import java.net.InetAddress; import java.util.Arrays; /** @@ -28,7 +28,7 @@ public class CIDR { */ public CIDR(String cidr) { if (cidr.contains("/")) { - final Tuple range = getLowerUpper(InetAddresses.parseCidr(cidr)); + final Tuple range = CIDRUtils.getLowerUpper(InetAddresses.parseCidr(cidr)); lower = 
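The ContextDocGenerator and ContextGeneratorCommon changes above collapse a hand-written compare callback into `Comparator.comparing` over an extracted sort key and pull the primitive/internal-class filter into a shared `isExcludedClassInfo` helper. A generic sketch of the same filter-then-sort pattern, using a made-up record in place of the Painless info classes:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.Set;

record ClassInfoSketch(String name, boolean imported) {}

final class SortSketch {
    private static final Set<String> EXCLUDED = Set.of("void", "boolean", "int", "long", "double");

    // Filter with a shared predicate, then sort by an extracted key instead of an inline
    // if/else compare method; the key extraction here is deliberately simplified.
    static List<ClassInfoSketch> filterAndSort(List<ClassInfoSketch> infos) {
        List<ClassInfoSketch> copy = new ArrayList<>(infos);
        copy.removeIf(info -> EXCLUDED.contains(info.name()));
        copy.sort(Comparator.comparing(ClassInfoSketch::name));
        return copy;
    }
}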
range.v1(); upper = range.v2(); } else { @@ -51,48 +51,13 @@ public boolean contains(String addressToCheck) { return isBetween(parsedAddress, lower, upper); } - private static Tuple getLowerUpper(Tuple cidr) { - final InetAddress value = cidr.v1(); - final Integer prefixLength = cidr.v2(); - - if (prefixLength < 0 || prefixLength > 8 * value.getAddress().length) { - throw new IllegalArgumentException( - "illegal prefixLength '" + prefixLength + "'. Must be 0-32 for IPv4 ranges, 0-128 for IPv6 ranges" - ); - } - - byte[] lower = value.getAddress(); - byte[] upper = value.getAddress(); - // Borrowed from Lucene - for (int i = prefixLength; i < 8 * lower.length; i++) { - int m = 1 << (7 - (i & 7)); - lower[i >> 3] &= (byte) ~m; - upper[i >> 3] |= (byte) m; - } - return new Tuple<>(lower, upper); - } - private static boolean isBetween(byte[] addr, byte[] lower, byte[] upper) { if (addr.length != lower.length) { - addr = encode(addr); - lower = encode(lower); - upper = encode(upper); + addr = CIDRUtils.encode(addr); + lower = CIDRUtils.encode(lower); + upper = CIDRUtils.encode(upper); } return Arrays.compareUnsigned(lower, addr) <= 0 && Arrays.compareUnsigned(upper, addr) >= 0; } - // Borrowed from Lucene to make this consistent IP fields matching for the mix of IPv4 and IPv6 values - // Modified signature to avoid extra conversions - private static byte[] encode(byte[] address) { - final byte[] IPV4_PREFIX = new byte[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, -1 }; - if (address.length == 4) { - byte[] mapped = new byte[16]; - System.arraycopy(IPV4_PREFIX, 0, mapped, 0, IPV4_PREFIX.length); - System.arraycopy(address, 0, mapped, IPV4_PREFIX.length, address.length); - address = mapped; - } else if (address.length != 16) { - throw new UnsupportedOperationException("Only IPv4 and IPv6 addresses are supported"); - } - return address; - } } diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.ingest.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.ingest.txt index 7f2282eaa714a..13678c4216d7a 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.ingest.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.ingest.txt @@ -11,6 +11,7 @@ class java.lang.String { String org.elasticsearch.painless.api.Augmentation sha1() String org.elasticsearch.painless.api.Augmentation sha256() + String org.elasticsearch.painless.api.Augmentation sha512() } class org.elasticsearch.painless.api.Json { diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.reindex.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.reindex.txt index a90d3525e1203..18d658d797b60 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.reindex.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.reindex.txt @@ -11,6 +11,7 @@ class java.lang.String { String org.elasticsearch.painless.api.Augmentation sha1() String org.elasticsearch.painless.api.Augmentation sha256() + String org.elasticsearch.painless.api.Augmentation sha512() } class org.elasticsearch.painless.api.Json { diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.update.txt 
b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.update.txt index b58a8e720b21b..214fdaae26394 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.update.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.update.txt @@ -11,6 +11,7 @@ class java.lang.String { String org.elasticsearch.painless.api.Augmentation sha1() String org.elasticsearch.painless.api.Augmentation sha256() + String org.elasticsearch.painless.api.Augmentation sha512() } class org.elasticsearch.painless.api.Json { diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.update_by_query.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.update_by_query.txt index 7c0bf5b2985fe..6c569a165336b 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.update_by_query.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.update_by_query.txt @@ -11,6 +11,7 @@ class java.lang.String { String org.elasticsearch.painless.api.Augmentation sha1() String org.elasticsearch.painless.api.Augmentation sha256() + String org.elasticsearch.painless.api.Augmentation sha512() } class org.elasticsearch.painless.api.Json { diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AugmentationTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/AugmentationTests.java index b51f0f2657278..e97bd1bb123ca 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AugmentationTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/AugmentationTests.java @@ -293,6 +293,24 @@ public void testSha256() { assertEquals("97df3588b5a3f24babc3851b372f0ba71a9dcdded43b14b9d06961bfc1707d9d", execDigest("'foobarbaz'.sha256()")); } + public void testSha512() { + assertEquals( + "f7fbba6e0636f890e56fbbf3283e524c6fa3204ae298382d624741d0dc663832" + + "6e282c41be5e4254d8820772c5518a2c5a8c0c7f7eda19594a7eb539453e1ed7", + execDigest("'foo'.sha512()") + ); + assertEquals( + "d82c4eb5261cb9c8aa9855edd67d1bd10482f41529858d925094d173fa662aa9" + + "1ff39bc5b188615273484021dfb16fd8284cf684ccf0fc795be3aa2fc1e6c181", + execDigest("'bar'.sha512()") + ); + assertEquals( + "cb377c10b0f5a62c803625a799d9e908be45e767f5d147d4744907cb05597aa4" + + "edd329a0af147add0cf4181ed328fa1e7994265826b3ed3d7ef6f067ca99185a", + execDigest("'foobarbaz'.sha512()") + ); + } + public void testToEpochMilli() { assertEquals(0L, exec("ZonedDateTime.parse('1970-01-01T00:00:00Z').toEpochMilli()")); assertEquals(1602097376782L, exec("ZonedDateTime.parse('2020-10-07T19:02:56.782Z').toEpochMilli()")); diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java index f63f290bf58fc..c058dddd8f875 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java @@ -9,7 +9,6 @@ package org.elasticsearch.index.mapper.extras; import org.apache.lucene.document.FeatureField; -import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.FieldInfos; import 
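The `sha512()` augmentation introduced above is whitelisted for the ingest, reindex, update and update_by_query script contexts alongside the existing `sha1()` and `sha256()` helpers. Outside Painless the same lowercase hex digest can be reproduced with the plain JDK; this sketch deliberately avoids the internal MessageDigests utility:

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.HexFormat;

final class Sha512Sketch {
    // Equivalent of "'foo'.sha512()" in a script: SHA-512 over the UTF-8 bytes, rendered as lowercase hex.
    static String sha512(String source) throws NoSuchAlgorithmException {
        byte[] digest = MessageDigest.getInstance("SHA-512").digest(source.getBytes(StandardCharsets.UTF_8));
        return HexFormat.of().formatHex(digest);
    }
}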
org.apache.lucene.index.Term; import org.apache.lucene.search.Query; @@ -136,12 +135,7 @@ public Query existsQuery(SearchExecutionContext context) { @Override public boolean fieldHasValue(FieldInfos fieldInfos) { - for (FieldInfo fieldInfo : fieldInfos) { - if (fieldInfo.getName().equals(NAME)) { - return true; - } - } - return false; + return fieldInfos.fieldInfo(NAME) != null; } @Override diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureMetaFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureMetaFieldMapper.java index 07fe64c7466bd..c45065037b5a8 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureMetaFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureMetaFieldMapper.java @@ -8,7 +8,6 @@ package org.elasticsearch.index.mapper.extras; -import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.FieldInfos; import org.apache.lucene.search.Query; import org.elasticsearch.index.mapper.MappedFieldType; @@ -59,12 +58,7 @@ public Query existsQuery(SearchExecutionContext context) { @Override public boolean fieldHasValue(FieldInfos fieldInfos) { - for (FieldInfo fieldInfo : fieldInfos) { - if (fieldInfo.getName().equals(NAME)) { - return true; - } - } - return false; + return fieldInfos.fieldInfo(NAME) != null; } @Override diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/SourceConfirmedTextQuery.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/SourceConfirmedTextQuery.java index dc51afe5d420d..3d0f26e8cc130 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/SourceConfirmedTextQuery.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/SourceConfirmedTextQuery.java @@ -9,9 +9,7 @@ package org.elasticsearch.index.mapper.extras; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.FieldInvertState; -import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermStates; @@ -300,19 +298,23 @@ public RuntimePhraseScorer scorer(LeafReaderContext context) throws IOException @Override public Matches matches(LeafReaderContext context, int doc) throws IOException { - FieldInfo fi = context.reader().getFieldInfos().fieldInfo(field); - if (fi == null) { + var terms = context.reader().terms(field); + if (terms == null) { return null; } - // Some highlighters will already have reindexed the source with positions and offsets, + // Some highlighters will already have re-indexed the source with positions and offsets, // so rather than doing it again we check to see if this data is available on the // current context and if so delegate directly to the inner query - if (fi.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) > 0) { + if (terms.hasOffsets()) { Weight innerWeight = in.createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, 1); return innerWeight.matches(context, doc); } RuntimePhraseScorer scorer = scorer(context); - if (scorer == null || scorer.iterator().advance(doc) != doc) { + if (scorer == null) { + return null; + } + final TwoPhaseIterator twoPhase = scorer.twoPhaseIterator(); + if (twoPhase.approximation().advance(doc) != doc || scorer.twoPhaseIterator().matches() == 
false) { return null; } return scorer.matches(); @@ -321,13 +323,14 @@ public Matches matches(LeafReaderContext context, int doc) throws IOException { } private class RuntimePhraseScorer extends Scorer { - private final LeafSimScorer scorer; private final CheckedIntFunction, IOException> valueFetcher; private final String field; private final Query query; private final TwoPhaseIterator twoPhase; + private final MemoryIndexEntry cacheEntry = new MemoryIndexEntry(); + private int doc = -1; private float freq; @@ -357,7 +360,6 @@ public float matchCost() { // Defaults to a high-ish value so that it likely runs last. return 10_000f; } - }; } @@ -394,35 +396,35 @@ private float freq() throws IOException { return freq; } - private float computeFreq() throws IOException { - MemoryIndex index = new MemoryIndex(); - index.setSimilarity(FREQ_SIMILARITY); - List values = valueFetcher.apply(docID()); - float frequency = 0; - for (Object value : values) { - if (value == null) { - continue; + private MemoryIndex getOrCreateMemoryIndex() throws IOException { + if (cacheEntry.docID != docID()) { + cacheEntry.docID = docID(); + cacheEntry.memoryIndex = new MemoryIndex(true, false); + cacheEntry.memoryIndex.setSimilarity(FREQ_SIMILARITY); + List values = valueFetcher.apply(docID()); + for (Object value : values) { + if (value == null) { + continue; + } + cacheEntry.memoryIndex.addField(field, value.toString(), indexAnalyzer); } - index.addField(field, value.toString(), indexAnalyzer); - frequency += index.search(query); - index.reset(); } - return frequency; + return cacheEntry.memoryIndex; + } + + private float computeFreq() throws IOException { + return getOrCreateMemoryIndex().search(query); } private Matches matches() throws IOException { - MemoryIndex index = new MemoryIndex(true, false); - List values = valueFetcher.apply(docID()); - for (Object value : values) { - if (value == null) { - continue; - } - index.addField(field, value.toString(), indexAnalyzer); - } - IndexSearcher searcher = index.createSearcher(); + IndexSearcher searcher = getOrCreateMemoryIndex().createSearcher(); Weight w = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1); return w.matches(searcher.getLeafContexts().get(0), 0); } } + private static class MemoryIndexEntry { + private int docID = -1; + private MemoryIndex memoryIndex; + } } diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/SourceConfirmedTextQueryTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/SourceConfirmedTextQueryTests.java index 2b8d5870cb8aa..81e1dd7099860 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/SourceConfirmedTextQueryTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/SourceConfirmedTextQueryTests.java @@ -49,13 +49,19 @@ import java.io.IOException; import java.util.Collections; import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; public class SourceConfirmedTextQueryTests extends ESTestCase { + private static final AtomicInteger sourceFetchCount = new AtomicInteger(); private static final IOFunction, IOException>> SOURCE_FETCHER_PROVIDER = - context -> docID -> Collections.singletonList(context.reader().document(docID).get("body")); + context -> docID -> { + sourceFetchCount.incrementAndGet(); + return 
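The SourceConfirmedTextQuery change above keeps the per-document MemoryIndex in a small MemoryIndexEntry holder, so computing the phrase frequency and building Matches for the same document fetches and re-analyzes the _source only once. The gist of that doc-keyed lazy cache, sketched as a hypothetical standalone class:

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.index.memory.MemoryIndex;

final class CachedMemoryIndexSketch {
    private int docID = -1;
    private MemoryIndex memoryIndex;

    // Rebuild the in-memory index only when asked about a different document than last time.
    MemoryIndex getOrCreate(int doc, String field, Iterable<String> values, Analyzer analyzer) {
        if (docID != doc) {
            docID = doc;
            memoryIndex = new MemoryIndex(true, false); // store offsets so Matches can be produced
            for (String value : values) {
                memoryIndex.addField(field, value, analyzer);
            }
        }
        return memoryIndex;
    }
}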
Collections.singletonList(context.reader().document(docID).get("body")); + }; public void testTerm() throws Exception { try (Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(Lucene.STANDARD_ANALYZER))) { @@ -440,11 +446,11 @@ public void testEmptyIndex() throws Exception { } public void testMatches() throws Exception { - checkMatches(new TermQuery(new Term("body", "d")), "a b c d e", new int[] { 3, 3 }); - checkMatches(new PhraseQuery("body", "b", "c"), "a b c d c b c a", new int[] { 1, 2, 5, 6 }); + checkMatches(new TermQuery(new Term("body", "d")), "a b c d e", new int[] { 3, 3 }, false); + checkMatches(new PhraseQuery("body", "b", "c"), "a b c d c b c a", new int[] { 1, 2, 5, 6 }, true); } - private static void checkMatches(Query query, String inputDoc, int[] expectedMatches) throws IOException { + private static void checkMatches(Query query, String inputDoc, int[] expectedMatches, boolean expectedFetch) throws IOException { try (Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(Lucene.STANDARD_ANALYZER))) { Document doc = new Document(); doc.add(new TextField("body", "xxxxxnomatchxxxx", Store.YES)); @@ -464,30 +470,48 @@ private static void checkMatches(Query query, String inputDoc, int[] expectedMat Query sourceConfirmedQuery = new SourceConfirmedTextQuery(query, SOURCE_FETCHER_PROVIDER, Lucene.STANDARD_ANALYZER); try (IndexReader ir = DirectoryReader.open(w)) { - - IndexSearcher searcher = new IndexSearcher(ir); - TopDocs td = searcher.search( - sourceConfirmedQuery, - 3, - new Sort(KeywordField.newSortField("sort", false, SortedSetSelector.Type.MAX)) - ); - - Weight weight = searcher.createWeight(searcher.rewrite(sourceConfirmedQuery), ScoreMode.COMPLETE_NO_SCORES, 1); - - int firstDoc = td.scoreDocs[0].doc; - LeafReaderContext firstCtx = searcher.getLeafContexts().get(ReaderUtil.subIndex(firstDoc, searcher.getLeafContexts())); - checkMatches(weight, firstCtx, firstDoc - firstCtx.docBase, expectedMatches, 0); - - int secondDoc = td.scoreDocs[1].doc; - LeafReaderContext secondCtx = searcher.getLeafContexts().get(ReaderUtil.subIndex(secondDoc, searcher.getLeafContexts())); - checkMatches(weight, secondCtx, secondDoc - secondCtx.docBase, expectedMatches, 1); - + { + IndexSearcher searcher = new IndexSearcher(ir); + TopDocs td = searcher.search( + sourceConfirmedQuery, + 3, + new Sort(KeywordField.newSortField("sort", false, SortedSetSelector.Type.MAX)) + ); + + Weight weight = searcher.createWeight(searcher.rewrite(sourceConfirmedQuery), ScoreMode.COMPLETE_NO_SCORES, 1); + + int firstDoc = td.scoreDocs[0].doc; + LeafReaderContext firstCtx = searcher.getLeafContexts().get(ReaderUtil.subIndex(firstDoc, searcher.getLeafContexts())); + checkMatches(weight, firstCtx, firstDoc - firstCtx.docBase, expectedMatches, 0, expectedFetch); + + int secondDoc = td.scoreDocs[1].doc; + LeafReaderContext secondCtx = searcher.getLeafContexts() + .get(ReaderUtil.subIndex(secondDoc, searcher.getLeafContexts())); + checkMatches(weight, secondCtx, secondDoc - secondCtx.docBase, expectedMatches, 1, expectedFetch); + } + + { + IndexSearcher searcher = new IndexSearcher(ir); + TopDocs td = searcher.search(KeywordField.newExactQuery("sort", "0"), 1); + + Weight weight = searcher.createWeight(searcher.rewrite(sourceConfirmedQuery), ScoreMode.COMPLETE_NO_SCORES, 1); + int firstDoc = td.scoreDocs[0].doc; + LeafReaderContext firstCtx = searcher.getLeafContexts().get(ReaderUtil.subIndex(firstDoc, searcher.getLeafContexts())); + 
checkMatches(weight, firstCtx, firstDoc - firstCtx.docBase, new int[0], 0, false); + } } } } - private static void checkMatches(Weight w, LeafReaderContext ctx, int doc, int[] expectedMatches, int offset) throws IOException { + private static void checkMatches(Weight w, LeafReaderContext ctx, int doc, int[] expectedMatches, int offset, boolean expectedFetch) + throws IOException { + int count = sourceFetchCount.get(); Matches matches = w.matches(ctx, doc); + if (expectedMatches.length == 0) { + assertNull(matches); + assertThat(sourceFetchCount.get() - count, equalTo(expectedFetch ? 1 : 0)); + return; + } assertNotNull(matches); MatchesIterator mi = matches.getMatches("body"); int i = 0; @@ -498,6 +522,7 @@ private static void checkMatches(Weight w, LeafReaderContext ctx, int doc, int[] i += 2; } assertEquals(expectedMatches.length, i); + assertThat(sourceFetchCount.get() - count, equalTo(expectedFetch ? 1 : 0)); } } diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java index d6b7ccad4f3c5..508e438932e68 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java @@ -12,6 +12,8 @@ import org.apache.lucene.document.SortedDocValuesField; import org.apache.lucene.document.StringField; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.logging.DeprecationCategory; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.NamedAnalyzer; @@ -52,6 +54,8 @@ */ public final class ParentJoinFieldMapper extends FieldMapper { + private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(ParentJoinFieldMapper.class); + public static final String NAME = "join"; public static final String CONTENT_TYPE = "join"; @@ -112,6 +116,13 @@ protected Parameter[] getParameters() { @Override public ParentJoinFieldMapper build(MapperBuilderContext context) { + if (multiFieldsBuilder.hasMultiFields()) { + DEPRECATION_LOGGER.warn( + DeprecationCategory.MAPPINGS, + CONTENT_TYPE + "_multifields", + "Adding multifields to [" + CONTENT_TYPE + "] mappers has no effect and will be forbidden in future" + ); + } checkObjectOrNested(context, name()); final Map parentIdFields = new HashMap<>(); relations.get() diff --git a/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/20_parent_join.yml b/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/20_parent_join.yml index 4f80e4bcb3b38..2ac3a8dd8315a 100644 --- a/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/20_parent_join.yml +++ b/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/20_parent_join.yml @@ -144,3 +144,20 @@ teardown: parent_id: type: child id: "1" + +--- +"deprecated use of multi-fields": + - skip: + version: " - 8.13.99" + reason: "deprecation added in 8.14" + features: warnings + + - do: + warnings: + - "Adding multifields to [join] mappers has no effect and will be forbidden in future" + indices.create: + index: join-multi-field + body: + mappings: + properties: + join_field: { "type": "join", "relations": { "parent": "child", "child": "grand_child" }, "fields": {"keyword": {"type": "keyword"}} } diff --git 
a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java index 7ba83f9ce71b5..b571766e12b8f 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java @@ -138,7 +138,7 @@ public PercolatorFieldMapper build(MapperBuilderContext context) { PercolatorFieldType fieldType = new PercolatorFieldType(context.buildFullName(name()), meta.getValue()); // TODO should percolator even allow multifields? MultiFields multiFields = multiFieldsBuilder.build(this, context); - context = context.createChildContext(name()); + context = context.createChildContext(name(), null); KeywordFieldMapper extractedTermsField = createExtractQueryFieldBuilder( EXTRACTED_TERMS_FIELD_NAME, context, diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index f58611cb0567a..388474acc75ea 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -21,7 +21,6 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.indices.recovery.RecoverySettings; -import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.blobstore.MeteredBlobStoreRepository; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -108,8 +107,7 @@ public AzureRepository( bigArrays, recoverySettings, buildBasePath(metadata), - buildLocation(metadata), - RepositoriesMetrics.NOOP + buildLocation(metadata) ); this.chunkSize = Repository.CHUNK_SIZE_SETTING.get(metadata.settings()); this.storageService = storageService; diff --git a/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java b/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java index 94d0abe17909f..e2338371cf837 100644 --- a/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java +++ b/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java @@ -19,7 +19,6 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.indices.recovery.RecoverySettings; -import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.blobstore.MeteredBlobStoreRepository; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -77,8 +76,7 @@ class GoogleCloudStorageRepository extends MeteredBlobStoreRepository { bigArrays, recoverySettings, buildBasePath(metadata), - buildLocation(metadata), - RepositoriesMetrics.NOOP + buildLocation(metadata) ); this.storageService = storageService; diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index 248ccc119794e..4080a47c7dabe 100644 --- 
a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -39,7 +39,6 @@ import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.PluginsService; -import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; @@ -460,9 +459,9 @@ protected S3Repository createRepository( ClusterService clusterService, BigArrays bigArrays, RecoverySettings recoverySettings, - RepositoriesMetrics repositoriesMetrics + S3RepositoriesMetrics s3RepositoriesMetrics ) { - return new S3Repository(metadata, registry, getService(), clusterService, bigArrays, recoverySettings, repositoriesMetrics) { + return new S3Repository(metadata, registry, getService(), clusterService, bigArrays, recoverySettings, s3RepositoriesMetrics) { @Override public BlobStore blobStore() { diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java index f182b54b0c696..b8fea485c6276 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java @@ -30,7 +30,6 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.repositories.AbstractThirdPartyRepositoryTestCase; -import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.fixtures.minio.MinioTestContainer; @@ -145,7 +144,7 @@ public long absoluteTimeInMillis() { ClusterServiceUtils.createClusterService(threadpool), BigArrays.NON_RECYCLING_INSTANCE, new RecoverySettings(node().settings(), node().injector().getInstance(ClusterService.class).getClusterSettings()), - RepositoriesMetrics.NOOP + S3RepositoriesMetrics.NOOP ) ) { repository.start(); diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java index 78b1e2dba98b3..6b9937b01a433 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java @@ -33,7 +33,6 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; @@ -84,7 +83,7 @@ class S3BlobStore implements BlobStore { private final ThreadPool threadPool; private final Executor snapshotExecutor; - private final RepositoriesMetrics repositoriesMetrics; + private final S3RepositoriesMetrics s3RepositoriesMetrics; private final StatsCollectors statsCollectors = new StatsCollectors(); @@ -98,7 +97,7 @@ class S3BlobStore 
implements BlobStore { RepositoryMetadata repositoryMetadata, BigArrays bigArrays, ThreadPool threadPool, - RepositoriesMetrics repositoriesMetrics + S3RepositoriesMetrics s3RepositoriesMetrics ) { this.service = service; this.bigArrays = bigArrays; @@ -110,7 +109,7 @@ class S3BlobStore implements BlobStore { this.repositoryMetadata = repositoryMetadata; this.threadPool = threadPool; this.snapshotExecutor = threadPool.executor(ThreadPool.Names.SNAPSHOT); - this.repositoriesMetrics = repositoriesMetrics; + this.s3RepositoriesMetrics = s3RepositoriesMetrics; } RequestMetricCollector getMetricCollector(Operation operation, OperationPurpose purpose) { @@ -174,19 +173,19 @@ public final void collectMetrics(Request request, Response response) { .map(List::size) .orElse(0); - repositoriesMetrics.operationCounter().incrementBy(1, attributes); + s3RepositoriesMetrics.common().operationCounter().incrementBy(1, attributes); if (numberOfAwsErrors == requestCount) { - repositoriesMetrics.unsuccessfulOperationCounter().incrementBy(1, attributes); + s3RepositoriesMetrics.common().unsuccessfulOperationCounter().incrementBy(1, attributes); } - repositoriesMetrics.requestCounter().incrementBy(requestCount, attributes); + s3RepositoriesMetrics.common().requestCounter().incrementBy(requestCount, attributes); if (exceptionCount > 0) { - repositoriesMetrics.exceptionCounter().incrementBy(exceptionCount, attributes); - repositoriesMetrics.exceptionHistogram().record(exceptionCount, attributes); + s3RepositoriesMetrics.common().exceptionCounter().incrementBy(exceptionCount, attributes); + s3RepositoriesMetrics.common().exceptionHistogram().record(exceptionCount, attributes); } if (throttleCount > 0) { - repositoriesMetrics.throttleCounter().incrementBy(throttleCount, attributes); - repositoriesMetrics.throttleHistogram().record(throttleCount, attributes); + s3RepositoriesMetrics.common().throttleCounter().incrementBy(throttleCount, attributes); + s3RepositoriesMetrics.common().throttleHistogram().record(throttleCount, attributes); } maybeRecordHttpRequestTime(request); } @@ -207,7 +206,7 @@ private void maybeRecordHttpRequestTime(Request request) { if (totalTimeInMicros == 0) { logger.warn("Expected HttpRequestTime to be tracked for request [{}] but found no count.", request); } else { - repositoriesMetrics.httpRequestTimeInMicroHistogram().record(totalTimeInMicros, attributes); + s3RepositoriesMetrics.common().httpRequestTimeInMicroHistogram().record(totalTimeInMicros, attributes); } } @@ -293,6 +292,14 @@ public long bufferSizeInBytes() { return bufferSize.getBytes(); } + public RepositoryMetadata getRepositoryMetadata() { + return repositoryMetadata; + } + + public S3RepositoriesMetrics getS3RepositoriesMetrics() { + return s3RepositoriesMetrics; + } + @Override public BlobContainer blobContainer(BlobPath path) { return new S3BlobContainer(path, this); diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoriesMetrics.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoriesMetrics.java new file mode 100644 index 0000000000000..e025214998d5b --- /dev/null +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoriesMetrics.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.repositories.s3; + +import org.elasticsearch.repositories.RepositoriesMetrics; +import org.elasticsearch.telemetry.metric.LongCounter; +import org.elasticsearch.telemetry.metric.LongHistogram; + +public record S3RepositoriesMetrics( + RepositoriesMetrics common, + LongCounter retryStartedCounter, + LongCounter retryCompletedCounter, + LongHistogram retryHistogram +) { + + public static S3RepositoriesMetrics NOOP = new S3RepositoriesMetrics(RepositoriesMetrics.NOOP); + + public static final String METRIC_RETRY_EVENT_TOTAL = "es.repositories.s3.input_stream.retry.event.total"; + public static final String METRIC_RETRY_SUCCESS_TOTAL = "es.repositories.s3.input_stream.retry.success.total"; + public static final String METRIC_RETRY_ATTEMPTS_HISTOGRAM = "es.repositories.s3.input_stream.retry.attempts.histogram"; + + public S3RepositoriesMetrics(RepositoriesMetrics common) { + this( + common, + common.meterRegistry().registerLongCounter(METRIC_RETRY_EVENT_TOTAL, "s3 input stream retry event count", "unit"), + common.meterRegistry().registerLongCounter(METRIC_RETRY_SUCCESS_TOTAL, "s3 input stream retry success count", "unit"), + common.meterRegistry() + .registerLongHistogram(METRIC_RETRY_ATTEMPTS_HISTOGRAM, "s3 input stream retry attempts histogram", "unit") + ); + } +} diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index 624867a2f0c41..26b1b1158dea0 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -31,7 +31,6 @@ import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.repositories.FinalizeSnapshotContext; -import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.blobstore.MeteredBlobStoreRepository; @@ -195,6 +194,8 @@ class S3Repository extends MeteredBlobStoreRepository { private final Executor snapshotExecutor; + private final S3RepositoriesMetrics s3RepositoriesMetrics; + /** * Constructs an s3 backed repository */ @@ -205,7 +206,7 @@ class S3Repository extends MeteredBlobStoreRepository { final ClusterService clusterService, final BigArrays bigArrays, final RecoverySettings recoverySettings, - final RepositoriesMetrics repositoriesMetrics + final S3RepositoriesMetrics s3RepositoriesMetrics ) { super( metadata, @@ -214,10 +215,10 @@ class S3Repository extends MeteredBlobStoreRepository { bigArrays, recoverySettings, buildBasePath(metadata), - buildLocation(metadata), - repositoriesMetrics + buildLocation(metadata) ); this.service = service; + this.s3RepositoriesMetrics = s3RepositoriesMetrics; this.snapshotExecutor = threadPool().executor(ThreadPool.Names.SNAPSHOT); // Parse and validate the user's S3 Storage Class setting @@ -408,7 +409,7 @@ protected S3BlobStore createBlobStore() { metadata, bigArrays, threadPool, - repositoriesMetrics + s3RepositoriesMetrics ); } diff --git 
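The new S3RepositoriesMetrics record above bundles the common RepositoriesMetrics with three S3-specific retry instruments, and S3RepositoryPlugin constructs a single instance that is threaded into each repository. A hedged usage sketch follows; the attribute keys mirror the metricAttributes() helper later in this diff, while the concrete values are placeholders:

import java.util.Map;

final class RetryMetricsSketch {
    // Illustrative only: record a completed retry sequence against the instruments exposed by
    // S3RepositoriesMetrics. The attribute values below are placeholders, not real repository data.
    static void onRetriedSuccessfully(S3RepositoriesMetrics metrics, int numberOfRetries) {
        Map<String, Object> attributes = Map.of(
            "repo_type", "s3",
            "repo_name", "my-backups",   // placeholder repository name
            "operation", "GetObject",    // placeholder operation key
            "purpose", "SnapshotData",   // placeholder purpose key
            "action", "read"
        );
        metrics.retryCompletedCounter().incrementBy(1, attributes);
        metrics.retryHistogram().record(numberOfRetries, attributes);
    }
}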
a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java index 83668cc271922..26047c3b416a7 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java @@ -78,9 +78,9 @@ protected S3Repository createRepository( final ClusterService clusterService, final BigArrays bigArrays, final RecoverySettings recoverySettings, - final RepositoriesMetrics repositoriesMetrics + final S3RepositoriesMetrics s3RepositoriesMetrics ) { - return new S3Repository(metadata, registry, service.get(), clusterService, bigArrays, recoverySettings, repositoriesMetrics); + return new S3Repository(metadata, registry, service.get(), clusterService, bigArrays, recoverySettings, s3RepositoriesMetrics); } @Override @@ -101,11 +101,12 @@ public Map getRepositories( final ClusterService clusterService, final BigArrays bigArrays, final RecoverySettings recoverySettings, - RepositoriesMetrics repositoriesMetrics + final RepositoriesMetrics repositoriesMetrics ) { + final S3RepositoriesMetrics s3RepositoriesMetrics = new S3RepositoriesMetrics(repositoriesMetrics); return Collections.singletonMap( S3Repository.TYPE, - metadata -> createRepository(metadata, registry, clusterService, bigArrays, recoverySettings, repositoriesMetrics) + metadata -> createRepository(metadata, registry, clusterService, bigArrays, recoverySettings, s3RepositoriesMetrics) ); } diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java index c457b9d51e8b9..f7a99a399f59f 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java @@ -27,6 +27,7 @@ import java.nio.file.NoSuchFileException; import java.util.ArrayList; import java.util.List; +import java.util.Map; import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.repositories.s3.S3BlobStore.configureRequestForMetrics; @@ -80,7 +81,7 @@ class S3RetryingInputStream extends InputStream { this.end = end; final int initialAttempt = attempt; openStreamWithRetry(); - maybeLogForSuccessAfterRetries(initialAttempt, "opened"); + maybeLogAndRecordMetricsForSuccess(initialAttempt, "open"); } private void openStreamWithRetry() throws IOException { @@ -105,6 +106,9 @@ private void openStreamWithRetry() throws IOException { ); } + if (attempt == 1) { + blobStore.getS3RepositoriesMetrics().retryStartedCounter().incrementBy(1, metricAttributes("open")); + } final long delayInMillis = maybeLogAndComputeRetryDelay("opening", e); delayBeforeRetry(delayInMillis); } @@ -142,9 +146,12 @@ public int read() throws IOException { } else { currentOffset += 1; } - maybeLogForSuccessAfterRetries(initialAttempt, "read"); + maybeLogAndRecordMetricsForSuccess(initialAttempt, "read"); return result; } catch (IOException e) { + if (attempt == initialAttempt) { + blobStore.getS3RepositoriesMetrics().retryStartedCounter().incrementBy(1, metricAttributes("read")); + } reopenStreamOrFail(e); } } @@ -162,9 +169,12 @@ public int read(byte[] b, int off, int len) throws IOException { } else { currentOffset += bytesRead; } - 
maybeLogForSuccessAfterRetries(initialAttempt, "read"); + maybeLogAndRecordMetricsForSuccess(initialAttempt, "read"); return bytesRead; } catch (IOException e) { + if (attempt == initialAttempt) { + blobStore.getS3RepositoriesMetrics().retryStartedCounter().incrementBy(1, metricAttributes("read")); + } reopenStreamOrFail(e); } } @@ -246,16 +256,20 @@ private void logForRetry(Level level, String action, Exception e) { ); } - private void maybeLogForSuccessAfterRetries(int initialAttempt, String action) { + private void maybeLogAndRecordMetricsForSuccess(int initialAttempt, String action) { if (attempt > initialAttempt) { + final int numberOfRetries = attempt - initialAttempt; logger.info( "successfully {} input stream for [{}/{}] with purpose [{}] after [{}] retries", action, blobStore.bucket(), blobKey, purpose.getKey(), - attempt - initialAttempt + numberOfRetries ); + final Map attributes = metricAttributes(action); + blobStore.getS3RepositoriesMetrics().retryCompletedCounter().incrementBy(1, attributes); + blobStore.getS3RepositoriesMetrics().retryHistogram().record(numberOfRetries, attributes); } } @@ -294,6 +308,21 @@ protected long getRetryDelayInMillis() { return 10L << (Math.min(attempt - 1, 10)); } + private Map metricAttributes(String action) { + return Map.of( + "repo_type", + S3Repository.TYPE, + "repo_name", + blobStore.getRepositoryMetadata().name(), + "operation", + Operation.GET_OBJECT.getKey(), + "purpose", + purpose.getKey(), + "action", + action + ); + } + @Override public void close() throws IOException { maybeAbort(currentStream); diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java index 28a48c2968f59..cf3bc21526bf6 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java @@ -26,7 +26,6 @@ import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.PluginsService; -import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.rest.AbstractRestChannel; import org.elasticsearch.rest.RestRequest; @@ -264,9 +263,9 @@ protected S3Repository createRepository( ClusterService clusterService, BigArrays bigArrays, RecoverySettings recoverySettings, - RepositoriesMetrics repositoriesMetrics + S3RepositoriesMetrics s3RepositoriesMetrics ) { - return new S3Repository(metadata, registry, getService(), clusterService, bigArrays, recoverySettings, repositoriesMetrics) { + return new S3Repository(metadata, registry, getService(), clusterService, bigArrays, recoverySettings, s3RepositoriesMetrics) { @Override protected void assertSnapshotOrGenericThread() { // eliminate thread name check as we create repo manually on test/main threads diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java index 0ddd29171b3bd..05268d750637c 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java 
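The retry instrumentation added above follows a simple pairing rule: the "retry started" counter is bumped once, when the first failure for an action ("open" or "read") is observed, and the "retry completed" counter plus an attempts-histogram sample are recorded once, when that action eventually succeeds after one or more retries, all tagged with the same attribute map (repo_type, repo_name, operation, purpose, action). A minimal sketch of that pattern, using simplified stand-in counter and histogram types rather than the real LongCounter/LongHistogram telemetry classes:

import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

class RetryMetricsSketch {

    // Simplified stand-ins for LongCounter / LongHistogram; attributes are accepted but ignored here.
    static final class Counter {
        final AtomicLong total = new AtomicLong();
        void incrementBy(long amount, Map<String, Object> attributes) { total.addAndGet(amount); }
    }

    static final class Histogram {
        final AtomicLong samples = new AtomicLong();
        void record(long value, Map<String, Object> attributes) { samples.incrementAndGet(); }
    }

    final Counter retryStartedCounter = new Counter();
    final Counter retryCompletedCounter = new Counter();
    final Histogram retryHistogram = new Histogram();

    int attempt = 1;

    // Called when an open/read fails: count the retry event only on the first failure of this action.
    void onFailure(Map<String, Object> attributes) {
        if (attempt == 1) {
            retryStartedCounter.incrementBy(1, attributes);
        }
        attempt++;
    }

    // Called when the action finally succeeds: if any retries happened, record one completion
    // and one histogram sample holding the number of retries.
    void onSuccess(int initialAttempt, Map<String, Object> attributes) {
        if (attempt > initialAttempt) {
            int numberOfRetries = attempt - initialAttempt;
            retryCompletedCounter.incrementBy(1, attributes);
            retryHistogram.record(numberOfRetries, attributes);
        }
    }

    public static void main(String[] args) {
        RetryMetricsSketch metrics = new RetryMetricsSketch();
        Map<String, Object> attrs = Map.of(
            "repo_type", "s3", "repo_name", "repository",
            "operation", "GetObject", "purpose", "Indices", "action", "open");
        int initialAttempt = metrics.attempt;
        metrics.onFailure(attrs);                  // first failure -> one "retry started" event
        metrics.onSuccess(initialAttempt, attrs);  // success after one retry -> completion + histogram(1)
        System.out.println("started=" + metrics.retryStartedCounter.total
            + ", completed=" + metrics.retryCompletedCounter.total);
    }
}

The test assertions that follow rely on exactly this pairing: every started event has a matching completed event, and the number of histogram samples equals the number of start/complete pairs.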
@@ -43,6 +43,9 @@ import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.blobstore.AbstractBlobContainerRetriesTestCase; import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil; +import org.elasticsearch.telemetry.InstrumentType; +import org.elasticsearch.telemetry.Measurement; +import org.elasticsearch.telemetry.RecordingMeterRegistry; import org.elasticsearch.watcher.ResourceWatcherService; import org.hamcrest.Matcher; import org.junit.After; @@ -59,7 +62,9 @@ import java.nio.charset.StandardCharsets; import java.nio.file.NoSuchFileException; import java.util.Arrays; +import java.util.List; import java.util.Locale; +import java.util.Map; import java.util.Objects; import java.util.OptionalInt; import java.util.concurrent.atomic.AtomicBoolean; @@ -74,10 +79,13 @@ import static org.elasticsearch.repositories.s3.S3ClientSettings.READ_TIMEOUT_SETTING; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThan; @@ -91,6 +99,7 @@ public class S3BlobContainerRetriesTests extends AbstractBlobContainerRetriesTes private S3Service service; private AtomicBoolean shouldErrorOnDns; + private RecordingMeterRegistry recordingMeterRegistry; @Before public void setUp() throws Exception { @@ -109,6 +118,7 @@ protected AmazonS3ClientBuilder buildClientBuilder(S3ClientSettings clientSettin return builder; } }; + recordingMeterRegistry = new RecordingMeterRegistry(); super.setUp(); } @@ -185,7 +195,7 @@ protected BlobContainer createBlobContainer( repositoryMetadata, BigArrays.NON_RECYCLING_INSTANCE, new DeterministicTaskQueue().getThreadPool(), - RepositoriesMetrics.NOOP + new S3RepositoriesMetrics(new RepositoriesMetrics(recordingMeterRegistry)) ); return new S3BlobContainer(randomBoolean() ? BlobPath.EMPTY : BlobPath.EMPTY.add("foo"), s3BlobStore) { @Override @@ -669,8 +679,8 @@ public void handle(HttpExchange exchange) throws IOException { } exchange.getResponseBody().write(bytes, rangeStart, length); } else { - failures.incrementAndGet(); if (randomBoolean()) { + failures.incrementAndGet(); exchange.sendResponseHeaders( randomFrom( HttpStatus.SC_INTERNAL_SERVER_ERROR, @@ -686,6 +696,8 @@ public void handle(HttpExchange exchange) throws IOException { if (bytesSent >= meaningfulProgressBytes) { exchange.getResponseBody().flush(); } + } else { + failures.incrementAndGet(); } } } @@ -700,16 +712,28 @@ public void handle(HttpExchange exchange) throws IOException { final int length = between(0, randomBoolean() ? 
bytes.length : Integer.MAX_VALUE); logger.info("--> position={}, length={}", position, length); try (InputStream inputStream = blobContainer.readBlob(OperationPurpose.INDICES, "read_blob_retries_forever", position, length)) { + assertMetricsForOpeningStream(); + recordingMeterRegistry.getRecorder().resetCalls(); + failures.set(0); + final byte[] bytesRead = BytesReference.toBytes(Streams.readFully(inputStream)); assertArrayEquals(Arrays.copyOfRange(bytes, position, Math.min(bytes.length, position + length)), bytesRead); + assertMetricsForReadingStream(); } assertThat(failures.get(), greaterThan(totalFailures)); // Read the whole blob failures.set(0); + recordingMeterRegistry.getRecorder().resetCalls(); try (InputStream inputStream = blobContainer.readBlob(OperationPurpose.INDICES, "read_blob_retries_forever")) { + assertMetricsForOpeningStream(); + recordingMeterRegistry.getRecorder().resetCalls(); + failures.set(0); + final byte[] bytesRead = BytesReference.toBytes(Streams.readFully(inputStream)); assertArrayEquals(bytes, bytesRead); + + assertMetricsForReadingStream(); } assertThat(failures.get(), greaterThan(totalFailures)); } @@ -737,9 +761,13 @@ public void handle(HttpExchange exchange) throws IOException { : blobContainer.readBlob(randomRetryingPurpose(), "read_blob_not_found", between(0, 100), between(1, 100)) ) { Streams.readFully(inputStream); + } }); assertThat(numberOfReads.get(), equalTo(1)); + assertThat(getRetryStartedMeasurements(), empty()); + assertThat(getRetryCompletedMeasurements(), empty()); + assertThat(getRetryHistogramMeasurements(), empty()); } @Override @@ -761,6 +789,77 @@ protected OperationPurpose randomFiniteRetryingPurpose() { ); } + private void assertMetricsForOpeningStream() { + final long numberOfOperations = getOperationMeasurements(); + // S3 client sdk internally also retries within the configured maxRetries for retryable errors. + // The retries in S3RetryingInputStream are triggered when the client internal retries are unsuccessful + if (numberOfOperations > 1) { + // For opening the stream, there should be exactly one pair of started and completed records. + // There should be one histogram record, the number of retries must be greater than 0 + final Map attributes = metricAttributes("open"); + assertThat(getRetryStartedMeasurements(), contains(new Measurement(1L, attributes, false))); + assertThat(getRetryCompletedMeasurements(), contains(new Measurement(1L, attributes, false))); + final List retryHistogramMeasurements = getRetryHistogramMeasurements(); + assertThat(retryHistogramMeasurements, hasSize(1)); + assertThat(retryHistogramMeasurements.get(0).getLong(), equalTo(numberOfOperations - 1)); + assertThat(retryHistogramMeasurements.get(0).attributes(), equalTo(attributes)); + } else { + assertThat(getRetryStartedMeasurements(), empty()); + assertThat(getRetryCompletedMeasurements(), empty()); + assertThat(getRetryHistogramMeasurements(), empty()); + } + } + + private void assertMetricsForReadingStream() { + // For reading the stream, there could be multiple pairs of started and completed records. + // It is important that they always come in pairs and the number of pairs match the number + // of histogram records. 
+ final Map attributes = metricAttributes("read"); + final List retryHistogramMeasurements = getRetryHistogramMeasurements(); + final int numberOfReads = retryHistogramMeasurements.size(); + retryHistogramMeasurements.forEach(measurement -> { + assertThat(measurement.getLong(), greaterThan(0L)); + assertThat(measurement.attributes(), equalTo(attributes)); + }); + + final List retryStartedMeasurements = getRetryStartedMeasurements(); + assertThat(retryStartedMeasurements, hasSize(1)); + assertThat(retryStartedMeasurements.get(0).getLong(), equalTo((long) numberOfReads)); + assertThat(retryStartedMeasurements.get(0).attributes(), equalTo(attributes)); + assertThat(retryStartedMeasurements, equalTo(getRetryCompletedMeasurements())); + } + + private long getOperationMeasurements() { + final List operationMeasurements = Measurement.combine( + recordingMeterRegistry.getRecorder().getMeasurements(InstrumentType.LONG_COUNTER, RepositoriesMetrics.METRIC_OPERATIONS_TOTAL) + ); + assertThat(operationMeasurements, hasSize(1)); + return operationMeasurements.get(0).getLong(); + } + + private List getRetryStartedMeasurements() { + return Measurement.combine( + recordingMeterRegistry.getRecorder() + .getMeasurements(InstrumentType.LONG_COUNTER, S3RepositoriesMetrics.METRIC_RETRY_EVENT_TOTAL) + ); + } + + private List getRetryCompletedMeasurements() { + return Measurement.combine( + recordingMeterRegistry.getRecorder() + .getMeasurements(InstrumentType.LONG_COUNTER, S3RepositoriesMetrics.METRIC_RETRY_SUCCESS_TOTAL) + ); + } + + private List getRetryHistogramMeasurements() { + return recordingMeterRegistry.getRecorder() + .getMeasurements(InstrumentType.LONG_HISTOGRAM, S3RepositoriesMetrics.METRIC_RETRY_ATTEMPTS_HISTOGRAM); + } + + private Map metricAttributes(String action) { + return Map.of("repo_type", "s3", "repo_name", "repository", "operation", "GetObject", "purpose", "Indices", "action", action); + } + /** * Asserts that an InputStream is fully consumed, or aborted, when it is closed */ diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java index 0a92ed0a28973..50470ec499ef6 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java @@ -18,7 +18,6 @@ import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.env.Environment; import org.elasticsearch.indices.recovery.RecoverySettings; -import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil; import org.elasticsearch.test.ESTestCase; @@ -130,7 +129,7 @@ private S3Repository createS3Repo(RepositoryMetadata metadata) { BlobStoreTestUtil.mockClusterService(), MockBigArrays.NON_RECYCLING_INSTANCE, new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), - RepositoriesMetrics.NOOP + S3RepositoriesMetrics.NOOP ) { @Override protected void assertSnapshotOrGenericThread() { diff --git a/modules/systemd/build.gradle b/modules/systemd/build.gradle index 0f5c2a4c2fb19..351211ffd3c0e 100644 --- a/modules/systemd/build.gradle +++ b/modules/systemd/build.gradle @@ -11,3 +11,7 @@ esplugin { classname 'org.elasticsearch.systemd.SystemdPlugin' } +dependencies { + implementation 
project(':libs:elasticsearch-native') +} + diff --git a/modules/systemd/src/main/java/module-info.java b/modules/systemd/src/main/java/module-info.java index bd92851fde3a6..b3f5b64ff312f 100644 --- a/modules/systemd/src/main/java/module-info.java +++ b/modules/systemd/src/main/java/module-info.java @@ -12,5 +12,5 @@ requires org.elasticsearch.xcontent; requires org.apache.logging.log4j; requires org.apache.lucene.core; - requires com.sun.jna; + requires org.elasticsearch.nativeaccess; } diff --git a/modules/systemd/src/main/java/org/elasticsearch/systemd/Libsystemd.java b/modules/systemd/src/main/java/org/elasticsearch/systemd/Libsystemd.java deleted file mode 100644 index ba34a18c83e37..0000000000000 --- a/modules/systemd/src/main/java/org/elasticsearch/systemd/Libsystemd.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.systemd; - -import com.sun.jna.Native; - -import java.security.AccessController; -import java.security.PrivilegedAction; - -/** - * Provides access to the native method sd_notify from libsystemd. - */ -class Libsystemd { - - static { - AccessController.doPrivileged((PrivilegedAction) () -> { - Native.register(Libsystemd.class, "libsystemd.so.0"); - return null; - }); - } - - /** - * Notify systemd of state changes. - * - * @param unset_environment if non-zero, the NOTIFY_SOCKET environment variable will be unset before returning and further calls to - * sd_notify will fail - * @param state a new-line separated list of variable assignments; some assignments are understood directly by systemd - * @return a negative error code on failure, and positive if status was successfully sent - */ - static native int sd_notify(int unset_environment, String state); - -} diff --git a/modules/systemd/src/main/java/org/elasticsearch/systemd/SystemdPlugin.java b/modules/systemd/src/main/java/org/elasticsearch/systemd/SystemdPlugin.java index e3dca57472ade..947d1fa58e963 100644 --- a/modules/systemd/src/main/java/org/elasticsearch/systemd/SystemdPlugin.java +++ b/modules/systemd/src/main/java/org/elasticsearch/systemd/SystemdPlugin.java @@ -14,6 +14,8 @@ import org.elasticsearch.Build; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.nativeaccess.NativeAccess; +import org.elasticsearch.nativeaccess.Systemd; import org.elasticsearch.plugins.ClusterPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.threadpool.Scheduler; @@ -26,6 +28,7 @@ public class SystemdPlugin extends Plugin implements ClusterPlugin { private static final Logger logger = LogManager.getLogger(SystemdPlugin.class); private final boolean enabled; + private final Systemd systemd; final boolean isEnabled() { return enabled; @@ -44,18 +47,21 @@ public SystemdPlugin() { } if (isPackageDistribution == false) { logger.debug("disabling sd_notify as the build type [{}] is not a package distribution", buildType); - enabled = false; + this.enabled = false; + this.systemd = null; return; } logger.trace("ES_SD_NOTIFY is set to [{}]", esSDNotify); if (esSDNotify == null) { - enabled = false; + this.enabled = false; + this.systemd = null; return; } if 
(Boolean.TRUE.toString().equals(esSDNotify) == false && Boolean.FALSE.toString().equals(esSDNotify) == false) { throw new RuntimeException("ES_SD_NOTIFY set to unexpected value [" + esSDNotify + "]"); } - enabled = Boolean.TRUE.toString().equals(esSDNotify); + this.enabled = Boolean.TRUE.toString().equals(esSDNotify); + this.systemd = enabled ? NativeAccess.instance().systemd() : null; } private final SetOnce extender = new SetOnce<>(); @@ -77,19 +83,25 @@ public Collection createComponents(PluginServices services) { * Therefore, every fifteen seconds we send systemd a message via sd_notify to extend the timeout by thirty seconds. We will cancel * this scheduled task after we successfully notify systemd that we are ready. */ - extender.set(services.threadPool().scheduleWithFixedDelay(() -> { - final int rc = sd_notify(0, "EXTEND_TIMEOUT_USEC=30000000"); - if (rc < 0) { - logger.warn("extending startup timeout via sd_notify failed with [{}]", rc); - } - }, TimeValue.timeValueSeconds(15), EsExecutors.DIRECT_EXECUTOR_SERVICE)); + extender.set( + services.threadPool() + .scheduleWithFixedDelay( + () -> { systemd.notify_extend_timeout(30); }, + TimeValue.timeValueSeconds(15), + EsExecutors.DIRECT_EXECUTOR_SERVICE + ) + ); return List.of(); } - int sd_notify(@SuppressWarnings("SameParameterValue") final int unset_environment, final String state) { - final int rc = Libsystemd.sd_notify(unset_environment, state); - logger.trace("sd_notify({}, {}) returned [{}]", unset_environment, state, rc); - return rc; + void notifyReady() { + assert systemd != null; + systemd.notify_ready(); + } + + void notifyStopping() { + assert systemd != null; + systemd.notify_stopping(); } @Override @@ -98,11 +110,7 @@ public void onNodeStarted() { assert extender.get() == null; return; } - final int rc = sd_notify(0, "READY=1"); - if (rc < 0) { - // treat failure to notify systemd of readiness as a startup failure - throw new RuntimeException("sd_notify returned error [" + rc + "]"); - } + notifyReady(); assert extender.get() != null; final boolean cancelled = extender.get().cancel(); assert cancelled; @@ -113,11 +121,7 @@ public void close() { if (enabled == false) { return; } - final int rc = sd_notify(0, "STOPPING=1"); - if (rc < 0) { - // do not treat failure to notify systemd of stopping as a failure - logger.warn("sd_notify returned error [{}]", rc); - } + notifyStopping(); } } diff --git a/modules/systemd/src/test/java/org/elasticsearch/systemd/SystemdPluginTests.java b/modules/systemd/src/test/java/org/elasticsearch/systemd/SystemdPluginTests.java index c2d0983e4f825..712483e9c603c 100644 --- a/modules/systemd/src/test/java/org/elasticsearch/systemd/SystemdPluginTests.java +++ b/modules/systemd/src/test/java/org/elasticsearch/systemd/SystemdPluginTests.java @@ -21,16 +21,14 @@ import java.io.IOException; import java.util.Optional; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; import static org.elasticsearch.test.hamcrest.OptionalMatchers.isPresentWith; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.ArgumentMatchers.same; 
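The reworked plugin above replaces the JNA-based Libsystemd.sd_notify calls with the org.elasticsearch.nativeaccess Systemd abstraction: while the node is starting it repeatedly extends systemd's startup timeout, then signals readiness (cancelling the extender) once the node has started, and signals stopping on close. A rough sketch of that lifecycle, with a stand-in Systemd interface and a plain scheduler in place of the Elasticsearch thread pool:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

public class SystemdNotifySketch {

    // Stand-in for org.elasticsearch.nativeaccess.Systemd with the method names used above.
    interface Systemd {
        void notify_extend_timeout(long seconds);
        void notify_ready();
        void notify_stopping();
    }

    private final Systemd systemd;
    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    private ScheduledFuture<?> extender;

    SystemdNotifySketch(Systemd systemd) {
        this.systemd = systemd;
    }

    // While the node is still starting, keep extending systemd's startup timeout
    // every 15 seconds by another 30 seconds.
    void onCreateComponents() {
        extender = scheduler.scheduleWithFixedDelay(
            () -> systemd.notify_extend_timeout(30), 15, 15, TimeUnit.SECONDS);
    }

    // Once the node has started, tell systemd we are ready and stop extending the timeout.
    void onNodeStarted() {
        systemd.notify_ready();
        extender.cancel(false);
    }

    // On shutdown, tell systemd we are stopping.
    void close() {
        systemd.notify_stopping();
        scheduler.shutdownNow();
    }

    public static void main(String[] args) {
        Systemd fake = new Systemd() {
            public void notify_extend_timeout(long seconds) { System.out.println("EXTEND_TIMEOUT_USEC=" + seconds * 1_000_000); }
            public void notify_ready() { System.out.println("READY=1"); }
            public void notify_stopping() { System.out.println("STOPPING=1"); }
        };
        SystemdNotifySketch plugin = new SystemdNotifySketch(fake);
        plugin.onCreateComponents();
        plugin.onNodeStarted();
        plugin.close();
    }
}

The updated tests reflect this shape: instead of asserting on the raw sd_notify arguments ("READY=1", "STOPPING=1") and return codes, they override notifyReady/notifyStopping and assert the methods were invoked, with readiness failure modelled as a thrown RuntimeException.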
@@ -104,83 +102,68 @@ public void testInvalid() { } public void testOnNodeStartedSuccess() { - runTestOnNodeStarted(Boolean.TRUE.toString(), randomIntBetween(0, Integer.MAX_VALUE), (maybe, plugin) -> { + runTestOnNodeStarted(Boolean.TRUE.toString(), false, (maybe, plugin) -> { assertThat(maybe, OptionalMatchers.isEmpty()); + assertThat(plugin.invokedReady.get(), is(true)); verify(plugin.extender()).cancel(); }); } public void testOnNodeStartedFailure() { - final int rc = randomIntBetween(Integer.MIN_VALUE, -1); - runTestOnNodeStarted( - Boolean.TRUE.toString(), - rc, - (maybe, plugin) -> assertThat( - maybe, - isPresentWith( - allOf(instanceOf(RuntimeException.class), hasToString(containsString("sd_notify returned error [" + rc + "]"))) - ) - ) - ); + runTestOnNodeStarted(Boolean.TRUE.toString(), true, (maybe, plugin) -> { + assertThat(maybe, isPresentWith(allOf(instanceOf(RuntimeException.class), hasToString(containsString("notify ready failed"))))); + assertThat(plugin.invokedReady.get(), is(true)); + }); } public void testOnNodeStartedNotEnabled() { - runTestOnNodeStarted(Boolean.FALSE.toString(), randomInt(), (maybe, plugin) -> assertThat(maybe, OptionalMatchers.isEmpty())); + runTestOnNodeStarted(Boolean.FALSE.toString(), randomBoolean(), (maybe, plugin) -> assertThat(maybe, OptionalMatchers.isEmpty())); } private void runTestOnNodeStarted( final String esSDNotify, - final int rc, - final BiConsumer, SystemdPlugin> assertions + final boolean invokeFailure, + final BiConsumer, TestSystemdPlugin> assertions ) { - runTest(esSDNotify, rc, assertions, SystemdPlugin::onNodeStarted, "READY=1"); + runTest(esSDNotify, invokeFailure, assertions, SystemdPlugin::onNodeStarted); } public void testCloseSuccess() { - runTestClose( - Boolean.TRUE.toString(), - randomIntBetween(1, Integer.MAX_VALUE), - (maybe, plugin) -> assertThat(maybe, OptionalMatchers.isEmpty()) - ); + runTestClose(Boolean.TRUE.toString(), false, (maybe, plugin) -> { + assertThat(maybe, OptionalMatchers.isEmpty()); + assertThat(plugin.invokedStopping.get(), is(true)); + }); } public void testCloseFailure() { - runTestClose( - Boolean.TRUE.toString(), - randomIntBetween(Integer.MIN_VALUE, -1), - (maybe, plugin) -> assertThat(maybe, OptionalMatchers.isEmpty()) - ); + runTestClose(Boolean.TRUE.toString(), true, (maybe, plugin) -> { + assertThat(maybe, OptionalMatchers.isEmpty()); + assertThat(plugin.invokedStopping.get(), is(true)); + }); } public void testCloseNotEnabled() { - runTestClose(Boolean.FALSE.toString(), randomInt(), (maybe, plugin) -> assertThat(maybe, OptionalMatchers.isEmpty())); + runTestClose(Boolean.FALSE.toString(), randomBoolean(), (maybe, plugin) -> { + assertThat(maybe, OptionalMatchers.isEmpty()); + assertThat(plugin.invokedStopping.get(), is(false)); + }); } - private void runTestClose(final String esSDNotify, final int rc, final BiConsumer, SystemdPlugin> assertions) { - runTest(esSDNotify, rc, assertions, SystemdPlugin::close, "STOPPING=1"); + private void runTestClose( + final String esSDNotify, + boolean invokeFailure, + final BiConsumer, TestSystemdPlugin> assertions + ) { + runTest(esSDNotify, invokeFailure, assertions, SystemdPlugin::close); } private void runTest( final String esSDNotify, - final int rc, - final BiConsumer, SystemdPlugin> assertions, - final CheckedConsumer invocation, - final String expectedState + final boolean invokeReadyFailure, + final BiConsumer, TestSystemdPlugin> assertions, + final CheckedConsumer invocation ) { - final AtomicBoolean invoked = new AtomicBoolean(); - final 
AtomicInteger invokedUnsetEnvironment = new AtomicInteger(); - final AtomicReference invokedState = new AtomicReference<>(); - final SystemdPlugin plugin = new SystemdPlugin(false, randomPackageBuildType, esSDNotify) { - - @Override - int sd_notify(final int unset_environment, final String state) { - invoked.set(true); - invokedUnsetEnvironment.set(unset_environment); - invokedState.set(state); - return rc; - } - - }; + final TestSystemdPlugin plugin = new TestSystemdPlugin(esSDNotify, invokeReadyFailure); startPlugin(plugin); if (Boolean.TRUE.toString().equals(esSDNotify)) { assertNotNull(plugin.extender()); @@ -198,13 +181,29 @@ int sd_notify(final int unset_environment, final String state) { if (success) { assertions.accept(Optional.empty(), plugin); } - if (Boolean.TRUE.toString().equals(esSDNotify)) { - assertTrue(invoked.get()); - assertThat(invokedUnsetEnvironment.get(), equalTo(0)); - assertThat(invokedState.get(), equalTo(expectedState)); - } else { - assertFalse(invoked.get()); - } } + class TestSystemdPlugin extends SystemdPlugin { + final AtomicBoolean invokedReady = new AtomicBoolean(); + final AtomicBoolean invokedStopping = new AtomicBoolean(); + final boolean invokeReadyFailure; + + TestSystemdPlugin(String esSDNotify, boolean invokeFailure) { + super(false, randomPackageBuildType, esSDNotify); + this.invokeReadyFailure = invokeFailure; + } + + @Override + void notifyReady() { + invokedReady.set(true); + if (invokeReadyFailure) { + throw new RuntimeException("notify ready failed"); + } + } + + @Override + void notifyStopping() { + invokedStopping.set(true); + } + } } diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/ESNetty4IntegTestCase.java b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/ESNetty4IntegTestCase.java index c996f55198bf6..65fbde5d42005 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/ESNetty4IntegTestCase.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/ESNetty4IntegTestCase.java @@ -12,7 +12,6 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.transport.netty4.Netty4Plugin; -import org.elasticsearch.transport.netty4.Netty4Transport; import java.util.Collection; import java.util.Collections; @@ -29,7 +28,7 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings)); // randomize netty settings if (randomBoolean()) { - builder.put(Netty4Transport.WORKER_COUNT.getKey(), random().nextInt(3) + 1); + builder.put(Netty4Plugin.WORKER_COUNT.getKey(), random().nextInt(3) + 1); } builder.put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4Plugin.NETTY_TRANSPORT_NAME); builder.put(NetworkModule.HTTP_TYPE_KEY, Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME); diff --git a/modules/transport-netty4/src/javaRestTest/java/org/elasticsearch/rest/Netty4BadRequestIT.java b/modules/transport-netty4/src/javaRestTest/java/org/elasticsearch/rest/Netty4BadRequestIT.java index 31e8c4765d4f2..a7bc031448087 100644 --- a/modules/transport-netty4/src/javaRestTest/java/org/elasticsearch/rest/Netty4BadRequestIT.java +++ b/modules/transport-netty4/src/javaRestTest/java/org/elasticsearch/rest/Netty4BadRequestIT.java @@ -22,12 +22,13 @@ import java.io.IOException; import java.nio.charset.StandardCharsets; import java.util.Map; +import java.util.regex.Pattern; import static 
org.elasticsearch.rest.RestStatus.BAD_REQUEST; -import static org.elasticsearch.test.hamcrest.RegexMatcher.matches; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasToString; +import static org.hamcrest.Matchers.matchesRegex; public class Netty4BadRequestIT extends ESRestTestCase { @@ -63,7 +64,7 @@ public void testBadRequest() throws IOException { ); assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(BAD_REQUEST.getStatus())); assertThat(e, hasToString(containsString("too_long_http_line_exception"))); - assertThat(e, hasToString(matches("An HTTP line is larger than \\d+ bytes"))); + assertThat(e, hasToString(matchesRegex(Pattern.compile(".*An HTTP line is larger than \\d+ bytes.*", Pattern.DOTALL)))); } public void testInvalidParameterValue() throws IOException { diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java index 86fa635078d4f..b86e168e2e620 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java @@ -133,46 +133,34 @@ protected void handlePipelinedRequest(ChannelHandlerContext ctx, Netty4HttpReque } @Override - public void write(final ChannelHandlerContext ctx, final Object msg, final ChannelPromise promise) throws IOException { + public void write(final ChannelHandlerContext ctx, final Object msg, final ChannelPromise promise) { assert msg instanceof Netty4HttpResponse : "Invalid message type: " + msg.getClass(); - boolean success = false; - try { - final Netty4HttpResponse restResponse = (Netty4HttpResponse) msg; - if (restResponse.getSequence() != writeSequence) { - assert restResponse.getSequence() > writeSequence - : "response sequence [" + restResponse.getSequence() + "] we below write sequence [" + writeSequence + "]"; - if (outboundHoldingQueue.size() >= maxEventsHeld) { - int eventCount = outboundHoldingQueue.size() + 1; - throw new IllegalStateException( - "Too many pipelined events [" + eventCount + "]. Max events allowed [" + maxEventsHeld + "]." 
- ); - } - // response is not at the current sequence number so we add it to the outbound queue and return - assert outboundHoldingQueue.stream().noneMatch(t -> t.v1().getSequence() == writeSequence) - : "duplicate outbound entries for seqno " + writeSequence; - outboundHoldingQueue.add(new Tuple<>(restResponse, promise)); - success = true; - return; - } - - // response is at the current sequence number and does not need to wait for any other response to be written so we write - // it out directly + final Netty4HttpResponse restResponse = (Netty4HttpResponse) msg; + if (restResponse.getSequence() != writeSequence) { + // response is not at the current sequence number so we add it to the outbound queue + enqueuePipelinedResponse(ctx, restResponse, promise); + } else { + // response is at the current sequence number and does not need to wait for any other response to be written doWrite(ctx, restResponse, promise); - success = true; // see if we have any queued up responses that became writeable due to the above write doWriteQueued(ctx); - } catch (IllegalStateException e) { + } + } + + private void enqueuePipelinedResponse(ChannelHandlerContext ctx, Netty4HttpResponse restResponse, ChannelPromise promise) { + assert restResponse.getSequence() > writeSequence + : "response sequence [" + restResponse.getSequence() + "] we below write sequence [" + writeSequence + "]"; + if (outboundHoldingQueue.size() >= maxEventsHeld) { ctx.channel().close(); - } finally { - if (success == false && promise.isDone() == false) { - // The preceding failure may already have failed the promise; use tryFailure() to avoid log noise about double-completion, - // but also check isDone() first to avoid even constructing another exception in most cases. - promise.tryFailure(new ClosedChannelException()); - } + promise.tryFailure(new ClosedChannelException()); + } else { + assert outboundHoldingQueue.stream().noneMatch(t -> t.v1().getSequence() == restResponse.getSequence()) + : "duplicate outbound entries for seqno " + restResponse.getSequence(); + outboundHoldingQueue.add(new Tuple<>(restResponse, promise)); } } - private void doWriteQueued(ChannelHandlerContext ctx) throws IOException { + private void doWriteQueued(ChannelHandlerContext ctx) { while (outboundHoldingQueue.isEmpty() == false && outboundHoldingQueue.peek().v1().getSequence() == writeSequence) { final Tuple top = outboundHoldingQueue.poll(); assert top != null : "we know the outbound holding queue to not be empty at this point"; diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index 274240a40bd46..7844f7bbb8ce2 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -39,10 +39,7 @@ import org.elasticsearch.common.network.CloseableChannel; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.IOUtils; @@ -59,6 +56,7 @@ 
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.netty4.AcceptChannelHandler; import org.elasticsearch.transport.netty4.NetUtils; +import org.elasticsearch.transport.netty4.Netty4Plugin; import org.elasticsearch.transport.netty4.Netty4Utils; import org.elasticsearch.transport.netty4.Netty4WriteThrottlingHandler; import org.elasticsearch.transport.netty4.NettyAllocator; @@ -73,7 +71,6 @@ import java.util.function.BiPredicate; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE; -import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_READ_TIMEOUT; @@ -90,56 +87,6 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport { private static final Logger logger = LogManager.getLogger(Netty4HttpServerTransport.class); - /* - * Size in bytes of an individual message received by io.netty.handler.codec.MessageAggregator which accumulates the content for an - * HTTP request. This number is used for estimating the maximum number of allowed buffers before the MessageAggregator's internal - * collection of buffers is resized. - * - * By default we assume the Ethernet MTU (1500 bytes) but users can override it with a system property. - */ - private static final ByteSizeValue MTU = ByteSizeValue.ofBytes(Long.parseLong(System.getProperty("es.net.mtu", "1500"))); - - private static final String SETTING_KEY_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = "http.netty.max_composite_buffer_components"; - - public static Setting SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = new Setting<>( - SETTING_KEY_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS, - (s) -> { - ByteSizeValue maxContentLength = SETTING_HTTP_MAX_CONTENT_LENGTH.get(s); - /* - * Netty accumulates buffers containing data from all incoming network packets that make up one HTTP request in an instance of - * io.netty.buffer.CompositeByteBuf (think of it as a buffer of buffers). Once its capacity is reached, the buffer will iterate - * over its individual entries and put them into larger buffers (see io.netty.buffer.CompositeByteBuf#consolidateIfNeeded() - * for implementation details). We want to to resize that buffer because this leads to additional garbage on the heap and also - * increases the application's native memory footprint (as direct byte buffers hold their contents off-heap). - * - * With this setting we control the CompositeByteBuf's capacity (which is by default 1024, see - * io.netty.handler.codec.MessageAggregator#DEFAULT_MAX_COMPOSITEBUFFER_COMPONENTS). To determine a proper default capacity for - * that buffer, we need to consider that the upper bound for the size of HTTP requests is determined by `maxContentLength`. The - * number of buffers that are needed depend on how often Netty reads network packets which depends on the network type (MTU). - * We assume here that Elasticsearch receives HTTP requests via an Ethernet connection which has a MTU of 1500 bytes. - * - * Note that we are *not* pre-allocating any memory based on this setting but rather determine the CompositeByteBuf's capacity. - * The tradeoff is between less (but larger) buffers that are contained in the CompositeByteBuf and more (but smaller) buffers. 
- * With the default max content length of 100MB and a MTU of 1500 bytes we would allow 69905 entries. - */ - long maxBufferComponentsEstimate = Math.round((double) (maxContentLength.getBytes() / MTU.getBytes())); - // clamp value to the allowed range - long maxBufferComponents = Math.max(2, Math.min(maxBufferComponentsEstimate, Integer.MAX_VALUE)); - return String.valueOf(maxBufferComponents); - // Netty's CompositeByteBuf implementation does not allow less than two components. - }, - s -> Setting.parseInt(s, 2, Integer.MAX_VALUE, SETTING_KEY_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS), - Property.NodeScope - ); - - public static final Setting SETTING_HTTP_WORKER_COUNT = Setting.intSetting("http.netty.worker_count", 0, Property.NodeScope); - - public static final Setting SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE = Setting.byteSizeSetting( - "http.netty.receive_predictor_size", - new ByteSizeValue(64, ByteSizeUnit.KB), - Property.NodeScope - ); - private final int pipeliningMaxEvents; private final SharedGroupFactory sharedGroupFactory; @@ -186,11 +133,11 @@ public Netty4HttpServerTransport( this.pipeliningMaxEvents = SETTING_PIPELINING_MAX_EVENTS.get(settings); - this.maxCompositeBufferComponents = SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS.get(settings); + this.maxCompositeBufferComponents = Netty4Plugin.SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS.get(settings); this.readTimeoutMillis = Math.toIntExact(SETTING_HTTP_READ_TIMEOUT.get(settings).getMillis()); - ByteSizeValue receivePredictor = SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE.get(settings); + ByteSizeValue receivePredictor = Netty4Plugin.SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE.get(settings); recvByteBufAllocator = new FixedRecvByteBufAllocator(receivePredictor.bytesAsInt()); logger.debug( diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Plugin.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Plugin.java index 2934d425709f2..5fd69f8d9e537 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Plugin.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Plugin.java @@ -16,8 +16,11 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.http.HttpPreRequest; import org.elasticsearch.http.HttpServerTransport; @@ -38,24 +41,99 @@ import java.util.function.BiConsumer; import java.util.function.Supplier; +import static org.elasticsearch.common.settings.Setting.byteSizeSetting; +import static org.elasticsearch.common.settings.Setting.intSetting; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH; + public class Netty4Plugin extends Plugin implements NetworkPlugin { public static final String NETTY_TRANSPORT_NAME = "netty4"; public static final String NETTY_HTTP_TRANSPORT_NAME = "netty4"; + public static final Setting SETTING_HTTP_WORKER_COUNT = Setting.intSetting( + "http.netty.worker_count", + 0, + Setting.Property.NodeScope + ); + public static final Setting 
SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE = byteSizeSetting( + "http.netty.receive_predictor_size", + new ByteSizeValue(64, ByteSizeUnit.KB), + Setting.Property.NodeScope + ); + public static final Setting WORKER_COUNT = new Setting<>( + "transport.netty.worker_count", + (s) -> Integer.toString(EsExecutors.allocatedProcessors(s)), + (s) -> Setting.parseInt(s, 1, "transport.netty.worker_count"), + Setting.Property.NodeScope + ); + private static final Setting NETTY_RECEIVE_PREDICTOR_SIZE = byteSizeSetting( + "transport.netty.receive_predictor_size", + new ByteSizeValue(64, ByteSizeUnit.KB), + Setting.Property.NodeScope + ); + public static final Setting NETTY_RECEIVE_PREDICTOR_MAX = byteSizeSetting( + "transport.netty.receive_predictor_max", + NETTY_RECEIVE_PREDICTOR_SIZE, + Setting.Property.NodeScope + ); + public static final Setting NETTY_RECEIVE_PREDICTOR_MIN = byteSizeSetting( + "transport.netty.receive_predictor_min", + NETTY_RECEIVE_PREDICTOR_SIZE, + Setting.Property.NodeScope + ); + public static final Setting NETTY_BOSS_COUNT = intSetting("transport.netty.boss_count", 1, 1, Setting.Property.NodeScope); + /* + * Size in bytes of an individual message received by io.netty.handler.codec.MessageAggregator which accumulates the content for an + * HTTP request. This number is used for estimating the maximum number of allowed buffers before the MessageAggregator's internal + * collection of buffers is resized. + * + * By default we assume the Ethernet MTU (1500 bytes) but users can override it with a system property. + */ + private static final ByteSizeValue MTU = ByteSizeValue.ofBytes(Long.parseLong(System.getProperty("es.net.mtu", "1500"))); + private static final String SETTING_KEY_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = "http.netty.max_composite_buffer_components"; + public static Setting SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = new Setting<>( + SETTING_KEY_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS, + (s) -> { + ByteSizeValue maxContentLength = SETTING_HTTP_MAX_CONTENT_LENGTH.get(s); + /* + * Netty accumulates buffers containing data from all incoming network packets that make up one HTTP request in an instance of + * io.netty.buffer.CompositeByteBuf (think of it as a buffer of buffers). Once its capacity is reached, the buffer will iterate + * over its individual entries and put them into larger buffers (see io.netty.buffer.CompositeByteBuf#consolidateIfNeeded() + * for implementation details). We want to to resize that buffer because this leads to additional garbage on the heap and also + * increases the application's native memory footprint (as direct byte buffers hold their contents off-heap). + * + * With this setting we control the CompositeByteBuf's capacity (which is by default 1024, see + * io.netty.handler.codec.MessageAggregator#DEFAULT_MAX_COMPOSITEBUFFER_COMPONENTS). To determine a proper default capacity for + * that buffer, we need to consider that the upper bound for the size of HTTP requests is determined by `maxContentLength`. The + * number of buffers that are needed depend on how often Netty reads network packets which depends on the network type (MTU). + * We assume here that Elasticsearch receives HTTP requests via an Ethernet connection which has a MTU of 1500 bytes. + * + * Note that we are *not* pre-allocating any memory based on this setting but rather determine the CompositeByteBuf's capacity. + * The tradeoff is between less (but larger) buffers that are contained in the CompositeByteBuf and more (but smaller) buffers. 
+ * With the default max content length of 100MB and a MTU of 1500 bytes we would allow 69905 entries. + */ + long maxBufferComponentsEstimate = Math.round((double) (maxContentLength.getBytes() / MTU.getBytes())); + // clamp value to the allowed range + long maxBufferComponents = Math.max(2, Math.min(maxBufferComponentsEstimate, Integer.MAX_VALUE)); + return String.valueOf(maxBufferComponents); + // Netty's CompositeByteBuf implementation does not allow less than two components. + }, + s -> Setting.parseInt(s, 2, Integer.MAX_VALUE, SETTING_KEY_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS), + Setting.Property.NodeScope + ); private final SetOnce groupFactory = new SetOnce<>(); @Override public List> getSettings() { return Arrays.asList( - Netty4HttpServerTransport.SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS, - Netty4HttpServerTransport.SETTING_HTTP_WORKER_COUNT, - Netty4HttpServerTransport.SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE, - Netty4Transport.WORKER_COUNT, - Netty4Transport.NETTY_RECEIVE_PREDICTOR_SIZE, - Netty4Transport.NETTY_RECEIVE_PREDICTOR_MIN, - Netty4Transport.NETTY_RECEIVE_PREDICTOR_MAX, - Netty4Transport.NETTY_BOSS_COUNT + SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS, + SETTING_HTTP_WORKER_COUNT, + SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE, + WORKER_COUNT, + NETTY_RECEIVE_PREDICTOR_SIZE, + NETTY_RECEIVE_PREDICTOR_MIN, + NETTY_RECEIVE_PREDICTOR_MAX, + NETTY_BOSS_COUNT ); } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java index 9a0d6692723e3..6d8f950ef1cf4 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java @@ -31,10 +31,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.recycler.Recycler; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -54,8 +51,6 @@ import java.net.InetSocketAddress; import java.util.Map; -import static org.elasticsearch.common.settings.Setting.byteSizeSetting; -import static org.elasticsearch.common.settings.Setting.intSetting; import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.transport.RemoteClusterPortSettings.REMOTE_CLUSTER_PROFILE; @@ -70,30 +65,6 @@ public class Netty4Transport extends TcpTransport { private static final Logger logger = LogManager.getLogger(Netty4Transport.class); - public static final Setting WORKER_COUNT = new Setting<>( - "transport.netty.worker_count", - (s) -> Integer.toString(EsExecutors.allocatedProcessors(s)), - (s) -> Setting.parseInt(s, 1, "transport.netty.worker_count"), - Property.NodeScope - ); - - public static final Setting NETTY_RECEIVE_PREDICTOR_SIZE = Setting.byteSizeSetting( - "transport.netty.receive_predictor_size", - new ByteSizeValue(64, ByteSizeUnit.KB), - Property.NodeScope - ); - public static final Setting 
NETTY_RECEIVE_PREDICTOR_MIN = byteSizeSetting( - "transport.netty.receive_predictor_min", - NETTY_RECEIVE_PREDICTOR_SIZE, - Property.NodeScope - ); - public static final Setting NETTY_RECEIVE_PREDICTOR_MAX = byteSizeSetting( - "transport.netty.receive_predictor_max", - NETTY_RECEIVE_PREDICTOR_SIZE, - Property.NodeScope - ); - - public static final Setting NETTY_BOSS_COUNT = intSetting("transport.netty.boss_count", 1, 1, Property.NodeScope); public static final ChannelOption OPTION_TCP_KEEP_IDLE = NioChannelOption.of(NetUtils.getTcpKeepIdleSocketOption()); public static final ChannelOption OPTION_TCP_KEEP_INTERVAL = NioChannelOption.of(NetUtils.getTcpKeepIntervalSocketOption()); public static final ChannelOption OPTION_TCP_KEEP_COUNT = NioChannelOption.of(NetUtils.getTcpKeepCountSocketOption()); @@ -123,8 +94,8 @@ public Netty4Transport( this.sharedGroupFactory = sharedGroupFactory; // See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for default values in netty..., we can use higher ones for us, even fixed one - this.receivePredictorMin = NETTY_RECEIVE_PREDICTOR_MIN.get(settings); - this.receivePredictorMax = NETTY_RECEIVE_PREDICTOR_MAX.get(settings); + this.receivePredictorMin = Netty4Plugin.NETTY_RECEIVE_PREDICTOR_MIN.get(settings); + this.receivePredictorMax = Netty4Plugin.NETTY_RECEIVE_PREDICTOR_MAX.get(settings); if (receivePredictorMax.getBytes() == receivePredictorMin.getBytes()) { recvByteBufAllocator = new FixedRecvByteBufAllocator((int) receivePredictorMax.getBytes()); } else { diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/NettyAllocator.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/NettyAllocator.java index f5d566d977d09..863334af85144 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/NettyAllocator.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/NettyAllocator.java @@ -152,6 +152,11 @@ public void close() { } }; } + + @Override + public int pageSize() { + return PageCacheRecycler.BYTE_PAGE_SIZE; + } }; } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/SharedGroupFactory.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/SharedGroupFactory.java index 14c2c13ed7669..849597b1d9915 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/SharedGroupFactory.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/SharedGroupFactory.java @@ -18,7 +18,6 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.http.HttpServerTransport; -import org.elasticsearch.http.netty4.Netty4HttpServerTransport; import org.elasticsearch.transport.TcpTransport; import java.util.concurrent.TimeUnit; @@ -29,7 +28,7 @@ /** * Creates and returns {@link io.netty.channel.EventLoopGroup} instances. It will return a shared group for * both {@link #getHttpGroup()} and {@link #getTransportGroup()} if - * {@link org.elasticsearch.http.netty4.Netty4HttpServerTransport#SETTING_HTTP_WORKER_COUNT} is configured to be 0. + * {@link Netty4Plugin#SETTING_HTTP_WORKER_COUNT} is configured to be 0. * If that setting is not 0, then it will return a different group in the {@link #getHttpGroup()} call. 
*/ public final class SharedGroupFactory { @@ -45,8 +44,8 @@ public final class SharedGroupFactory { public SharedGroupFactory(Settings settings) { this.settings = settings; - this.workerCount = Netty4Transport.WORKER_COUNT.get(settings); - this.httpWorkerCount = Netty4HttpServerTransport.SETTING_HTTP_WORKER_COUNT.get(settings); + this.workerCount = Netty4Plugin.WORKER_COUNT.get(settings); + this.httpWorkerCount = Netty4Plugin.SETTING_HTTP_WORKER_COUNT.get(settings); } public Settings getSettings() { diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java index 56ba3ae1958f7..7ce962ff56b67 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.core.Tuple; import org.elasticsearch.tasks.Task; +import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.netty4.NettyAllocator; import java.io.Closeable; @@ -139,9 +140,20 @@ private synchronized List sendRequests(final SocketAddress rem channelFuture = clientBootstrap.connect(remoteAddress); channelFuture.sync(); + boolean needsFinalFlush = false; for (HttpRequest request : requests) { - channelFuture.channel().writeAndFlush(request); + if (ESTestCase.randomBoolean()) { + channelFuture.channel().writeAndFlush(request); + needsFinalFlush = false; + } else { + channelFuture.channel().write(request); + needsFinalFlush = true; + } + } + if (needsFinalFlush) { + channelFuture.channel().flush(); } + if (latch.await(30L, TimeUnit.SECONDS) == false) { fail("Failed to get all expected responses."); } @@ -157,7 +169,7 @@ private synchronized List sendRequests(final SocketAddress rem @Override public void close() { - clientBootstrap.config().group().shutdownGracefully().awaitUninterruptibly(); + clientBootstrap.config().group().shutdownGracefully(0L, 0L, TimeUnit.SECONDS).awaitUninterruptibly(); } /** diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java index 4d44c37ac094a..5ce989fba214a 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java @@ -78,6 +78,7 @@ import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.netty4.Netty4Plugin; import org.elasticsearch.transport.netty4.NettyAllocator; import org.elasticsearch.transport.netty4.SharedGroupFactory; import org.elasticsearch.transport.netty4.TLSConfig; @@ -889,7 +890,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th public void testMultipleValidationsOnTheSameChannel() throws InterruptedException { // ensure that there is a single channel active - final Settings settings = createBuilderWithPort().put(Netty4HttpServerTransport.SETTING_HTTP_WORKER_COUNT.getKey(), 1).build(); + final Settings settings = createBuilderWithPort().put(Netty4Plugin.SETTING_HTTP_WORKER_COUNT.getKey(), 1).build(); final Set okURIs = 
ConcurrentHashMap.newKeySet(); final Set nokURIs = ConcurrentHashMap.newKeySet(); final SetOnce channelSetOnce = new SetOnce<>(); diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SharedGroupFactoryTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SharedGroupFactoryTests.java index 7cd34ad02d5a9..a72e2c7b69465 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SharedGroupFactoryTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SharedGroupFactoryTests.java @@ -9,7 +9,6 @@ package org.elasticsearch.transport.netty4; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.http.netty4.Netty4HttpServerTransport; import org.elasticsearch.test.ESTestCase; public final class SharedGroupFactoryTests extends ESTestCase { @@ -37,9 +36,7 @@ public void testSharedEventLoops() throws Exception { } public void testNonSharedEventLoops() throws Exception { - Settings settings = Settings.builder() - .put(Netty4HttpServerTransport.SETTING_HTTP_WORKER_COUNT.getKey(), randomIntBetween(1, 10)) - .build(); + Settings settings = Settings.builder().put(Netty4Plugin.SETTING_HTTP_WORKER_COUNT.getKey(), randomIntBetween(1, 10)).build(); SharedGroupFactory sharedGroupFactory = new SharedGroupFactory(settings); SharedGroupFactory.SharedGroup httpGroup = sharedGroupFactory.getHttpGroup(); SharedGroupFactory.SharedGroup transportGroup = sharedGroupFactory.getTransportGroup(); diff --git a/plugins/examples/rescore/src/test/java/org/elasticsearch/example/rescore/ExampleRescoreBuilderFieldDataTests.java b/plugins/examples/rescore/src/test/java/org/elasticsearch/example/rescore/ExampleRescoreBuilderFieldDataTests.java new file mode 100644 index 0000000000000..b486a6730a9ad --- /dev/null +++ b/plugins/examples/rescore/src/test/java/org/elasticsearch/example/rescore/ExampleRescoreBuilderFieldDataTests.java @@ -0,0 +1,111 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.example.rescore; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FloatField; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TotalHits; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.search.rescore.RescoreContext; +import org.elasticsearch.test.AbstractBuilderTestCase; + +import java.io.IOException; + +public class ExampleRescoreBuilderFieldDataTests extends AbstractBuilderTestCase { + + //to test that the rescore plugin is able to pull data from the indexed documents + //these following helper methods are called from the test below, + //some helpful examples related to this are located circa feb 14 2024 at: + //https://github.com/apache/lucene/blob/main/lucene/core/src/test/org/apache/lucene/search/TestQueryRescorer.java + + private String fieldFactorFieldName = "literalNameOfFieldUsedAsFactor"; + private float fieldFactorValue = 2.0f; + + private IndexSearcher getSearcher(IndexReader r) { + IndexSearcher searcher = newSearcher(r); + return searcher; + } + + private IndexReader publishDocs(int numDocs, String fieldName, Directory dir) throws Exception { + //here we populate a collection of documents into the mock search context + //note they all have the same field factor value for convenience + RandomIndexWriter w = new RandomIndexWriter(random(), dir, newIndexWriterConfig()); + for (int i = 0; i < numDocs; i++) { + Document d = new Document(); + d.add(newStringField("id", Integer.toString(i), Field.Store.YES)); + d.add(new FloatField(fieldName, fieldFactorValue, Field.Store.YES )); + w.addDocument(d); + } + IndexReader reader = w.getReader(); + w.close(); + return reader; + } + @Override + protected void initializeAdditionalMappings(MapperService mapperService) throws IOException { + + mapperService.merge( + "_doc", + new CompressedXContent(Strings.toString(PutMappingRequest.simpleMapping(fieldFactorFieldName, "type=float"))), + MapperService.MergeReason.MAPPING_UPDATE + ); + } + + + public void testRescoreUsingFieldData() throws Exception { + //we want the originalScoreOfTopDocs to be lower than the rescored values + //so that the order of the result has moved the rescored window to the top of the results + float originalScoreOfTopDocs = 1.0f; + + //just like in the associated rescore builder factor testing + //we will test a random factor on the incoming score docs + //the division is just to leave room for whatever values are picked + float factor = (float) randomDoubleBetween(1.0d, Float.MAX_VALUE/(fieldFactorValue * originalScoreOfTopDocs)-1, false); + + // Testing factorField specifically here for more example rescore debugging + // setup a mock search context that will be able to locate fieldIndexData + // provided from the index reader that follows + + Directory dir = newDirectory(); + //the rest of this test does not actually need more than 3 docs in the mock + //however any number >= 3 is fine + int numDocs = 3; + IndexReader reader = publishDocs(numDocs, fieldFactorFieldName, dir); + IndexSearcher 
searcher = getSearcher(reader); + + ExampleRescoreBuilder builder = new ExampleRescoreBuilder(factor, fieldFactorFieldName).windowSize(2); + + RescoreContext context = builder.buildContext(createSearchExecutionContext(searcher)); + + //create and populate the TopDocs that will be provided to the rescore function + TopDocs docs = new TopDocs(new TotalHits(10, TotalHits.Relation.EQUAL_TO), new ScoreDoc[3]); + docs.scoreDocs[0] = new ScoreDoc(0, originalScoreOfTopDocs); + docs.scoreDocs[1] = new ScoreDoc(1, originalScoreOfTopDocs); + docs.scoreDocs[2] = new ScoreDoc(2, originalScoreOfTopDocs); + context.rescorer().rescore(docs, searcher, context); + + //here we expect that windowSize docs have been re-scored, with remaining doc in the original state + assertEquals(originalScoreOfTopDocs*factor*fieldFactorValue, docs.scoreDocs[0].score, 0.0f); + assertEquals(originalScoreOfTopDocs*factor*fieldFactorValue, docs.scoreDocs[1].score, 0.0f); + assertEquals(originalScoreOfTopDocs, docs.scoreDocs[2].score, 0.0f); + + //just to clean up the mocks + reader.close(); + dir.close(); + } +} diff --git a/plugins/examples/settings.gradle b/plugins/examples/settings.gradle index f71eca0d1f966..af2596fdbafe3 100644 --- a/plugins/examples/settings.gradle +++ b/plugins/examples/settings.gradle @@ -7,7 +7,7 @@ */ plugins { - id "com.gradle.enterprise" version "3.16.1" + id "com.gradle.enterprise" version "3.16.2" } // Include all subdirectories as example projects diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java index 1ff9397bc8b08..956d3c0e104ae 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java @@ -26,7 +26,6 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.node.Node; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.hamcrest.RegexMatcher; import java.io.IOException; import java.io.PrintWriter; @@ -48,7 +47,9 @@ import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.matchesRegex; import static org.hamcrest.Matchers.startsWith; public class EvilLoggerTests extends ESTestCase { @@ -82,14 +83,14 @@ public void testLocationInfoTest() throws IOException { + System.getProperty("es.logs.cluster_name") + ".log"; final List events = Files.readAllLines(PathUtils.get(path)); - assertThat(events.size(), equalTo(5)); + assertThat(events, hasSize(5)); final String location = "org.elasticsearch.common.logging.EvilLoggerTests.testLocationInfoTest"; // the first message is a warning for unsupported configuration files - assertLogLine(events.get(0), Level.ERROR, location, "This is an error message"); - assertLogLine(events.get(1), Level.WARN, location, "This is a warning message"); - assertLogLine(events.get(2), Level.INFO, location, "This is an info message"); - assertLogLine(events.get(3), Level.DEBUG, location, "This is a debug message"); - assertLogLine(events.get(4), Level.TRACE, location, "This is a trace message"); + assertLogLine(events.get(0), Level.ERROR, location, ".*This is an error message"); + assertLogLine(events.get(1), Level.WARN, location, ".*This is a warning message"); + assertLogLine(events.get(2), 
Level.INFO, location, ".*This is an info message"); + assertLogLine(events.get(3), Level.DEBUG, location, ".*This is a debug message"); + assertLogLine(events.get(4), Level.TRACE, location, ".*This is a trace message"); } public void testConcurrentDeprecationLogger() throws IOException, BrokenBarrierException, InterruptedException { @@ -166,14 +167,14 @@ public void testConcurrentDeprecationLogger() throws IOException, BrokenBarrierE matcher.matches(); return Integer.parseInt(matcher.group(1)); })); - assertThat(deprecationEvents.size(), equalTo(128)); + assertThat(deprecationEvents, hasSize(128)); for (int i = 0; i < 128; i++) { assertLogLine( deprecationEvents.get(i), DeprecationLogger.CRITICAL, "org.elasticsearch.common.logging.DeprecationLogger.lambda\\$doPrivilegedLog\\$0", - "This is a maybe logged deprecation message" + i + ".*This is a maybe logged deprecation message" + i + ".*" ); } @@ -201,12 +202,12 @@ public void testDeprecatedSettings() throws IOException { + "_deprecation.log"; final List deprecationEvents = Files.readAllLines(PathUtils.get(deprecationPath)); if (iterations > 0) { - assertThat(deprecationEvents.size(), equalTo(1)); + assertThat(deprecationEvents, hasSize(1)); assertLogLine( deprecationEvents.get(0), DeprecationLogger.CRITICAL, "org.elasticsearch.common.logging.DeprecationLogger.lambda\\$doPrivilegedLog\\$0", - "\\[deprecated.foo\\] setting was deprecated in Elasticsearch and will be removed in a future release." + ".*\\[deprecated.foo\\] setting was deprecated in Elasticsearch and will be removed in a future release..*" ); } } @@ -246,7 +247,7 @@ public void testPrefixLogger() throws IOException { e.printStackTrace(pw); final int stackTraceLength = sw.toString().split(System.getProperty("line.separator")).length; final int expectedLogLines = 3; - assertThat(events.size(), equalTo(expectedLogLines + stackTraceLength)); + assertThat(events, hasSize(expectedLogLines + stackTraceLength)); for (int i = 0; i < expectedLogLines; i++) { assertThat("Contents of [" + path + "] are wrong", events.get(i), startsWith("[" + getTestName() + "]" + prefix + " test")); } @@ -287,8 +288,8 @@ public void testNoNodeNameInPatternWarning() throws IOException { + System.getProperty("es.logs.cluster_name") + ".log"; final List events = Files.readAllLines(PathUtils.get(path)); - assertThat(events.size(), equalTo(2)); - final String location = "org.elasticsearch.common.logging.LogConfigurator"; + assertThat(events, hasSize(2)); + final String location = "org.elasticsearch.common.logging.LogConfigurator.*"; // the first message is a warning for unsupported configuration files assertLogLine( events.get(0), @@ -324,12 +325,14 @@ private void setupLogging(final String config, final Settings settings) throws I LogConfigurator.configure(environment, true); } + private static final Pattern LOG_LINE = Pattern.compile("\\[(.*)]\\[(.*)\\(.*\\)] (.*)"); + private void assertLogLine(final String logLine, final Level level, final String location, final String message) { - final Matcher matcher = Pattern.compile("\\[(.*)\\]\\[(.*)\\(.*\\)\\] (.*)").matcher(logLine); + Matcher matcher = LOG_LINE.matcher(logLine); assertTrue(logLine, matcher.matches()); assertThat(matcher.group(1), equalTo(level.toString())); - assertThat(matcher.group(2), RegexMatcher.matches(location)); - assertThat(matcher.group(3), RegexMatcher.matches(message)); + assertThat(matcher.group(2), matchesRegex(location)); + assertThat(matcher.group(3), matchesRegex(message)); } } diff --git 
a/qa/logging-config/src/javaRestTest/java/org/elasticsearch/common/logging/CustomLoggingConfigIT.java b/qa/logging-config/src/javaRestTest/java/org/elasticsearch/common/logging/CustomLoggingConfigIT.java index 1736533aa526e..4ec12ed135d65 100644 --- a/qa/logging-config/src/javaRestTest/java/org/elasticsearch/common/logging/CustomLoggingConfigIT.java +++ b/qa/logging-config/src/javaRestTest/java/org/elasticsearch/common/logging/CustomLoggingConfigIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.common.logging; import org.elasticsearch.core.SuppressForbidden; -import org.elasticsearch.test.hamcrest.RegexMatcher; import org.elasticsearch.test.rest.ESRestTestCase; import org.hamcrest.Matchers; @@ -22,6 +21,8 @@ import java.security.PrivilegedAction; import java.util.List; +import static org.hamcrest.Matchers.matchesRegex; + /** * This test verifies that Elasticsearch can startup successfully with a custom logging config using variables introduced in * ESJsonLayout @@ -35,14 +36,14 @@ public class CustomLoggingConfigIT extends ESRestTestCase { public void testSuccessfulStartupWithCustomConfig() throws Exception { assertBusy(() -> { List lines = readAllLines(getPlaintextLogFile()); - assertThat(lines, Matchers.hasItem(RegexMatcher.matches(NODE_STARTED))); + assertThat(lines, Matchers.hasItem(matchesRegex(NODE_STARTED))); }); } public void testParseAllV7JsonLines() throws Exception { assertBusy(() -> { List lines = readAllLines(getJSONLogFile()); - assertThat(lines, Matchers.hasItem(RegexMatcher.matches(NODE_STARTED))); + assertThat(lines, Matchers.hasItem(matchesRegex(NODE_STARTED))); }); } diff --git a/qa/mixed-cluster/build.gradle b/qa/mixed-cluster/build.gradle index 28d372671ee99..3953237a0e8d9 100644 --- a/qa/mixed-cluster/build.gradle +++ b/qa/mixed-cluster/build.gradle @@ -67,6 +67,11 @@ BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> numberOfNodes = 4 setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" setting 'xpack.security.enabled', 'false' + /* There is a chance we have more master changes than "normal", so to avoid this test from failing, + we increase the threshold (as this purpose of this test isn't to test that specific indicator). 
*/ + if (bwcVersion.onOrAfter(Version.fromString("8.4.0"))) { + setting 'health.master_history.no_master_transitions_threshold', '10' + } requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0") } diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/EnrollNodeToClusterTests.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/EnrollNodeToClusterTests.java index 3ca61ccacae17..eac86cf7aac23 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/EnrollNodeToClusterTests.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/EnrollNodeToClusterTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.packaging.test; -import org.elasticsearch.Version; import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.packaging.util.Archives; import org.elasticsearch.packaging.util.Distribution; @@ -93,7 +92,6 @@ private String generateMockEnrollmentToken() throws Exception { EnrollmentToken enrollmentToken = new EnrollmentToken( "some-api-key", "e8864fa9cb5a8053ea84a48581a6c9bef619f8f6aaa58a632aac3e0a25d43ea9", - Version.CURRENT.toString(), List.of("localhost:9200") ); return enrollmentToken.getEncoded(); diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/PackagesSecurityAutoConfigurationTests.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/PackagesSecurityAutoConfigurationTests.java index fa68da1725edc..9dab7e5eb8d16 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/PackagesSecurityAutoConfigurationTests.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/PackagesSecurityAutoConfigurationTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.packaging.test; -import org.elasticsearch.Version; import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.common.Strings; import org.elasticsearch.common.ssl.PemKeyConfig; @@ -300,7 +299,6 @@ public void test73ReconfigureCreatesFilesWithCorrectPermissions() throws Excepti final EnrollmentToken enrollmentToken = new EnrollmentToken( "some-api-key", "b0150fd8a29f9012207912de9a01aa1d1f0dd696c847d3a9353881f9045bf442", // fingerprint of http_ca.crt - Version.CURRENT.toString(), List.of(mockNode.getHostName() + ":" + mockNode.getPort()) ); Shell.Result result = installation.executables().nodeReconfigureTool.run( diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java index e9f4106433771..88d910b61fa52 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; +import org.elasticsearch.action.admin.cluster.snapshots.get.SnapshotSortKey; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.cluster.SnapshotsInProgress; @@ -101,38 +102,38 @@ private void doTestSortOrder(String repoName, Collection allSnapshotName .getSnapshots(); assertSnapshotListSorted(defaultSorting, null, order); assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repoName, 
GetSnapshotsRequest.SortBy.NAME, order, includeIndexNames), - GetSnapshotsRequest.SortBy.NAME, + allSnapshotsSorted(allSnapshotNames, repoName, SnapshotSortKey.NAME, order, includeIndexNames), + SnapshotSortKey.NAME, order ); assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repoName, GetSnapshotsRequest.SortBy.DURATION, order, includeIndexNames), - GetSnapshotsRequest.SortBy.DURATION, + allSnapshotsSorted(allSnapshotNames, repoName, SnapshotSortKey.DURATION, order, includeIndexNames), + SnapshotSortKey.DURATION, order ); assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repoName, GetSnapshotsRequest.SortBy.INDICES, order, includeIndexNames), - GetSnapshotsRequest.SortBy.INDICES, + allSnapshotsSorted(allSnapshotNames, repoName, SnapshotSortKey.INDICES, order, includeIndexNames), + SnapshotSortKey.INDICES, order ); assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repoName, GetSnapshotsRequest.SortBy.START_TIME, order, includeIndexNames), - GetSnapshotsRequest.SortBy.START_TIME, + allSnapshotsSorted(allSnapshotNames, repoName, SnapshotSortKey.START_TIME, order, includeIndexNames), + SnapshotSortKey.START_TIME, order ); assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repoName, GetSnapshotsRequest.SortBy.SHARDS, order, includeIndexNames), - GetSnapshotsRequest.SortBy.SHARDS, + allSnapshotsSorted(allSnapshotNames, repoName, SnapshotSortKey.SHARDS, order, includeIndexNames), + SnapshotSortKey.SHARDS, order ); assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repoName, GetSnapshotsRequest.SortBy.FAILED_SHARDS, order, includeIndexNames), - GetSnapshotsRequest.SortBy.FAILED_SHARDS, + allSnapshotsSorted(allSnapshotNames, repoName, SnapshotSortKey.FAILED_SHARDS, order, includeIndexNames), + SnapshotSortKey.FAILED_SHARDS, order ); assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repoName, GetSnapshotsRequest.SortBy.REPOSITORY, order, includeIndexNames), - GetSnapshotsRequest.SortBy.REPOSITORY, + allSnapshotsSorted(allSnapshotNames, repoName, SnapshotSortKey.REPOSITORY, order, includeIndexNames), + SnapshotSortKey.REPOSITORY, order ); } @@ -141,7 +142,7 @@ public void testResponseSizeLimit() throws Exception { final String repoName = "test-repo"; AbstractSnapshotIntegTestCase.createRepository(logger, repoName, "fs"); final List names = AbstractSnapshotIntegTestCase.createNSnapshots(logger, repoName, randomIntBetween(6, 20)); - for (GetSnapshotsRequest.SortBy sort : GetSnapshotsRequest.SortBy.values()) { + for (SnapshotSortKey sort : SnapshotSortKey.values()) { for (SortOrder order : SortOrder.values()) { logger.info("--> testing pagination for [{}] [{}]", sort, order); doTestPagination(repoName, names, sort, order); @@ -149,8 +150,7 @@ public void testResponseSizeLimit() throws Exception { } } - private void doTestPagination(String repoName, List names, GetSnapshotsRequest.SortBy sort, SortOrder order) - throws IOException { + private void doTestPagination(String repoName, List names, SnapshotSortKey sort, SortOrder order) throws IOException { final boolean includeIndexNames = randomBoolean(); final List allSnapshotsSorted = allSnapshotsSorted(names, repoName, sort, order, includeIndexNames); final GetSnapshotsResponse batch1 = sortedWithLimit(repoName, sort, null, 2, order, includeIndexNames); @@ -220,18 +220,18 @@ public void testSortAndPaginateWithInProgress() throws Exception { .equals(Map.of(SnapshotsInProgress.ShardState.INIT, 1L, SnapshotsInProgress.ShardState.QUEUED, (long) inProgressCount - 1)); 
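The RestGetSnapshotsIT hunks above are a mechanical rename from GetSnapshotsRequest.SortBy to the standalone SnapshotSortKey enum. As a reading aid, here is a minimal sketch of the sorted-listing call pattern these tests rely on after the rename; it reuses only methods visible in the hunks (prepareGetSnapshots, setSnapshots, setSort, setOrder, getSnapshots) and assumes the integration-test base class supplies clusterAdmin() plus the SnapshotInfo and SortOrder imports:

    // Fetch every snapshot in every repository, sorted server-side by the given key.
    private static List<SnapshotInfo> snapshotsSortedBy(SnapshotSortKey key, SortOrder order) {
        return clusterAdmin().prepareGetSnapshots("*")   // "*" = all repositories
            .setSnapshots("*")                           // "*" = all snapshot names
            .setSort(key)                                // e.g. SnapshotSortKey.START_TIME
            .setOrder(order)                             // SortOrder.ASC or SortOrder.DESC
            .get()
            .getSnapshots();
    }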
return firstIndexSuccessfullySnapshot && secondIndexIsBlocked; }); - assertStablePagination(repoName, allSnapshotNames, GetSnapshotsRequest.SortBy.START_TIME); - assertStablePagination(repoName, allSnapshotNames, GetSnapshotsRequest.SortBy.NAME); - assertStablePagination(repoName, allSnapshotNames, GetSnapshotsRequest.SortBy.INDICES); + assertStablePagination(repoName, allSnapshotNames, SnapshotSortKey.START_TIME); + assertStablePagination(repoName, allSnapshotNames, SnapshotSortKey.NAME); + assertStablePagination(repoName, allSnapshotNames, SnapshotSortKey.INDICES); AbstractSnapshotIntegTestCase.unblockAllDataNodes(repoName); for (ActionFuture inProgressSnapshot : inProgressSnapshots) { AbstractSnapshotIntegTestCase.assertSuccessful(logger, inProgressSnapshot); } - assertStablePagination(repoName, allSnapshotNames, GetSnapshotsRequest.SortBy.START_TIME); - assertStablePagination(repoName, allSnapshotNames, GetSnapshotsRequest.SortBy.NAME); - assertStablePagination(repoName, allSnapshotNames, GetSnapshotsRequest.SortBy.INDICES); + assertStablePagination(repoName, allSnapshotNames, SnapshotSortKey.START_TIME); + assertStablePagination(repoName, allSnapshotNames, SnapshotSortKey.NAME); + assertStablePagination(repoName, allSnapshotNames, SnapshotSortKey.INDICES); } public void testFilterBySLMPolicy() throws Exception { @@ -240,7 +240,7 @@ public void testFilterBySLMPolicy() throws Exception { AbstractSnapshotIntegTestCase.createNSnapshots(logger, repoName, randomIntBetween(1, 5)); final List snapshotsWithoutPolicy = clusterAdmin().prepareGetSnapshots("*") .setSnapshots("*") - .setSort(GetSnapshotsRequest.SortBy.NAME) + .setSort(SnapshotSortKey.NAME) .get() .getSnapshots(); final String snapshotWithPolicy = "snapshot-with-policy"; @@ -277,7 +277,7 @@ public void testFilterBySLMPolicy() throws Exception { assertThat(getAllSnapshotsForPolicies(policyName, otherPolicyName, "no-such-policy*"), is(List.of(withOtherPolicy, withPolicy))); final List allSnapshots = clusterAdmin().prepareGetSnapshots("*") .setSnapshots("*") - .setSort(GetSnapshotsRequest.SortBy.NAME) + .setSort(SnapshotSortKey.NAME) .get() .getSnapshots(); assertThat(getAllSnapshotsForPolicies(GetSnapshotsRequest.NO_POLICY_PATTERN, policyName, otherPolicyName), is(allSnapshots)); @@ -294,7 +294,7 @@ public void testSortAfterStartTime() throws Exception { final List allSnapshotInfo = clusterAdmin().prepareGetSnapshots(matchAllPattern()) .setSnapshots(matchAllPattern()) - .setSort(GetSnapshotsRequest.SortBy.START_TIME) + .setSort(SnapshotSortKey.START_TIME) .get() .getSnapshots(); assertThat(allSnapshotInfo, is(List.of(snapshot1, snapshot2, snapshot3))); @@ -311,7 +311,7 @@ public void testSortAfterStartTime() throws Exception { final List allSnapshotInfoDesc = clusterAdmin().prepareGetSnapshots(matchAllPattern()) .setSnapshots(matchAllPattern()) - .setSort(GetSnapshotsRequest.SortBy.START_TIME) + .setSort(SnapshotSortKey.START_TIME) .setOrder(SortOrder.DESC) .get() .getSnapshots(); @@ -340,7 +340,7 @@ private SnapshotInfo createFullSnapshotWithUniqueStartTime(String repoName, Stri private List allAfterStartTimeAscending(long timestamp) throws IOException { final Request request = baseGetSnapshotsRequest("*"); - request.addParameter("sort", GetSnapshotsRequest.SortBy.START_TIME.toString()); + request.addParameter("sort", SnapshotSortKey.START_TIME.toString()); request.addParameter("from_sort_value", String.valueOf(timestamp)); final Response response = getRestClient().performRequest(request); return 
readSnapshotInfos(response).getSnapshots(); @@ -348,7 +348,7 @@ private List allAfterStartTimeAscending(long timestamp) throws IOE private List allBeforeStartTimeDescending(long timestamp) throws IOException { final Request request = baseGetSnapshotsRequest("*"); - request.addParameter("sort", GetSnapshotsRequest.SortBy.START_TIME.toString()); + request.addParameter("sort", SnapshotSortKey.START_TIME.toString()); request.addParameter("from_sort_value", String.valueOf(timestamp)); request.addParameter("order", SortOrder.DESC.toString()); final Response response = getRestClient().performRequest(request); @@ -358,7 +358,7 @@ private List allBeforeStartTimeDescending(long timestamp) throws I private static List getAllSnapshotsForPolicies(String... policies) throws IOException { final Request requestWithPolicy = new Request(HttpGet.METHOD_NAME, "/_snapshot/*/*"); requestWithPolicy.addParameter("slm_policy_filter", Strings.arrayToCommaDelimitedString(policies)); - requestWithPolicy.addParameter("sort", GetSnapshotsRequest.SortBy.NAME.toString()); + requestWithPolicy.addParameter("sort", SnapshotSortKey.NAME.toString()); return readSnapshotInfos(getRestClient().performRequest(requestWithPolicy)).getSnapshots(); } @@ -369,10 +369,10 @@ private void createIndexWithContent(String indexName) { indexDoc(indexName, "some_id", "foo", "bar"); } - private static void assertStablePagination(String repoName, Collection allSnapshotNames, GetSnapshotsRequest.SortBy sort) + private static void assertStablePagination(String repoName, Collection allSnapshotNames, SnapshotSortKey sort) throws IOException { final SortOrder order = randomFrom(SortOrder.values()); - final boolean includeIndexNames = sort == GetSnapshotsRequest.SortBy.INDICES || randomBoolean(); + final boolean includeIndexNames = sort == SnapshotSortKey.INDICES || randomBoolean(); final List allSorted = allSnapshotsSorted(allSnapshotNames, repoName, sort, order, includeIndexNames); for (int i = 1; i <= allSnapshotNames.size(); i++) { @@ -386,7 +386,7 @@ private static void assertStablePagination(String repoName, Collection a final GetSnapshotsResponse getSnapshotsResponse = sortedWithLimit( repoName, sort, - GetSnapshotsRequest.After.from(after, sort).asQueryParam(), + sort.encodeAfterQueryParam(after), i, order, includeIndexNames @@ -413,7 +413,7 @@ private static void assertStablePagination(String repoName, Collection a private static List allSnapshotsSorted( Collection allSnapshotNames, String repoName, - GetSnapshotsRequest.SortBy sortBy, + SnapshotSortKey sortBy, SortOrder order, boolean includeIndices ) throws IOException { @@ -454,7 +454,7 @@ private static GetSnapshotsResponse readSnapshotInfos(Response response) throws private static GetSnapshotsResponse sortedWithLimit( String repoName, - GetSnapshotsRequest.SortBy sortBy, + SnapshotSortKey sortBy, String after, int size, SortOrder order, @@ -486,7 +486,7 @@ private static void addIndexNamesParameter(boolean includeIndices, Request reque private static GetSnapshotsResponse sortedWithLimit( String repoName, - GetSnapshotsRequest.SortBy sortBy, + SnapshotSortKey sortBy, int offset, int size, SortOrder order, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/text_structure.find_field_structure.json b/rest-api-spec/src/main/resources/rest-api-spec/api/text_structure.find_field_structure.json new file mode 100644 index 0000000000000..f82e2ca2d190f --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/text_structure.find_field_structure.json @@ -0,0 +1,90 @@ +{ + 
"text_structure.find_field_structure":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/find-field-structure.html", + "description":"Finds the structure of a text field in an index." + }, + "stability":"stable", + "visibility":"public", + "headers":{ + "accept": ["application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_text_structure/find_field_structure", + "methods":["GET"] + } + ] + }, + "params":{ + "index":{ + "type":"string", + "description":"The index containing the analyzed field", + "required":true + }, + "field":{ + "type":"string", + "description":"The field that should be analyzed", + "required":true + }, + "documents_to_sample":{ + "type":"int", + "description":"How many documents should be included in the analysis", + "default":1000 + }, + "timeout":{ + "type":"time", + "description":"Timeout after which the analysis will be aborted", + "default":"25s" + }, + "format":{ + "type":"enum", + "options":[ + "ndjson", + "xml", + "delimited", + "semi_structured_text" + ], + "description":"Optional parameter to specify the high level file format" + }, + "column_names":{ + "type":"list", + "description":"Optional parameter containing a comma separated list of the column names for a delimited file" + }, + "delimiter":{ + "type":"string", + "description":"Optional parameter to specify the delimiter character for a delimited file - must be a single character" + }, + "quote":{ + "type":"string", + "description":"Optional parameter to specify the quote character for a delimited file - must be a single character" + }, + "should_trim_fields":{ + "type":"boolean", + "description":"Optional parameter to specify whether the values between delimiters in a delimited file should have whitespace trimmed from them" + }, + "grok_pattern":{ + "type":"string", + "description":"Optional parameter to specify the Grok pattern that should be used to extract fields from messages in a semi-structured text file" + }, + "ecs_compatibility":{ + "type":"string", + "description":"Optional parameter to specify the compatibility mode with ECS Grok patterns - may be either 'v1' or 'disabled'" + }, + "timestamp_field":{ + "type":"string", + "description":"Optional parameter to specify the timestamp field in the file" + }, + "timestamp_format":{ + "type":"string", + "description":"Optional parameter to specify the timestamp format in the file - may be either a Joda or Java time format" + }, + "explain":{ + "type":"boolean", + "description":"Whether to include a commentary on how the structure was derived", + "default":false + } + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/text_structure.find_message_structure.json b/rest-api-spec/src/main/resources/rest-api-spec/api/text_structure.find_message_structure.json new file mode 100644 index 0000000000000..d839e4b048f7d --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/text_structure.find_message_structure.json @@ -0,0 +1,80 @@ +{ + "text_structure.find_message_structure":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/find-message-structure.html", + "description":"Finds the structure of a list of messages. The messages must contain data that is suitable to be ingested into Elasticsearch." 
+ }, + "stability":"stable", + "visibility":"public", + "headers":{ + "accept": [ "application/json"], + "content_type": ["application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_text_structure/find_message_structure", + "methods":["GET", "POST"] + } + ] + }, + "params":{ + "timeout":{ + "type":"time", + "description":"Timeout after which the analysis will be aborted", + "default":"25s" + }, + "format":{ + "type":"enum", + "options":[ + "ndjson", + "xml", + "delimited", + "semi_structured_text" + ], + "description":"Optional parameter to specify the high level file format" + }, + "column_names":{ + "type":"list", + "description":"Optional parameter containing a comma separated list of the column names for a delimited file" + }, + "delimiter":{ + "type":"string", + "description":"Optional parameter to specify the delimiter character for a delimited file - must be a single character" + }, + "quote":{ + "type":"string", + "description":"Optional parameter to specify the quote character for a delimited file - must be a single character" + }, + "should_trim_fields":{ + "type":"boolean", + "description":"Optional parameter to specify whether the values between delimiters in a delimited file should have whitespace trimmed from them" + }, + "grok_pattern":{ + "type":"string", + "description":"Optional parameter to specify the Grok pattern that should be used to extract fields from messages in a semi-structured text file" + }, + "ecs_compatibility":{ + "type":"string", + "description":"Optional parameter to specify the compatibility mode with ECS Grok patterns - may be either 'v1' or 'disabled'" + }, + "timestamp_field":{ + "type":"string", + "description":"Optional parameter to specify the timestamp field in the file" + }, + "timestamp_format":{ + "type":"string", + "description":"Optional parameter to specify the timestamp format in the file - may be either a Joda or Java time format" + }, + "explain":{ + "type":"boolean", + "description":"Whether to include a commentary on how the structure was derived", + "default":false + } + }, + "body":{ + "description":"JSON object with one field [messages], containing an array of messages to be analyzed", + "required":true + } + } +} diff --git a/rest-api-spec/src/yamlRestTest/java/org/elasticsearch/test/rest/ClientYamlTestSuiteIT.java b/rest-api-spec/src/yamlRestTest/java/org/elasticsearch/test/rest/ClientYamlTestSuiteIT.java index 465f17eca5532..2b3bab21e8ae6 100644 --- a/rest-api-spec/src/yamlRestTest/java/org/elasticsearch/test/rest/ClientYamlTestSuiteIT.java +++ b/rest-api-spec/src/yamlRestTest/java/org/elasticsearch/test/rest/ClientYamlTestSuiteIT.java @@ -13,6 +13,7 @@ import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; import org.apache.lucene.tests.util.TimeUnits; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; @@ -39,9 +40,15 @@ public ClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate super(testCandidate); } + @UpdateForV9 // remove restCompat check @ParametersFactory public static Iterable parameters() throws Exception { - return createParameters(); + String restCompatProperty = System.getProperty("tests.restCompat"); + if ("true".equals(restCompatProperty)) { + return createParametersWithLegacyNodeSelectorSupport(); + } else { + return createParameters(); + } } @Override diff --git 
a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml index 96998a2a6218e..9eebb281795b0 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml @@ -1,8 +1,8 @@ --- "Help": - - skip: - version: " - 7.3.99" + - requires: + cluster_features: "cat_aliases_show_write_index" reason: "is_write_index is shown in cat.aliases starting version 7.4.0" - do: @@ -22,13 +22,10 @@ --- "Help (pre 7.4.0)": - skip: - version: "7.4.0 - " - features: node_selector + cluster_features: "cat_aliases_show_write_index" reason: "is_write_index is shown in cat.aliases starting version 7.4.0" - do: - node_selector: - version: " - 7.3.99" cat.aliases: help: true @@ -54,8 +51,8 @@ --- "Simple alias": - - skip: - version: " - 7.3.99" + - requires: + cluster_features: "cat_aliases_show_write_index" reason: "is_write_index is shown in cat.aliases starting version 7.4.0" - do: @@ -84,8 +81,7 @@ --- "Simple alias (pre 7.4.0)": - skip: - version: "7.4.0 - " - features: node_selector + cluster_features: "cat_aliases_show_write_index" reason: "is_write_index is shown in cat.aliases starting version 7.4.0" - do: @@ -98,8 +94,6 @@ name: test_alias - do: - node_selector: - version: " - 7.3.99" cat.aliases: {} - match: @@ -114,8 +108,8 @@ --- "Complex alias": - - skip: - version: " - 7.3.99" + - requires: + cluster_features: "cat_aliases_show_write_index" reason: "is_write_index is shown in cat.aliases starting version 7.4.0" - do: @@ -155,8 +149,7 @@ --- "Complex alias (pre 7.4.0)": - skip: - version: "7.4.0 - " - features: node_selector + cluster_features: "cat_aliases_show_write_index" reason: "is_write_index is shown in cat.aliases starting version 7.4.0" - do: @@ -179,8 +172,6 @@ term: foo: bar - do: - node_selector: - version: " - 7.3.99" cat.aliases: {} - match: @@ -278,8 +269,8 @@ --- "Column headers": - - skip: - version: " - 7.3.99" + - requires: + cluster_features: "cat_aliases_show_write_index" reason: "is_write_index is shown in cat.aliases starting version 7.4.0" - do: @@ -316,8 +307,7 @@ --- "Column headers (pre 7.4.0)": - skip: - version: "7.4.0 - " - features: node_selector + cluster_features: "cat_aliases_show_write_index" reason: "is_write_index is shown in cat.aliases starting version 7.4.0" - do: @@ -330,8 +320,6 @@ name: test_1 - do: - node_selector: - version: " - 7.3.99" cat.aliases: v: true @@ -385,10 +373,10 @@ --- "Alias against closed index": - - skip: - version: " - 7.3.99" + - requires: + cluster_features: "cat_aliases_show_write_index" reason: "is_write_index is shown in cat.aliases starting version 7.4.0" - features: ["allowed_warnings"] + test_runner_features: ["allowed_warnings"] - do: indices.create: @@ -421,10 +409,12 @@ --- "Alias against closed index (pre 7.4.0)": - skip: - version: "7.4.0 - " - features: ["node_selector", "allowed_warnings"] + cluster_features: "cat_aliases_show_write_index" reason: "is_write_index is shown in cat.aliases starting version 7.4.0" + - requires: + test_runner_features: ["allowed_warnings"] + - do: indices.create: index: test_index @@ -439,8 +429,6 @@ - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" - 
do: - node_selector: - version: " - 7.3.99" cat.aliases: {} - match: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.allocation/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.allocation/10_basic.yml index ed519438f1b1e..2ba01c3b5711e 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.allocation/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.allocation/10_basic.yml @@ -2,8 +2,8 @@ "Help": - skip: - version: " - 8.9.99" - reason: "node.role column added in 8.10.0" + version: " - 8.13.99" + reason: "shards.undesired, write_load.forecast and disk.indices.forecast are added in 8.14.0" - do: cat.allocation: @@ -11,24 +11,27 @@ - match: $body: | - /^ shards .+ \n - disk.indices .+ \n - disk.used .+ \n - disk.avail .+ \n - disk.total .+ \n - disk.percent .+ \n - host .+ \n - ip .+ \n - node .+ \n - node.role .+ \n + /^ shards .+ \n + shards.undesired .+ \n + write_load.forecast .+ \n + disk.indices.forecast .+ \n + disk.indices .+ \n + disk.used .+ \n + disk.avail .+ \n + disk.total .+ \n + disk.percent .+ \n + host .+ \n + ip .+ \n + node .+ \n + node.role .+ \n $/ --- "One index": - skip: - version: " - 8.11.99" - reason: "node.role column shown by default from 8.12.0 onwards" + version: " - 8.13.99" + reason: "shards.undesired, write_load.forecast and disk.indices.forecast columns are added in 8.14.0" - do: indices.create: @@ -42,6 +45,9 @@ /^ ( \s* #allow leading spaces to account for right-justified text \d+ \s+ + \d+ \s+ + \d+(\.\d+)? \s+ + \d+(\.\d+)?[kmgt]?b \s+ \d+(\.\d+)?[kmgt]?b \s+ \d+(\.\d+)?[kmgt]?b \s+ (\d+(\.\d+)?[kmgt]b \s+) #always should return value since we filter out non data nodes by default @@ -65,8 +71,8 @@ "Node ID": - skip: - version: " - 8.11.99" - reason: "node.role column shown by default from 8.12.0 onwards" + version: " - 8.13.99" + reason: "shards.undesired, write_load.forecast and disk.indices.forecast columns are added in 8.14.0" - do: cat.allocation: @@ -76,6 +82,9 @@ $body: | /^ ( \d+ \s+ #usually 0, unless some system index has been recreated before this runs + \d+ \s+ + \d+(\.\d+)? \s+ + \d+(\.\d+)?[kmgt]?b \s+ \d+(\.\d+)?[kmgt]?b \s+ \d+(\.\d+)?[kmgt]?b \s+ (\d+(\.\d+)?[kmgt]b \s+)? #no value from client nodes @@ -99,12 +108,11 @@ $/ --- - "All Nodes": - skip: - version: " - 8.11.99" - reason: "node.role column shown by default from 8.12.0 onwards" + version: " - 8.13.99" + reason: "shards.undesired, write_load.forecast and disk.indices.forecast columns are added in 8.14.0" - do: cat.allocation: @@ -115,6 +123,9 @@ /^ ( \s* #allow leading spaces to account for right-justified text \d+ \s+ #usually 0, unless some system index has been recreated before this runs + \d+ \s+ + \d+(\.\d+)? \s+ + \d+(\.\d+)?[kmgt]?b \s+ \d+(\.\d+)?[kmgt]?b \s+ \d+(\.\d+)?[kmgt]?b \s+ (\d+(\.\d+)?[kmgt]b \s+)? 
#no value from client nodes @@ -138,8 +149,8 @@ "Column headers": - skip: - version: " - 8.11.99" - reason: "node.role column shown by default from 8.12.0 onwards" + version: " - 8.13.99" + reason: "shards.undesired, write_load.forecast and disk.indices.forecast columns are added in 8.14.0" - do: cat.allocation: @@ -148,6 +159,9 @@ $body: | /^ shards \s+ + shards.undesired \s+ + write_load.forecast \s+ + disk.indices.forecast \s+ disk.indices \s+ disk.used \s+ disk.avail \s+ @@ -161,6 +175,9 @@ ( \s* #allow leading spaces to account for right-justified text \d+ \s+ #usually 0, unless some system index has been recreated before this runs + \d+ \s+ + \d+(\.\d+)? \s+ + \d+(\.\d+)?[kmgt]?b \s+ \d+(\.\d+)?[kmgt]?b \s+ \d+(\.\d+)?[kmgt]?b \s+ (\d+(\.\d+)?[kmgt]b \s+) #always should return value since we filter out non data nodes by default @@ -211,12 +228,11 @@ --- - "Bytes": - skip: - version: " - 8.11.99" - reason: "node.role column shown by default from 8.12.0 onwards" + version: " - 8.13.99" + reason: "shards.undesired, write_load.forecast and disk.indices.forecast columns are added in 8.14.0" - do: cat.allocation: @@ -226,6 +242,9 @@ $body: | /^ ( \d+ \s+ #usually 0, unless some system index has been recreated before this runs + \d+ \s+ + \d+(\.\d+)? \s+ + \d+ \s+ 0 \s+ \d+ \s+ (\d+ \s+) #always should return value since we filter out non data nodes by default @@ -240,7 +259,6 @@ $/ --- - "Node roles": - skip: @@ -259,3 +277,25 @@ \n )+ $/ + +--- +"Node forecasts": + + - skip: + version: " - 8.13.99" + reason: "write_load.forecast and disk.indices.forecast columns added in 8.14.0" + + - do: + cat.allocation: + h: [node, shards.undesired, write_load.forecast, disk.indices.forecast] + + - match: + $body: | + /^ + ( [-\w.]+ \s+ + [-\w.]+ \s+ + [-\w.]+ \s+ + [\w]+ + \n + )+ + $/ diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/health/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/health/10_basic.yml index 5e6ca8247997c..a000a9eac16ad 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/health/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/health/10_basic.yml @@ -1,10 +1,8 @@ --- "cluster health basic test": - skip: - version: all - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/90183" - # version: "- 8.3.99" - # reason: "health was only added in 8.2.0, and master_is_stable in 8.4.0" + version: "- 8.6.99" + reason: "health was added in 8.2.0, master_is_stable in 8.4.0, and REST API updated in 8.7" - do: health_report: { } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_field_mapping/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_field_mapping/10_basic.yml index 350e9ff37f43b..1878ae0997649 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_field_mapping/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_field_mapping/10_basic.yml @@ -55,12 +55,12 @@ setup: "Get field mapping with local parameter should fail": - skip: - features: ["warnings", "node_selector"] + features: ["warnings"] + version: " - 7.99.99" + reason: "local parameter for get field mapping API was allowed before v8" - do: catch: bad_request - node_selector: - version: "8.0.0 - " indices.get_field_mapping: fields: text local: true diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.open/10_basic.yml 
b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.open/10_basic.yml index 93447612406b9..76dfa552b5630 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.open/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.open/10_basic.yml @@ -127,7 +127,7 @@ - skip: version: " - 7.99.99" reason: "required deprecation warning is only emitted in 8.0 and later" - features: ["warnings", "node_selector"] + features: ["warnings"] - do: indices.create: @@ -140,7 +140,5 @@ indices.close: index: "index_*" wait_for_active_shards: index-setting - node_selector: - version: "8.0.0 - " warnings: - "?wait_for_active_shards=index-setting is now the default behaviour; the 'index-setting' value for this parameter should no longer be used since it will become unsupported in version 9" diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml index db3d2f349dcef..ef121411d8351 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml @@ -96,7 +96,7 @@ - do: node_selector: - version: " - 7.99.99" + version: "original" catch: bad_request indices.put_mapping: index: test_index diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/80_allocation_stats.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/80_allocation_stats.yml new file mode 100644 index 0000000000000..a2e1117073cde --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/80_allocation_stats.yml @@ -0,0 +1,22 @@ +--- +"Allocation stats": + - skip: + version: " - 8.13.99" + reason: "allocation stats was added in 8.14.0" + features: [arbitrary_key] + + - do: + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id + + - do: + nodes.stats: + metric: [ allocations ] + + - exists: nodes.$node_id.allocations + - exists: nodes.$node_id.allocations.shards + - exists: nodes.$node_id.allocations.undesired_shards + - exists: nodes.$node_id.allocations.forecasted_ingest_load + - exists: nodes.$node_id.allocations.forecasted_disk_usage_in_bytes + - exists: nodes.$node_id.allocations.current_disk_usage_in_bytes diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/100_knn_nested_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/100_knn_nested_search.yml index c69e22d274c8e..6c6c75990b0f5 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/100_knn_nested_search.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/100_knn_nested_search.yml @@ -323,3 +323,87 @@ setup: - match: {hits.total.value: 3} - is_true : profile +--- +"nested kNN search with filter that might match nested docs": + - skip: + version: ' - 8.13.99' + reason: 'bugfix for matching non-nested docs in 8.14' + + - do: + indices.create: + index: nested_text + body: + mappings: + properties: + range: + type: long + other_nested_thing: + type: nested + properties: + text: + type: text + paragraphs: + type: nested + properties: + other_nested_thing: + type: nested + properties: + text: + type: text + vector: + type: dense_vector + dims: 2 + index: true + similarity: cosine + vector: + type: dense_vector + dims: 2 + 
index: true + similarity: cosine + - do: + index: + index: nested_text + id: "1" + body: + publish_date: "1" + paragraphs: + - vector: [1, 1] + text: "some text" + - vector: [1, 2] + text: "some text" + other_nested_thing: + - text: "some text" + vector: [1, 2] + - do: + index: + index: nested_text + id: "2" + body: + paragraphs: + - vector: [2, 1] + text: "some text" + - vector: [2, 2] + text: "some text" + other_nested_thing: + - text: "some text" + vector: [ 1, 2 ] + - do: + indices.refresh: {} + + - do: + search: + index: nested_text + body: + knn: + field: paragraphs.vector + query_vector: [1, 2] + num_candidates: 10 + k: 10 + filter: + bool: + must_not: + exists: + field: publish_date + + - match: {hits.total.value: 1} + - match: {hits.hits.0._id: "2"} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/130_knn_query_nested_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/130_knn_query_nested_search.yml index 5d07c0c8b5f9d..53cc7eb064270 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/130_knn_query_nested_search.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/130_knn_query_nested_search.yml @@ -319,3 +319,90 @@ setup: # Rabbit only has one passage vector - match: {hits.hits.4.fields.name.0: "rabbit.jpg"} - length: { hits.hits.4.inner_hits.nested.hits.hits: 1 } +--- +"nested kNN query search with filter that might match nested docs": + - skip: + version: ' - 8.13.99' + reason: 'bugfix for matching non-nested docs in 8.14' + + - do: + indices.create: + index: nested_text + body: + mappings: + properties: + range: + type: long + other_nested_thing: + type: nested + properties: + text: + type: text + paragraphs: + type: nested + properties: + other_nested_thing: + type: nested + properties: + text: + type: text + vector: + type: dense_vector + dims: 2 + index: true + similarity: cosine + vector: + type: dense_vector + dims: 2 + index: true + similarity: cosine + - do: + index: + index: nested_text + id: "1" + body: + publish_date: "1" + paragraphs: + - vector: [1, 1] + text: "some text" + - vector: [1, 2] + text: "some text" + other_nested_thing: + - text: "some text" + vector: [1, 2] + - do: + index: + index: nested_text + id: "2" + body: + paragraphs: + - vector: [2, 1] + text: "some text" + - vector: [2, 2] + text: "some text" + other_nested_thing: + - text: "some text" + vector: [ 1, 2 ] + - do: + indices.refresh: {} + + - do: + search: + index: nested_text + body: + query: + nested: + path: paragraphs + query: + knn: + field: paragraphs.vector + query_vector: [1, 2] + num_candidates: 10 + filter: + bool: + must_not: + exists: + field: publish_date + + - match: {hits.total.value: 1} + - match: {hits.hits.0._id: "2"} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateShardPathIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateShardPathIT.java index 3a1fa8e5da272..560a525ec526c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateShardPathIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateShardPathIT.java @@ -9,14 +9,18 @@ package org.elasticsearch.cluster; import org.apache.lucene.tests.util.LuceneTestCase; +import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainRequest; +import org.elasticsearch.action.admin.cluster.allocation.TransportClusterAllocationExplainAction; import 
org.elasticsearch.action.admin.cluster.node.shutdown.NodePrevalidateShardPathResponse; import org.elasticsearch.action.admin.cluster.node.shutdown.PrevalidateShardPathRequest; import org.elasticsearch.action.admin.cluster.node.shutdown.PrevalidateShardPathResponse; import org.elasticsearch.action.admin.cluster.node.shutdown.TransportPrevalidateShardPathAction; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESIntegTestCase; @@ -77,7 +81,31 @@ public void testCheckShards() throws Exception { assertThat(resp2.getNodes().size(), equalTo(1)); assertThat(resp2.getNodes().get(0).getNode().getId(), equalTo(node2Id)); assertTrue("There should be no failures in the response", resp.failures().isEmpty()); - assertTrue("The relocation source node should have removed the shard(s)", resp2.getNodes().get(0).getShardIds().isEmpty()); + Set node2ShardIds = resp2.getNodes().get(0).getShardIds(); + if (node2ShardIds.size() > 0) { + for (var node2Shard : clusterService().state() + .routingTable() + .allShards() + .filter(s -> s.getIndexName().equals(indexName)) + .filter(s -> node2ShardIds.contains(s.shardId())) + .filter(s -> s.currentNodeId().equals(node2Id)) + .toList()) { + var explanation = client().execute( + TransportClusterAllocationExplainAction.TYPE, + new ClusterAllocationExplainRequest().setIndex(node2Shard.getIndexName()) + .setCurrentNode(node2Shard.currentNodeId()) + .setShard(node2Shard.id()) + .setPrimary(node2Shard.primary()) + ).get(); + logger.info( + "Shard: {} is still located on relocation source node: {}. Allocation explanation: {}", + node2Shard.shardId(), + node2, + Strings.toString(ChunkedToXContent.wrapAsToXContent(explanation), false, true) + ); + } + throw new AssertionError("The relocation source node should have removed the shard(s)"); + } } catch (AssertionError e) { // Removal of shards which are no longer allocated to the node is attempted on every cluster state change in IndicesStore. // If for whatever reason the removal is not triggered (e.g. 
not enough nodes reported that the shards are active) or it diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java index 7b9f89b60ed94..d664d4ab352d9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java @@ -34,9 +34,7 @@ import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.junit.annotations.TestIssueLogging; -import org.hamcrest.Description; import org.hamcrest.Matcher; -import org.hamcrest.TypeSafeMatcher; import java.util.Arrays; import java.util.Comparator; @@ -51,8 +49,10 @@ import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING; import static org.elasticsearch.index.store.Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.in; import static org.hamcrest.Matchers.is; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) @@ -95,7 +95,7 @@ public void testHighWatermarkNotExceeded() throws Exception { // increase disk size of node 0 to allow just enough room for one shard, and check that it's rebalanced back getTestFileStore(dataNodeName).setTotalSpace(shardSizes.getSmallestShardSize() + WATERMARK_BYTES); - assertBusyWithDiskUsageRefresh(dataNode0Id, indexName, new ContainsExactlyOneOf<>(shardSizes.getSmallestShardIds())); + assertBusyWithDiskUsageRefresh(dataNode0Id, indexName, contains(in(shardSizes.getSmallestShardIds()))); } public void testRestoreSnapshotAllocationDoesNotExceedWatermark() throws Exception { @@ -158,11 +158,12 @@ public void testRestoreSnapshotAllocationDoesNotExceedWatermark() throws Excepti // increase disk size of node 0 to allow just enough room for one shard, and check that it's rebalanced back getTestFileStore(dataNodeName).setTotalSpace(shardSizes.getSmallestShardSize() + WATERMARK_BYTES); - assertBusyWithDiskUsageRefresh(dataNode0Id, indexName, new ContainsExactlyOneOf<>(shardSizes.getSmallestShardIds())); + assertBusyWithDiskUsageRefresh(dataNode0Id, indexName, contains(in(shardSizes.getSmallestShardIds()))); } @TestIssueLogging( - value = "org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceReconciler:DEBUG," + value = "org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceComputer:TRACE," + + "org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceReconciler:DEBUG," + "org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceShardsAllocator:TRACE", issueUrl = "https://github.com/elastic/elasticsearch/issues/105331" ) @@ -220,11 +221,7 @@ public void testRestoreSnapshotAllocationDoesNotExceedWatermarkWithMultipleShard assertThat(restoreInfo.successfulShards(), is(snapshotInfo.totalShards())); assertThat(restoreInfo.failedShards(), is(0)); - assertBusyWithDiskUsageRefresh( - dataNode0Id, - indexName, - new ContainsExactlyOneOf<>(shardSizes.getShardIdsWithSizeSmallerOrEqual(usableSpace)) - 
); + assertBusyWithDiskUsageRefresh(dataNode0Id, indexName, contains(in(shardSizes.getShardIdsWithSizeSmallerOrEqual(usableSpace)))); } private Set getShardIds(final String nodeId, final String indexName) { @@ -345,23 +342,4 @@ private void assertBusyWithDiskUsageRefresh(String nodeId, String indexName, Mat private InternalClusterInfoService getInternalClusterInfoService() { return (InternalClusterInfoService) internalCluster().getCurrentMasterNodeInstance(ClusterInfoService.class); } - - private static final class ContainsExactlyOneOf extends TypeSafeMatcher> { - - private final Set expectedValues; - - ContainsExactlyOneOf(Set expectedValues) { - this.expectedValues = expectedValues; - } - - @Override - protected boolean matchesSafely(Set item) { - return item.size() == 1 && expectedValues.contains(item.iterator().next()); - } - - @Override - public void describeTo(Description description) { - description.appendText("Expected to contain exactly one value from ").appendValueList("[", ",", "]", expectedValues); - } - } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index 5f3b854b74fb4..d1827bf49410f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.gateway; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; @@ -32,6 +31,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.env.BuildVersion; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.NodeMetadata; import org.elasticsearch.index.IndexVersions; @@ -564,7 +564,7 @@ public void testHalfDeletedIndexImport() throws Exception { .putCustom(IndexGraveyard.TYPE, IndexGraveyard.builder().addTombstone(metadata.index("test").getIndex()).build()) .build() ); - NodeMetadata.FORMAT.writeAndCleanup(new NodeMetadata(nodeId, Version.CURRENT, metadata.oldestIndexVersion()), paths); + NodeMetadata.FORMAT.writeAndCleanup(new NodeMetadata(nodeId, BuildVersion.current(), metadata.oldestIndexVersion()), paths); }); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTaskInitializationFailureIT.java b/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTaskInitializationFailureIT.java index b8d9d4a184f06..ec193a37eeab7 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTaskInitializationFailureIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTaskInitializationFailureIT.java @@ -49,6 +49,7 @@ public void testPersistentTasksThatFailDuringInitializationAreRemovedFromCluster UUIDs.base64UUID(), FailingInitializationPersistentTaskExecutor.TASK_NAME, new FailingInitializationTaskParams(), + null, startPersistentTaskFuture ); startPersistentTaskFuture.actionGet(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorFullRestartIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorFullRestartIT.java index d1c72a9650b85..73c9495a2cd2f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorFullRestartIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorFullRestartIT.java @@ -44,7 +44,7 @@ public void testFullClusterRestart() throws Exception { PlainActionFuture> future = new PlainActionFuture<>(); futures.add(future); taskIds[i] = UUIDs.base64UUID(); - service.sendStartRequest(taskIds[i], TestPersistentTasksExecutor.NAME, new TestParams("Blah"), future); + service.sendStartRequest(taskIds[i], TestPersistentTasksExecutor.NAME, new TestParams("Blah"), null, future); } for (int i = 0; i < numberOfTasks; i++) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java index 3cc90a6795e37..813c06d9f02f3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java @@ -68,7 +68,7 @@ public static class WaitForPersistentTaskFuture> future = new PlainActionFuture<>(); - persistentTasksService.sendStartRequest(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, new TestParams("Blah"), future); + persistentTasksService.sendStartRequest(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, new TestParams("Blah"), null, future); long allocationId = future.get().getAllocationId(); waitForTaskToStart(); TaskInfo firstRunningTask = clusterAdmin().prepareListTasks() @@ -99,7 +99,7 @@ public void testPersistentActionCompletion() throws Exception { PersistentTasksService persistentTasksService = internalCluster().getInstance(PersistentTasksService.class); PlainActionFuture> future = new PlainActionFuture<>(); String taskId = UUIDs.base64UUID(); - persistentTasksService.sendStartRequest(taskId, TestPersistentTasksExecutor.NAME, new TestParams("Blah"), future); + persistentTasksService.sendStartRequest(taskId, TestPersistentTasksExecutor.NAME, new TestParams("Blah"), null, future); long allocationId = future.get().getAllocationId(); waitForTaskToStart(); TaskInfo firstRunningTask = clusterAdmin().prepareListTasks() @@ -118,7 +118,7 @@ public void testPersistentActionCompletion() throws Exception { logger.info("Simulating errant completion notification"); // try sending completion request with incorrect allocation id PlainActionFuture> failedCompletionNotificationFuture = new PlainActionFuture<>(); - persistentTasksService.sendCompletionRequest(taskId, Long.MAX_VALUE, null, null, failedCompletionNotificationFuture); + persistentTasksService.sendCompletionRequest(taskId, Long.MAX_VALUE, null, null, null, failedCompletionNotificationFuture); assertFutureThrows(failedCompletionNotificationFuture, ResourceNotFoundException.class); // Make sure that the task is still running assertThat( @@ -140,7 +140,7 @@ public void testPersistentActionWithNoAvailableNode() throws Exception { PlainActionFuture> future = new PlainActionFuture<>(); TestParams testParams = new TestParams("Blah"); testParams.setExecutorNodeAttr("test"); - persistentTasksService.sendStartRequest(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, testParams, future); + persistentTasksService.sendStartRequest(UUIDs.base64UUID(), 
TestPersistentTasksExecutor.NAME, testParams, null, future); String taskId = future.get().getId(); Settings nodeSettings = Settings.builder().put(nodeSettings(0, Settings.EMPTY)).put("node.attr.test_attr", "test").build(); @@ -164,7 +164,7 @@ public void testPersistentActionWithNoAvailableNode() throws Exception { // Remove the persistent task PlainActionFuture> removeFuture = new PlainActionFuture<>(); - persistentTasksService.sendRemoveRequest(taskId, removeFuture); + persistentTasksService.sendRemoveRequest(taskId, null, removeFuture); assertEquals(removeFuture.get().getId(), taskId); } @@ -181,7 +181,7 @@ public void testPersistentActionWithNonClusterStateCondition() throws Exception PersistentTasksService persistentTasksService = internalCluster().getInstance(PersistentTasksService.class); PlainActionFuture> future = new PlainActionFuture<>(); TestParams testParams = new TestParams("Blah"); - persistentTasksService.sendStartRequest(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, testParams, future); + persistentTasksService.sendStartRequest(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, testParams, null, future); String taskId = future.get().getId(); assertThat(clusterAdmin().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks(), empty()); @@ -196,14 +196,14 @@ public void testPersistentActionWithNonClusterStateCondition() throws Exception // Remove the persistent task PlainActionFuture> removeFuture = new PlainActionFuture<>(); - persistentTasksService.sendRemoveRequest(taskId, removeFuture); + persistentTasksService.sendRemoveRequest(taskId, null, removeFuture); assertEquals(removeFuture.get().getId(), taskId); } public void testPersistentActionStatusUpdate() throws Exception { PersistentTasksService persistentTasksService = internalCluster().getInstance(PersistentTasksService.class); PlainActionFuture> future = new PlainActionFuture<>(); - persistentTasksService.sendStartRequest(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, new TestParams("Blah"), future); + persistentTasksService.sendStartRequest(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, new TestParams("Blah"), null, future); String taskId = future.get().getId(); waitForTaskToStart(); TaskInfo firstRunningTask = clusterAdmin().prepareListTasks() @@ -249,7 +249,7 @@ public void testPersistentActionStatusUpdate() throws Exception { assertFutureThrows(future1, IllegalStateException.class, "timed out after 10ms"); PlainActionFuture> failedUpdateFuture = new PlainActionFuture<>(); - persistentTasksService.sendUpdateStateRequest(taskId, -2, new State("should fail"), failedUpdateFuture); + persistentTasksService.sendUpdateStateRequest(taskId, -2, new State("should fail"), null, failedUpdateFuture); assertFutureThrows( failedUpdateFuture, ResourceNotFoundException.class, @@ -274,11 +274,11 @@ public void testCreatePersistentTaskWithDuplicateId() throws Exception { PersistentTasksService persistentTasksService = internalCluster().getInstance(PersistentTasksService.class); PlainActionFuture> future = new PlainActionFuture<>(); String taskId = UUIDs.base64UUID(); - persistentTasksService.sendStartRequest(taskId, TestPersistentTasksExecutor.NAME, new TestParams("Blah"), future); + persistentTasksService.sendStartRequest(taskId, TestPersistentTasksExecutor.NAME, new TestParams("Blah"), null, future); future.get(); PlainActionFuture> future2 = new PlainActionFuture<>(); - persistentTasksService.sendStartRequest(taskId, TestPersistentTasksExecutor.NAME, new TestParams("Blah"), 
future2); + persistentTasksService.sendStartRequest(taskId, TestPersistentTasksExecutor.NAME, new TestParams("Blah"), null, future2); assertFutureThrows(future2, ResourceAlreadyExistsException.class); waitForTaskToStart(); @@ -314,7 +314,7 @@ public void testUnassignRunningPersistentTask() throws Exception { PlainActionFuture> future = new PlainActionFuture<>(); TestParams testParams = new TestParams("Blah"); testParams.setExecutorNodeAttr("test"); - persistentTasksService.sendStartRequest(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, testParams, future); + persistentTasksService.sendStartRequest(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, testParams, null, future); PersistentTask task = future.get(); String taskId = task.getId(); @@ -365,7 +365,7 @@ public void testAbortLocally() throws Exception { persistentTasksClusterService.setRecheckInterval(TimeValue.timeValueMillis(1)); PersistentTasksService persistentTasksService = internalCluster().getInstance(PersistentTasksService.class); PlainActionFuture> future = new PlainActionFuture<>(); - persistentTasksService.sendStartRequest(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, new TestParams("Blah"), future); + persistentTasksService.sendStartRequest(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, new TestParams("Blah"), null, future); String taskId = future.get().getId(); long allocationId = future.get().getAllocationId(); waitForTaskToStart(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java b/server/src/internalClusterTest/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java index d9aa15ed6e2f5..e7d23f97fc992 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java @@ -51,6 +51,7 @@ public void testEnableAssignmentAfterRestart() throws Exception { "task_" + i, TestPersistentTasksExecutor.NAME, new TestParams(randomAlphaOfLength(10)), + null, ActionListener.running(latch::countDown) ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RandomSamplerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RandomSamplerIT.java index 28c186c559dff..c9a6cfaf754c6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RandomSamplerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RandomSamplerIT.java @@ -24,6 +24,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThan; @ESIntegTestCase.SuiteScopeTestCase @@ -84,6 +85,49 @@ public void setupSuiteScopeCluster() throws Exception { ensureSearchable(); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105839") + public void testRandomSamplerConsistentSeed() { + double[] sampleMonotonicValue = new double[1]; + double[] sampleNumericValue = new double[1]; + long[] sampledDocCount = new long[1]; + // initialize the values + assertResponse( + prepareSearch("idx").setPreference("shard:0") + .addAggregation( + new 
RandomSamplerAggregationBuilder("sampler").setProbability(PROBABILITY) + .setSeed(0) + .subAggregation(avg("mean_monotonic").field(MONOTONIC_VALUE)) + .subAggregation(avg("mean_numeric").field(NUMERIC_VALUE)) + .setShardSeed(42) + ), + response -> { + InternalRandomSampler sampler = response.getAggregations().get("sampler"); + sampleMonotonicValue[0] = ((Avg) sampler.getAggregations().get("mean_monotonic")).getValue(); + sampleNumericValue[0] = ((Avg) sampler.getAggregations().get("mean_numeric")).getValue(); + sampledDocCount[0] = sampler.getDocCount(); + } + ); + + for (int i = 0; i < NUM_SAMPLE_RUNS; i++) { + assertResponse( + prepareSearch("idx").setPreference("shard:0") + .addAggregation( + new RandomSamplerAggregationBuilder("sampler").setProbability(PROBABILITY) + .setSeed(0) + .subAggregation(avg("mean_monotonic").field(MONOTONIC_VALUE)) + .subAggregation(avg("mean_numeric").field(NUMERIC_VALUE)) + .setShardSeed(42) + ), + response -> { + InternalRandomSampler sampler = response.getAggregations().get("sampler"); + assertThat(((Avg) sampler.getAggregations().get("mean_monotonic")).getValue(), equalTo(sampleMonotonicValue[0])); + assertThat(((Avg) sampler.getAggregations().get("mean_numeric")).getValue(), equalTo(sampleNumericValue[0])); + assertThat(sampler.getDocCount(), equalTo(sampledDocCount[0])); + } + ); + } + } + public void testRandomSampler() { double[] sampleMonotonicValue = new double[1]; double[] sampleNumericValue = new double[1]; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoPointScriptDocValuesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoPointScriptDocValuesIT.java index 3b2d266e77cda..c62f4932220fc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoPointScriptDocValuesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoPointScriptDocValuesIT.java @@ -21,8 +21,7 @@ import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; -import org.hamcrest.BaseMatcher; -import org.hamcrest.Description; +import org.hamcrest.Matcher; import org.hamcrest.Matchers; import org.junit.Before; @@ -33,12 +32,12 @@ import java.util.HashMap; import java.util.Map; import java.util.function.Function; +import java.util.stream.IntStream; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.oneOf; public class GeoPointScriptDocValuesIT extends ESSingleNodeTestCase { @@ -255,28 +254,8 @@ public void testNullPoint() throws Exception { ); } - private static MultiPointLabelPosition isMultiPointLabelPosition(double[] lats, double[] lons) { - return new MultiPointLabelPosition(lats, lons); - } - - private static class MultiPointLabelPosition extends BaseMatcher { - private final GeoPoint[] points; - - private MultiPointLabelPosition(double[] lats, double[] lons) { - points = new GeoPoint[lats.length]; - for (int i = 0; i < lats.length; i++) { - points[i] = new GeoPoint(lats[i], lons[i]); - } - } - - @Override - public boolean matches(Object actual) { - return is(oneOf(points)).matches(actual); - } - - @Override - public void describeTo(Description description) { - 
description.appendText("is(oneOf(" + Arrays.toString(points) + ")"); - } + private static Matcher isMultiPointLabelPosition(double[] lats, double[] lons) { + assert lats.length == lons.length; + return oneOf(IntStream.range(0, lats.length).mapToObj(i -> new GeoPoint(lats[i], lons[i])).toArray(GeoPoint[]::new)); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java index 6b5b3826272ce..a04d1a5c8b02d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequestBuilder; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; +import org.elasticsearch.action.admin.cluster.snapshots.get.SnapshotSortKey; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.RepositoryMissingException; @@ -84,39 +85,31 @@ private void doTestSortOrder(String repoName, Collection allSnapshotName final List defaultSorting = clusterAdmin().prepareGetSnapshots(repoName).setOrder(order).get().getSnapshots(); assertSnapshotListSorted(defaultSorting, null, order); final String[] repos = { repoName }; + assertSnapshotListSorted(allSnapshotsSorted(allSnapshotNames, repos, SnapshotSortKey.NAME, order), SnapshotSortKey.NAME, order); assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repos, GetSnapshotsRequest.SortBy.NAME, order), - GetSnapshotsRequest.SortBy.NAME, + allSnapshotsSorted(allSnapshotNames, repos, SnapshotSortKey.DURATION, order), + SnapshotSortKey.DURATION, order ); assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repos, GetSnapshotsRequest.SortBy.DURATION, order), - GetSnapshotsRequest.SortBy.DURATION, + allSnapshotsSorted(allSnapshotNames, repos, SnapshotSortKey.INDICES, order), + SnapshotSortKey.INDICES, order ); assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repos, GetSnapshotsRequest.SortBy.INDICES, order), - GetSnapshotsRequest.SortBy.INDICES, + allSnapshotsSorted(allSnapshotNames, repos, SnapshotSortKey.START_TIME, order), + SnapshotSortKey.START_TIME, order ); + assertSnapshotListSorted(allSnapshotsSorted(allSnapshotNames, repos, SnapshotSortKey.SHARDS, order), SnapshotSortKey.SHARDS, order); assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repos, GetSnapshotsRequest.SortBy.START_TIME, order), - GetSnapshotsRequest.SortBy.START_TIME, + allSnapshotsSorted(allSnapshotNames, repos, SnapshotSortKey.FAILED_SHARDS, order), + SnapshotSortKey.FAILED_SHARDS, order ); assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repos, GetSnapshotsRequest.SortBy.SHARDS, order), - GetSnapshotsRequest.SortBy.SHARDS, - order - ); - assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repos, GetSnapshotsRequest.SortBy.FAILED_SHARDS, order), - GetSnapshotsRequest.SortBy.FAILED_SHARDS, - order - ); - assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repos, GetSnapshotsRequest.SortBy.REPOSITORY, order), - GetSnapshotsRequest.SortBy.REPOSITORY, + allSnapshotsSorted(allSnapshotNames, repos, SnapshotSortKey.REPOSITORY, order), + SnapshotSortKey.REPOSITORY, order ); } @@ -127,7 +120,7 @@ 
public void testResponseSizeLimit() throws Exception { createRepository(repoName, "fs", repoPath); maybeInitWithOldSnapshotVersion(repoName, repoPath); final List names = createNSnapshots(repoName, randomIntBetween(6, 20)); - for (GetSnapshotsRequest.SortBy sort : GetSnapshotsRequest.SortBy.values()) { + for (SnapshotSortKey sort : SnapshotSortKey.values()) { for (SortOrder order : SortOrder.values()) { logger.info("--> testing pagination for [{}] [{}]", sort, order); doTestPagination(repoName, names, sort, order); @@ -135,7 +128,7 @@ public void testResponseSizeLimit() throws Exception { } } - private void doTestPagination(String repoName, List names, GetSnapshotsRequest.SortBy sort, SortOrder order) { + private void doTestPagination(String repoName, List names, SnapshotSortKey sort, SortOrder order) { final String[] repos = { repoName }; final List allSnapshotsSorted = allSnapshotsSorted(names, repos, sort, order); final GetSnapshotsResponse batch1 = sortedWithLimit(repos, sort, null, 2, order); @@ -191,9 +184,9 @@ public void testSortAndPaginateWithInProgress() throws Exception { ) ); final String[] repos = { repoName }; - assertStablePagination(repos, allSnapshotNames, GetSnapshotsRequest.SortBy.START_TIME); - assertStablePagination(repos, allSnapshotNames, GetSnapshotsRequest.SortBy.NAME); - assertStablePagination(repos, allSnapshotNames, GetSnapshotsRequest.SortBy.INDICES); + assertStablePagination(repos, allSnapshotNames, SnapshotSortKey.START_TIME); + assertStablePagination(repos, allSnapshotNames, SnapshotSortKey.NAME); + assertStablePagination(repos, allSnapshotNames, SnapshotSortKey.INDICES); final List currentSnapshots = clusterAdmin().prepareGetSnapshots(matchAllPattern()) .setSnapshots(GetSnapshotsRequest.CURRENT_SNAPSHOT) .get() @@ -215,9 +208,9 @@ public void testSortAndPaginateWithInProgress() throws Exception { assertSuccessful(inProgressSnapshot); } - assertStablePagination(repos, allSnapshotNames, GetSnapshotsRequest.SortBy.START_TIME); - assertStablePagination(repos, allSnapshotNames, GetSnapshotsRequest.SortBy.NAME); - assertStablePagination(repos, allSnapshotNames, GetSnapshotsRequest.SortBy.INDICES); + assertStablePagination(repos, allSnapshotNames, SnapshotSortKey.START_TIME); + assertStablePagination(repos, allSnapshotNames, SnapshotSortKey.NAME); + assertStablePagination(repos, allSnapshotNames, SnapshotSortKey.INDICES); } public void testPaginationRequiresVerboseListing() throws Exception { @@ -228,14 +221,14 @@ public void testPaginationRequiresVerboseListing() throws Exception { ActionRequestValidationException.class, clusterAdmin().prepareGetSnapshots(repoName) .setVerbose(false) - .setSort(GetSnapshotsRequest.SortBy.DURATION) + .setSort(SnapshotSortKey.DURATION) .setSize(GetSnapshotsRequest.NO_LIMIT) ); expectThrows( ActionRequestValidationException.class, clusterAdmin().prepareGetSnapshots(repoName) .setVerbose(false) - .setSort(GetSnapshotsRequest.SortBy.START_TIME) + .setSort(SnapshotSortKey.START_TIME) .setSize(randomIntBetween(1, 100)) ); } @@ -258,16 +251,11 @@ public void testExcludePatterns() throws Exception { allSnapshotNames.addAll(namesOtherRepo); final SortOrder order = SortOrder.DESC; - final List allSorted = allSnapshotsSorted( - allSnapshotNames, - new String[] { "*" }, - GetSnapshotsRequest.SortBy.REPOSITORY, - order - ); + final List allSorted = allSnapshotsSorted(allSnapshotNames, new String[] { "*" }, SnapshotSortKey.REPOSITORY, order); final List allSortedWithoutOther = allSnapshotsSorted( allSnapshotNamesWithoutOther, new String[] { 
"*", "-" + otherRepo }, - GetSnapshotsRequest.SortBy.REPOSITORY, + SnapshotSortKey.REPOSITORY, order ); assertThat(allSortedWithoutOther, is(allSorted.subList(0, allSnapshotNamesWithoutOther.size()))); @@ -275,7 +263,7 @@ public void testExcludePatterns() throws Exception { final List allInOther = allSnapshotsSorted( namesOtherRepo, new String[] { "*", "-test-repo-*" }, - GetSnapshotsRequest.SortBy.REPOSITORY, + SnapshotSortKey.REPOSITORY, order ); assertThat(allInOther, is(allSorted.subList(allSnapshotNamesWithoutOther.size(), allSorted.size()))); @@ -289,7 +277,7 @@ public void testExcludePatterns() throws Exception { final List allInOtherWithoutOtherPrefix = allSnapshotsSorted( namesOtherRepo, patternOtherRepo, - GetSnapshotsRequest.SortBy.REPOSITORY, + SnapshotSortKey.REPOSITORY, order, "-other*" ); @@ -298,7 +286,7 @@ public void testExcludePatterns() throws Exception { final List allInOtherWithoutOtherExplicit = allSnapshotsSorted( namesOtherRepo, patternOtherRepo, - GetSnapshotsRequest.SortBy.REPOSITORY, + SnapshotSortKey.REPOSITORY, order, "-" + otherPrefixSnapshot1, "-" + otherPrefixSnapshot2 @@ -345,7 +333,7 @@ public void testNamesStartingInDash() { final SnapshotInfo weirdSnapshot2InWeird2 = createFullSnapshot(weirdRepo2, weirdSnapshot2); final List allSnapshots = clusterAdmin().prepareGetSnapshots(matchAllPattern()) - .setSort(GetSnapshotsRequest.SortBy.REPOSITORY) + .setSort(SnapshotSortKey.REPOSITORY) .get() .getSnapshots(); assertThat(allSnapshots, hasSize(9)); @@ -407,11 +395,7 @@ public void testNamesStartingInDash() { } private List getAllByPatterns(String[] repos, String[] snapshots) { - return clusterAdmin().prepareGetSnapshots(repos) - .setSnapshots(snapshots) - .setSort(GetSnapshotsRequest.SortBy.REPOSITORY) - .get() - .getSnapshots(); + return clusterAdmin().prepareGetSnapshots(repos).setSnapshots(snapshots).setSort(SnapshotSortKey.REPOSITORY).get().getSnapshots(); } public void testFilterBySLMPolicy() throws Exception { @@ -420,7 +404,7 @@ public void testFilterBySLMPolicy() throws Exception { createNSnapshots(repoName, randomIntBetween(1, 5)); final List snapshotsWithoutPolicy = clusterAdmin().prepareGetSnapshots(matchAllPattern()) .setSnapshots(matchAllPattern()) - .setSort(GetSnapshotsRequest.SortBy.NAME) + .setSort(SnapshotSortKey.NAME) .get() .getSnapshots(); final String snapshotWithPolicy = "snapshot-with-policy"; @@ -456,7 +440,7 @@ public void testFilterBySLMPolicy() throws Exception { final List allSnapshots = clusterAdmin().prepareGetSnapshots(matchAllPattern()) .setSnapshots(matchAllPattern()) - .setSort(GetSnapshotsRequest.SortBy.NAME) + .setSort(SnapshotSortKey.NAME) .get() .getSnapshots(); assertThat(getAllSnapshotsForPolicies(GetSnapshotsRequest.NO_POLICY_PATTERN, policyName, otherPolicyName), is(allSnapshots)); @@ -477,7 +461,7 @@ public void testSortAfter() throws Exception { final List allSnapshotInfo = clusterAdmin().prepareGetSnapshots(matchAllPattern()) .setSnapshots(matchAllPattern()) - .setSort(GetSnapshotsRequest.SortBy.START_TIME) + .setSort(SnapshotSortKey.START_TIME) .get() .getSnapshots(); assertThat(allSnapshotInfo, is(List.of(snapshot1, snapshot2, snapshot3))); @@ -504,7 +488,7 @@ public void testSortAfter() throws Exception { final List allSnapshotInfoDesc = clusterAdmin().prepareGetSnapshots(matchAllPattern()) .setSnapshots(matchAllPattern()) - .setSort(GetSnapshotsRequest.SortBy.START_TIME) + .setSort(SnapshotSortKey.START_TIME) .setOrder(SortOrder.DESC) .get() .getSnapshots(); @@ -525,7 +509,7 @@ public void testSortAfter() throws 
Exception { final List allSnapshotInfoByDuration = clusterAdmin().prepareGetSnapshots(matchAllPattern()) .setSnapshots(matchAllPattern()) - .setSort(GetSnapshotsRequest.SortBy.DURATION) + .setSort(SnapshotSortKey.DURATION) .get() .getSnapshots(); @@ -541,7 +525,7 @@ public void testSortAfter() throws Exception { final List allSnapshotInfoByDurationDesc = clusterAdmin().prepareGetSnapshots(matchAllPattern()) .setSnapshots(matchAllPattern()) - .setSort(GetSnapshotsRequest.SortBy.DURATION) + .setSort(SnapshotSortKey.DURATION) .setOrder(SortOrder.DESC) .get() .getSnapshots(); @@ -554,12 +538,12 @@ public void testSortAfter() throws Exception { final SnapshotInfo otherSnapshot = createFullSnapshot(repoName, "other-snapshot"); - assertThat(allSnapshots(new String[] { "snap*" }, GetSnapshotsRequest.SortBy.NAME, SortOrder.ASC, "a"), is(allSnapshotInfo)); - assertThat(allSnapshots(new String[] { "o*" }, GetSnapshotsRequest.SortBy.NAME, SortOrder.ASC, "a"), is(List.of(otherSnapshot))); + assertThat(allSnapshots(new String[] { "snap*" }, SnapshotSortKey.NAME, SortOrder.ASC, "a"), is(allSnapshotInfo)); + assertThat(allSnapshots(new String[] { "o*" }, SnapshotSortKey.NAME, SortOrder.ASC, "a"), is(List.of(otherSnapshot))); final GetSnapshotsResponse paginatedResponse = clusterAdmin().prepareGetSnapshots(matchAllPattern()) .setSnapshots("snap*") - .setSort(GetSnapshotsRequest.SortBy.NAME) + .setSort(SnapshotSortKey.NAME) .setFromSortValue("a") .setOffset(1) .setSize(1) @@ -568,7 +552,7 @@ public void testSortAfter() throws Exception { assertThat(paginatedResponse.totalCount(), is(3)); final GetSnapshotsResponse paginatedResponse2 = clusterAdmin().prepareGetSnapshots(matchAllPattern()) .setSnapshots("snap*") - .setSort(GetSnapshotsRequest.SortBy.NAME) + .setSort(SnapshotSortKey.NAME) .setFromSortValue("a") .setOffset(0) .setSize(2) @@ -587,7 +571,7 @@ public void testRetrievingSnapshotsWhenRepositoryIsMissing() throws Exception { snapshotNames.sort(String::compareTo); final GetSnapshotsResponse response = clusterAdmin().prepareGetSnapshots(repoName, missingRepoName) - .setSort(GetSnapshotsRequest.SortBy.NAME) + .setSort(SnapshotSortKey.NAME) .get(); assertThat(response.getSnapshots().stream().map(info -> info.snapshotId().getName()).toList(), equalTo(snapshotNames)); assertTrue(response.getFailures().containsKey(missingRepoName)); @@ -618,35 +602,30 @@ private SnapshotInfo createFullSnapshotWithUniqueTimestamps( } private List allAfterStartTimeAscending(long timestamp) { - return allSnapshots(matchAllPattern(), GetSnapshotsRequest.SortBy.START_TIME, SortOrder.ASC, timestamp); + return allSnapshots(matchAllPattern(), SnapshotSortKey.START_TIME, SortOrder.ASC, timestamp); } private List allBeforeStartTimeDescending(long timestamp) { - return allSnapshots(matchAllPattern(), GetSnapshotsRequest.SortBy.START_TIME, SortOrder.DESC, timestamp); + return allSnapshots(matchAllPattern(), SnapshotSortKey.START_TIME, SortOrder.DESC, timestamp); } private List allAfterNameAscending(String name) { - return allSnapshots(matchAllPattern(), GetSnapshotsRequest.SortBy.NAME, SortOrder.ASC, name); + return allSnapshots(matchAllPattern(), SnapshotSortKey.NAME, SortOrder.ASC, name); } private List allBeforeNameDescending(String name) { - return allSnapshots(matchAllPattern(), GetSnapshotsRequest.SortBy.NAME, SortOrder.DESC, name); + return allSnapshots(matchAllPattern(), SnapshotSortKey.NAME, SortOrder.DESC, name); } private List allAfterDurationAscending(long duration) { - return allSnapshots(matchAllPattern(), 
GetSnapshotsRequest.SortBy.DURATION, SortOrder.ASC, duration); + return allSnapshots(matchAllPattern(), SnapshotSortKey.DURATION, SortOrder.ASC, duration); } private List allBeforeDurationDescending(long duration) { - return allSnapshots(matchAllPattern(), GetSnapshotsRequest.SortBy.DURATION, SortOrder.DESC, duration); + return allSnapshots(matchAllPattern(), SnapshotSortKey.DURATION, SortOrder.DESC, duration); } - private static List allSnapshots( - String[] snapshotNames, - GetSnapshotsRequest.SortBy sortBy, - SortOrder order, - Object fromSortValue - ) { + private static List allSnapshots(String[] snapshotNames, SnapshotSortKey sortBy, SortOrder order, Object fromSortValue) { return clusterAdmin().prepareGetSnapshots(matchAllPattern()) .setSnapshots(snapshotNames) .setSort(sortBy) @@ -660,12 +639,12 @@ private static List getAllSnapshotsForPolicies(String... policies) return clusterAdmin().prepareGetSnapshots(matchAllPattern()) .setSnapshots(matchAllPattern()) .setPolicies(policies) - .setSort(GetSnapshotsRequest.SortBy.NAME) + .setSort(SnapshotSortKey.NAME) .get() .getSnapshots(); } - private static void assertStablePagination(String[] repoNames, Collection allSnapshotNames, GetSnapshotsRequest.SortBy sort) { + private static void assertStablePagination(String[] repoNames, Collection allSnapshotNames, SnapshotSortKey sort) { final SortOrder order = randomFrom(SortOrder.values()); final List allSorted = allSnapshotsSorted(allSnapshotNames, repoNames, sort, order); @@ -680,7 +659,7 @@ private static void assertStablePagination(String[] repoNames, Collection allSnapshotsSorted( Collection allSnapshotNames, String[] repoNames, - GetSnapshotsRequest.SortBy sortBy, + SnapshotSortKey sortBy, SortOrder order, String... namePatterns ) { @@ -724,7 +703,7 @@ private static List allSnapshotsSorted( private static GetSnapshotsResponse sortedWithLimit( String[] repoNames, - GetSnapshotsRequest.SortBy sortBy, + SnapshotSortKey sortBy, String after, int size, SortOrder order, @@ -738,13 +717,7 @@ private static GetSnapshotsResponse sortedWithLimit( .get(); } - private static GetSnapshotsResponse sortedWithLimit( - String[] repoNames, - GetSnapshotsRequest.SortBy sortBy, - int offset, - int size, - SortOrder order - ) { + private static GetSnapshotsResponse sortedWithLimit(String[] repoNames, SnapshotSortKey sortBy, int offset, int size, SortOrder order) { return baseGetSnapshotsRequest(repoNames).setOffset(offset).setSort(sortBy).setSize(size).setOrder(order).get(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java b/server/src/internalClusterTest/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java index 422aa757656ac..c9c648e57169a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java @@ -19,7 +19,6 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.test.hamcrest.RegexMatcher; import java.lang.management.ManagementFactory; import java.lang.management.ThreadInfo; @@ -36,6 +35,7 @@ import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.in; +import static org.hamcrest.Matchers.matchesRegex; @ClusterScope(scope = Scope.TEST, numDataNodes = 0, numClientNodes = 0) 
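For context, a minimal sketch (not taken from the patch) of how the relocated sort key is used once GetSnapshotsRequest.SortBy becomes the standalone SnapshotSortKey enum; the repository name is a placeholder and the builder calls mirror the ones exercised in GetSnapshotsIT above:

// Minimal sketch, assuming a ClusterAdminClient as used in the integration tests; "my-repo" is a placeholder.
List<SnapshotInfo> byName = clusterAdmin().prepareGetSnapshots("my-repo")
    .setSort(SnapshotSortKey.NAME)   // previously GetSnapshotsRequest.SortBy.NAME
    .setOrder(SortOrder.ASC)
    .get()
    .getSnapshots();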
public class SimpleThreadPoolIT extends ESIntegTestCase { @@ -107,7 +107,7 @@ public void testThreadNames() throws Exception { + "|" + Pattern.quote(ESIntegTestCase.TEST_CLUSTER_NODE_PREFIX) + ")"; - assertThat(threadName, RegexMatcher.matches("\\[" + nodePrefix + "\\d+\\]")); + assertThat(threadName, matchesRegex("elasticsearch\\[" + nodePrefix + "\\d+\\].*")); } } diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index 36a940af63c61..9c142d18034c0 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -381,6 +381,7 @@ opens org.elasticsearch.common.logging to org.apache.logging.log4j.core; exports org.elasticsearch.action.datastreams.lifecycle; + exports org.elasticsearch.action.datastreams.autosharding; exports org.elasticsearch.action.downsample; exports org.elasticsearch.plugins.internal to diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index 33566203bb99a..83e5375546b63 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -29,6 +29,7 @@ import org.elasticsearch.index.mapper.DocumentParsingException; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.AutoscalingMissedIndicesUpdateException; +import org.elasticsearch.indices.FailureIndexNotSupportedException; import org.elasticsearch.indices.recovery.RecoveryCommitTooNewException; import org.elasticsearch.ingest.GraphStructureException; import org.elasticsearch.rest.ApiNotAvailableException; @@ -1910,6 +1911,12 @@ private enum ElasticsearchExceptionHandle { GraphStructureException::new, 177, TransportVersions.INGEST_GRAPH_STRUCTURE_EXCEPTION + ), + FAILURE_INDEX_NOT_SUPPORTED_EXCEPTION( + FailureIndexNotSupportedException.class, + FailureIndexNotSupportedException::new, + 178, + TransportVersions.ADD_FAILURE_STORE_INDICES_OPTIONS ); final Class exceptionClass; diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index a6fa7a9ea8e99..418720284eda8 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -133,6 +133,15 @@ static TransportVersion def(int id) { public static final TransportVersion INDEX_REQUEST_NORMALIZED_BYTES_PARSED = def(8_593_00_0); public static final TransportVersion INGEST_GRAPH_STRUCTURE_EXCEPTION = def(8_594_00_0); public static final TransportVersion ML_MODEL_IN_SERVICE_SETTINGS = def(8_595_00_0); + public static final TransportVersion RANDOM_AGG_SHARD_SEED = def(8_596_00_0); + public static final TransportVersion ESQL_TIMINGS = def(8_597_00_0); + public static final TransportVersion DATA_STREAM_AUTO_SHARDING_EVENT = def(8_598_00_0); + public static final TransportVersion ADD_FAILURE_STORE_INDICES_OPTIONS = def(8_599_00_0); + public static final TransportVersion ESQL_ENRICH_OPERATOR_STATUS = def(8_600_00_0); + public static final TransportVersion ESQL_SERIALIZE_ARRAY_VECTOR = def(8_601_00_0); + public static final TransportVersion ESQL_SERIALIZE_ARRAY_BLOCK = def(8_602_00_0); + public static final TransportVersion ADD_DATA_STREAM_GLOBAL_RETENTION = def(8_603_00_0); + public static final TransportVersion ALLOCATION_STATS = def(8_604_00_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index c1e12faab9cf8..241af6e7b6c45 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -166,6 +166,7 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_8_12_0 = new Version(8_12_00_99); public static final Version V_8_12_1 = new Version(8_12_01_99); public static final Version V_8_12_2 = new Version(8_12_02_99); + public static final Version V_8_12_3 = new Version(8_12_03_99); public static final Version V_8_13_0 = new Version(8_13_00_99); public static final Version V_8_14_0 = new Version(8_14_00_99); public static final Version CURRENT = V_8_14_0; diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index dc3b02872fd83..a8f26ab966646 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -12,6 +12,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.action.admin.cluster.allocation.TransportClusterAllocationExplainAction; import org.elasticsearch.action.admin.cluster.allocation.TransportDeleteDesiredBalanceAction; +import org.elasticsearch.action.admin.cluster.allocation.TransportGetAllocationStatsAction; import org.elasticsearch.action.admin.cluster.allocation.TransportGetDesiredBalanceAction; import org.elasticsearch.action.admin.cluster.configuration.TransportAddVotingConfigExclusionsAction; import org.elasticsearch.action.admin.cluster.configuration.TransportClearVotingConfigExclusionsAction; @@ -645,6 +646,7 @@ public void reg actions.register(TransportAddVotingConfigExclusionsAction.TYPE, TransportAddVotingConfigExclusionsAction.class); actions.register(TransportClearVotingConfigExclusionsAction.TYPE, TransportClearVotingConfigExclusionsAction.class); actions.register(TransportClusterAllocationExplainAction.TYPE, TransportClusterAllocationExplainAction.class); + actions.register(TransportGetAllocationStatsAction.TYPE, TransportGetAllocationStatsAction.class); actions.register(TransportGetDesiredBalanceAction.TYPE, TransportGetDesiredBalanceAction.class); actions.register(TransportDeleteDesiredBalanceAction.TYPE, TransportDeleteDesiredBalanceAction.class); actions.register(ClusterStatsAction.INSTANCE, TransportClusterStatsAction.class); diff --git a/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java b/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java index 7f3578ce9f16f..bfe1ff04b7b77 100644 --- a/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java +++ b/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java @@ -185,7 +185,7 @@ default Index getConcreteWriteIndex(IndexAbstraction ia, Metadata metadata) { */ enum OpType { /** - * Index the source. If there an existing document with the id, it will + * Index the source. If there is an existing document with the id, it will * be replaced. 
*/ INDEX(0), diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java new file mode 100644 index 0000000000000..a17a627342c4f --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java @@ -0,0 +1,132 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.cluster.allocation; + +import org.elasticsearch.TransportVersions; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.MasterNodeReadRequest; +import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.routing.allocation.AllocationStatsService; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStats; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.Map; + +public class TransportGetAllocationStatsAction extends TransportMasterNodeReadAction< + TransportGetAllocationStatsAction.Request, + TransportGetAllocationStatsAction.Response> { + + public static final ActionType TYPE = new ActionType<>("cluster:monitor/allocation/stats"); + + private final AllocationStatsService allocationStatsService; + + @Inject + public TransportGetAllocationStatsAction( + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + AllocationStatsService allocationStatsService + ) { + super( + TYPE.name(), + transportService, + clusterService, + threadPool, + actionFilters, + TransportGetAllocationStatsAction.Request::new, + indexNameExpressionResolver, + TransportGetAllocationStatsAction.Response::new, + threadPool.executor(ThreadPool.Names.MANAGEMENT) + ); + this.allocationStatsService = allocationStatsService; + } + + @Override + protected void doExecute(Task task, Request request, ActionListener listener) { + if (clusterService.state().getMinTransportVersion().before(TransportVersions.ALLOCATION_STATS)) { + // The action is not available before ALLOCATION_STATS + listener.onResponse(new Response(Map.of())); + return; + } + super.doExecute(task, request, listener); + } + + @Override + protected 
void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) throws Exception { + listener.onResponse(new Response(allocationStatsService.stats())); + } + + @Override + protected ClusterBlockException checkBlock(Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + } + + public static class Request extends MasterNodeReadRequest { + + public Request(TaskId parentTaskId) { + setParentTask(parentTaskId); + } + + public Request(StreamInput in) throws IOException { + super(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + assert out.getTransportVersion().onOrAfter(TransportVersions.ALLOCATION_STATS); + super.writeTo(out); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + } + + public static class Response extends ActionResponse { + + private final Map nodeAllocationStats; + + public Response(Map nodeAllocationStats) { + this.nodeAllocationStats = nodeAllocationStats; + } + + public Response(StreamInput in) throws IOException { + super(in); + this.nodeAllocationStats = in.readImmutableMap(StreamInput::readString, NodeAllocationStats::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(nodeAllocationStats, StreamOutput::writeString, StreamOutput::writeWriteable); + } + + public Map getNodeAllocationStats() { + return nodeAllocationStats; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportPostFeatureUpgradeAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportPostFeatureUpgradeAction.java index bd5114bf91ed2..281e26a44f335 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportPostFeatureUpgradeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportPostFeatureUpgradeAction.java @@ -95,6 +95,7 @@ protected void masterOperation( SYSTEM_INDEX_UPGRADE_TASK_NAME, SYSTEM_INDEX_UPGRADE_TASK_NAME, new SystemIndexMigrationTaskParams(), + null, ActionListener.wrap(startedTask -> { listener.onResponse(new PostFeatureUpgradeResponse(true, featuresToMigrate, null, null)); }, ex -> { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoMetrics.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoMetrics.java index 3e632f9bdd212..39e210571f37b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoMetrics.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoMetrics.java @@ -14,6 +14,7 @@ import java.io.IOException; import java.util.Arrays; +import java.util.HashSet; import java.util.Set; import java.util.stream.Collectors; @@ -21,13 +22,14 @@ * This class is a container that encapsulates the necessary information needed to indicate which node information is requested. 
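For orientation, a minimal sketch (hedged, not part of the patch) of how the new allocation statistics surface through the nodes-stats API once the ALLOCATIONS metric and NodesStatsRequestBuilder#setAllocationStats shown further below are in place; it assumes a ClusterAdminClient as in the tests above:

// Minimal sketch; allocation stats may be null for nodes that predate TransportVersions.ALLOCATION_STATS.
NodesStatsResponse stats = clusterAdmin().prepareNodesStats()
    .setAllocationStats(true)   // selects NodesStatsRequestParameters.Metric.ALLOCATIONS ("allocations")
    .get();
for (NodeStats nodeStats : stats.getNodes()) {
    NodeAllocationStats allocation = nodeStats.getNodeAllocationStats();
}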
*/ public class NodesInfoMetrics implements Writeable { - private Set requestedMetrics = Metric.allMetrics(); + private final Set requestedMetrics; - public NodesInfoMetrics() {} + public NodesInfoMetrics() { + requestedMetrics = new HashSet<>(Metric.allMetrics()); + } public NodesInfoMetrics(StreamInput in) throws IOException { - requestedMetrics.clear(); - requestedMetrics.addAll(Arrays.asList(in.readStringArray())); + requestedMetrics = in.readCollectionAsImmutableSet(StreamInput::readString); } public Set requestedMetrics() { @@ -36,7 +38,7 @@ public Set requestedMetrics() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeStringArray(requestedMetrics.toArray(new String[0])); + out.writeStringCollection(requestedMetrics); } /** @@ -58,6 +60,10 @@ public enum Metric { AGGREGATIONS("aggregations"), INDICES("indices"); + private static final Set ALL_METRICS = Arrays.stream(values()) + .map(Metric::metricName) + .collect(Collectors.toUnmodifiableSet()); + private final String metricName; Metric(String name) { @@ -69,7 +75,7 @@ public String metricName() { } public static Set allMetrics() { - return Arrays.stream(values()).map(Metric::metricName).collect(Collectors.toSet()); + return ALL_METRICS; } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java index 595e441e9b2cf..8fcb5a320bd41 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.support.nodes.BaseNodeResponse; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStats; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -97,6 +98,9 @@ public class NodeStats extends BaseNodeResponse implements ChunkedToXContent { @Nullable private final RepositoriesStats repositoriesStats; + @Nullable + private final NodeAllocationStats nodeAllocationStats; + public NodeStats(StreamInput in) throws IOException { super(in); timestamp = in.readVLong(); @@ -117,11 +121,12 @@ public NodeStats(StreamInput in) throws IOException { ingestStats = in.readOptionalWriteable(IngestStats::read); adaptiveSelectionStats = in.readOptionalWriteable(AdaptiveSelectionStats::new); indexingPressureStats = in.readOptionalWriteable(IndexingPressureStats::new); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { - repositoriesStats = in.readOptionalWriteable(RepositoriesStats::new); - } else { - repositoriesStats = null; - } + repositoriesStats = in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X) + ? in.readOptionalWriteable(RepositoriesStats::new) + : null; + nodeAllocationStats = in.getTransportVersion().onOrAfter(TransportVersions.ALLOCATION_STATS) + ? 
in.readOptionalWriteable(NodeAllocationStats::new) + : null; } public NodeStats( @@ -142,7 +147,8 @@ public NodeStats( @Nullable AdaptiveSelectionStats adaptiveSelectionStats, @Nullable ScriptCacheStats scriptCacheStats, @Nullable IndexingPressureStats indexingPressureStats, - @Nullable RepositoriesStats repositoriesStats + @Nullable RepositoriesStats repositoriesStats, + @Nullable NodeAllocationStats nodeAllocationStats ) { super(node); this.timestamp = timestamp; @@ -162,6 +168,31 @@ public NodeStats( this.scriptCacheStats = scriptCacheStats; this.indexingPressureStats = indexingPressureStats; this.repositoriesStats = repositoriesStats; + this.nodeAllocationStats = nodeAllocationStats; + } + + public NodeStats withNodeAllocationStats(@Nullable NodeAllocationStats nodeAllocationStats) { + return new NodeStats( + getNode(), + timestamp, + indices, + os, + process, + jvm, + threadPool, + fs, + transport, + http, + breaker, + scriptStats, + discoveryStats, + ingestStats, + adaptiveSelectionStats, + scriptCacheStats, + indexingPressureStats, + repositoriesStats, + nodeAllocationStats + ); } public long getTimestamp() { @@ -271,6 +302,11 @@ public RepositoriesStats getRepositoriesStats() { return repositoriesStats; } + @Nullable + public NodeAllocationStats getNodeAllocationStats() { + return nodeAllocationStats; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -297,6 +333,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeOptionalWriteable(repositoriesStats); } + if (out.getTransportVersion().onOrAfter(TransportVersions.ALLOCATION_STATS)) { + out.writeOptionalWriteable(nodeAllocationStats); + } } @Override @@ -343,7 +382,11 @@ public Iterator toXContentChunked(ToXContent.Params outerP ifPresent(getIngestStats()).toXContentChunked(outerParams), singleChunk(ifPresent(getAdaptiveSelectionStats())), ifPresent(getScriptCacheStats()).toXContentChunked(outerParams), - singleChunk((builder, p) -> builder.value(ifPresent(getIndexingPressureStats()), p).value(ifPresent(getRepositoriesStats()), p)) + singleChunk( + (builder, p) -> builder.value(ifPresent(getIndexingPressureStats()), p) + .value(ifPresent(getRepositoriesStats()), p) + .value(ifPresent(getNodeAllocationStats()), p) + ) ); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java index ab7278c629bf2..8d863653874bb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java @@ -158,6 +158,11 @@ public NodesStatsRequestBuilder setRepositoryStats(boolean repositoryStats) { return this; } + public NodesStatsRequestBuilder setAllocationStats(boolean allocationStats) { + addOrRemoveMetric(allocationStats, NodesStatsRequestParameters.Metric.ALLOCATIONS); + return this; + } + /** * Helper method for adding metrics to a request */ diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParameters.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParameters.java index 2948af59d17fd..9e965fcccb2f3 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParameters.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParameters.java @@ -89,7 +89,8 @@ public enum Metric { ADAPTIVE_SELECTION("adaptive_selection"), SCRIPT_CACHE("script_cache"), INDEXING_PRESSURE("indexing_pressure"), - REPOSITORIES("repositories"); + REPOSITORIES("repositories"), + ALLOCATIONS("allocations"); private String metricName; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index 1edc57b0a7df2..6ff2303997482 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -8,11 +8,15 @@ package org.elasticsearch.action.admin.cluster.node.stats; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.admin.cluster.allocation.TransportGetAllocationStatsAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.nodes.TransportNodesAction; +import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStats; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; @@ -42,7 +46,9 @@ public class TransportNodesStatsAction extends TransportNodesAction< NodeStats> { public static final ActionType TYPE = new ActionType<>("cluster:monitor/nodes/stats"); + private final NodeService nodeService; + private final NodeClient client; @Inject public TransportNodesStatsAction( @@ -50,7 +56,8 @@ public TransportNodesStatsAction( ClusterService clusterService, TransportService transportService, NodeService nodeService, - ActionFilters actionFilters + ActionFilters actionFilters, + NodeClient client ) { super( TYPE.name(), @@ -61,6 +68,7 @@ public TransportNodesStatsAction( threadPool.executor(ThreadPool.Names.MANAGEMENT) ); this.nodeService = nodeService; + this.client = client; } @Override @@ -68,6 +76,34 @@ protected NodesStatsResponse newResponse(NodesStatsRequest request, List responses, + List failures, + ActionListener listener + ) { + Set metrics = request.getNodesStatsRequestParameters().requestedMetrics(); + if (NodesStatsRequestParameters.Metric.ALLOCATIONS.containedIn(metrics)) { + client.execute( + TransportGetAllocationStatsAction.TYPE, + new TransportGetAllocationStatsAction.Request(new TaskId(clusterService.localNode().getId(), task.getId())), + listener.delegateFailure((l, r) -> { + ActionListener.respondAndRelease(l, newResponse(request, merge(responses, r.getNodeAllocationStats()), failures)); + }) + ); + } else { + ActionListener.run(listener, l -> ActionListener.respondAndRelease(l, newResponse(request, responses, failures))); + } + } + + private static List merge(List responses, Map allocationStats) { + return responses.stream() + .map(response -> response.withNodeAllocationStats(allocationStats.get(response.getNode().getId()))) + .toList(); + } + @Override protected NodeStatsRequest newNodeRequest(NodesStatsRequest request) { return new 
NodeStatsRequest(request); @@ -80,10 +116,10 @@ protected NodeStats newNodeResponse(StreamInput in, DiscoveryNode node) throws I } @Override - protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest, Task task) { + protected NodeStats nodeOperation(NodeStatsRequest request, Task task) { assert task instanceof CancellableTask; - final NodesStatsRequestParameters nodesStatsRequestParameters = nodeStatsRequest.getNodesStatsRequestParameters(); + final NodesStatsRequestParameters nodesStatsRequestParameters = request.getNodesStatsRequestParameters(); Set metrics = nodesStatsRequestParameters.requestedMetrics(); return nodeService.stats( nodesStatsRequestParameters.indices(), diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java index fdb3a958f6f6e..9e0b6937257b4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java @@ -209,7 +209,7 @@ void getFinishedTaskFromIndex(Task thisTask, GetTaskRequest request, ActionListe client.get(get, ActionListener.wrap(r -> onGetFinishedTaskFromIndex(r, listener), e -> { if (ExceptionsHelper.unwrap(e, IndexNotFoundException.class) != null) { - // We haven't yet created the index for the task results so it can't be found. + // We haven't yet created the index for the task results, so it can't be found. listener.onFailure( new ResourceNotFoundException("task [{}] isn't running and hasn't stored its results", e, request.getTaskId()) ); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java index b31dde0f75613..bed02ef2cbc19 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java @@ -16,29 +16,20 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.RepositoriesMetadata; -import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.repositories.RepositoryMissingException; +import org.elasticsearch.repositories.ResolvedRepositories; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import java.util.ArrayList; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Set; - /** * Transport action for get repositories operation */ public class TransportGetRepositoriesAction extends TransportMasterNodeReadAction { - public static final String ALL_PATTERN = "_all"; - @Inject public TransportGetRepositoriesAction( TransportService transportService, @@ -60,11 +51,6 @@ public TransportGetRepositoriesAction( ); } - public static boolean isMatchAll(String[] patterns) { - 
return (patterns.length == 0) - || (patterns.length == 1 && (ALL_PATTERN.equalsIgnoreCase(patterns[0]) || Regex.isMatchAllPattern(patterns[0]))); - } - @Override protected ClusterBlockException checkBlock(GetRepositoriesRequest request, ClusterState state) { return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); @@ -77,69 +63,11 @@ protected void masterOperation( ClusterState state, final ActionListener listener ) { - RepositoriesResult result = getRepositories(state, request.repositories()); + final var result = ResolvedRepositories.resolve(state, request.repositories()); if (result.hasMissingRepositories()) { listener.onFailure(new RepositoryMissingException(String.join(", ", result.missing()))); } else { - listener.onResponse(new GetRepositoriesResponse(new RepositoriesMetadata(result.metadata))); - } - } - - /** - * Get repository metadata for given repository names from given cluster state. - * - * @param state Cluster state - * @param repoNames Repository names or patterns to get metadata for - * @return a result with the repository metadata that were found in the cluster state and the missing repositories - */ - public static RepositoriesResult getRepositories(ClusterState state, String[] repoNames) { - RepositoriesMetadata repositories = RepositoriesMetadata.get(state); - if (isMatchAll(repoNames)) { - return new RepositoriesResult(repositories.repositories()); - } - final List missingRepositories = new ArrayList<>(); - final List includePatterns = new ArrayList<>(); - final List excludePatterns = new ArrayList<>(); - boolean seenWildcard = false; - for (String repositoryOrPattern : repoNames) { - if (seenWildcard && repositoryOrPattern.length() > 1 && repositoryOrPattern.startsWith("-")) { - excludePatterns.add(repositoryOrPattern.substring(1)); - } else { - if (Regex.isSimpleMatchPattern(repositoryOrPattern)) { - seenWildcard = true; - } else { - if (repositories.repository(repositoryOrPattern) == null) { - missingRepositories.add(repositoryOrPattern); - } - } - includePatterns.add(repositoryOrPattern); - } - } - final String[] excludes = excludePatterns.toArray(Strings.EMPTY_ARRAY); - final Set repositoryListBuilder = new LinkedHashSet<>(); // to keep insertion order - for (String repositoryOrPattern : includePatterns) { - for (RepositoryMetadata repository : repositories.repositories()) { - if (repositoryListBuilder.contains(repository) == false - && Regex.simpleMatch(repositoryOrPattern, repository.name()) - && Regex.simpleMatch(excludes, repository.name()) == false) { - repositoryListBuilder.add(repository); - } - } - } - return new RepositoriesResult(List.copyOf(repositoryListBuilder), missingRepositories); - } - - /** - * A holder class that consists of the repository metadata and the names of the repositories that were not found in the cluster state. 
- */ - public record RepositoriesResult(List metadata, List missing) { - - RepositoriesResult(List repositoryMetadata) { - this(repositoryMetadata, List.of()); - } - - boolean hasMissingRepositories() { - return missing.isEmpty() == false; + listener.onResponse(new GetRepositoriesResponse(new RepositoriesMetadata(result.repositoryMetadata()))); } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java index ccfd192246c0a..826fa453e0402 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java @@ -21,6 +21,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.core.Predicates; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.search.internal.AliasFilter; @@ -85,8 +86,8 @@ protected void masterOperation( final String[] aliases = indexNameExpressionResolver.indexAliases( clusterState, index, - aliasMetadata -> true, - dataStreamAlias -> true, + Predicates.always(), + Predicates.always(), true, indicesAndAliases ); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java index fda371f9364f9..b47abc0e4dd8f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java @@ -15,19 +15,15 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.core.Nullable; import org.elasticsearch.search.sort.SortOrder; -import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import java.io.IOException; -import java.nio.charset.StandardCharsets; import java.util.Arrays; -import java.util.Base64; import java.util.Map; import static org.elasticsearch.action.ValidateActions.addValidationError; @@ -51,17 +47,20 @@ public class GetSnapshotsRequest extends MasterNodeRequest private int size = NO_LIMIT; /** - * Numeric offset at which to start fetching snapshots. Mutually exclusive with {@link After} if not equal to {@code 0}. + * Numeric offset at which to start fetching snapshots. Mutually exclusive with {@link #after} if not equal to {@code 0}. */ private int offset = 0; + /** + * Sort key value at which to start fetching snapshots. Mutually exclusive with {@link #offset} if not {@code null}. 
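/*
 * The Predicates.always() calls introduced above replace ad-hoc "x -> true" lambdas with a shared
 * constant. A plain-Java sketch of that idiom follows; the real org.elasticsearch.core.Predicates
 * implementation may differ in detail, this only illustrates the pattern of reusing one
 * always-true predicate instance instead of allocating a fresh lambda at every call site.
 */
import java.util.function.Predicate;

final class PredicatesSketch {

    private PredicatesSketch() {}

    @SuppressWarnings("rawtypes")
    private static final Predicate ALWAYS = new Predicate() {
        @Override
        public boolean test(Object ignored) {
            return true;
        }

        @Override
        public String toString() {
            return "Predicates.always()"; // readable in logs and assertions, unlike a lambda
        }
    };

    @SuppressWarnings("unchecked")
    static <T> Predicate<T> always() {
        return (Predicate<T>) ALWAYS;
    }

    public static void main(String[] args) {
        final Predicate<String> p = PredicatesSketch.always();
        System.out.println(p.test("anything") + " " + p); // true Predicates.always()
    }
}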
+ */ @Nullable - private After after; + private SnapshotSortKey.After after; @Nullable private String fromSortValue; - private SortBy sort = SortBy.START_TIME; + private SnapshotSortKey sort = SnapshotSortKey.START_TIME; private SortOrder order = SortOrder.ASC; @@ -105,8 +104,8 @@ public GetSnapshotsRequest(StreamInput in) throws IOException { snapshots = in.readStringArray(); ignoreUnavailable = in.readBoolean(); verbose = in.readBoolean(); - after = in.readOptionalWriteable(After::new); - sort = in.readEnum(SortBy.class); + after = in.readOptionalWriteable(SnapshotSortKey.After::new); + sort = in.readEnum(SnapshotSortKey.class); size = in.readVInt(); order = SortOrder.readFromStream(in); offset = in.readVInt(); @@ -146,7 +145,7 @@ public ActionRequestValidationException validate() { validationException = addValidationError("size must be -1 or greater than 0", validationException); } if (verbose == false) { - if (sort != SortBy.START_TIME) { + if (sort != SnapshotSortKey.START_TIME) { validationException = addValidationError("can't use non-default sort with verbose=false", validationException); } if (size > 0) { @@ -283,15 +282,16 @@ public boolean includeIndexNames() { return includeIndexNames; } - public After after() { + @Nullable + public SnapshotSortKey.After after() { return after; } - public SortBy sort() { + public SnapshotSortKey sort() { return sort; } - public GetSnapshotsRequest after(@Nullable After after) { + public GetSnapshotsRequest after(@Nullable SnapshotSortKey.After after) { this.after = after; return this; } @@ -306,7 +306,7 @@ public String fromSortValue() { return fromSortValue; } - public GetSnapshotsRequest sort(SortBy sort) { + public GetSnapshotsRequest sort(SnapshotSortKey sort) { this.sort = sort; return this; } @@ -350,107 +350,6 @@ public Task createTask(long id, String type, String action, TaskId parentTaskId, return new CancellableTask(id, type, action, getDescription(), parentTaskId, headers); } - public enum SortBy { - START_TIME("start_time"), - NAME("name"), - DURATION("duration"), - INDICES("index_count"), - SHARDS("shard_count"), - FAILED_SHARDS("failed_shard_count"), - REPOSITORY("repository"); - - private final String param; - - SortBy(String param) { - this.param = param; - } - - @Override - public String toString() { - return param; - } - - public static SortBy of(String value) { - return switch (value) { - case "start_time" -> START_TIME; - case "name" -> NAME; - case "duration" -> DURATION; - case "index_count" -> INDICES; - case "shard_count" -> SHARDS; - case "failed_shard_count" -> FAILED_SHARDS; - case "repository" -> REPOSITORY; - default -> throw new IllegalArgumentException("unknown sort order [" + value + "]"); - }; - } - } - - public static final class After implements Writeable { - - private final String value; - - private final String repoName; - - private final String snapshotName; - - After(StreamInput in) throws IOException { - this(in.readString(), in.readString(), in.readString()); - } - - public static After fromQueryParam(String param) { - final String[] parts = new String(Base64.getUrlDecoder().decode(param), StandardCharsets.UTF_8).split(","); - if (parts.length != 3) { - throw new IllegalArgumentException("invalid ?after parameter [" + param + "]"); - } - return new After(parts[0], parts[1], parts[2]); - } - - @Nullable - public static After from(@Nullable SnapshotInfo snapshotInfo, SortBy sortBy) { - if (snapshotInfo == null) { - return null; - } - final String afterValue = switch (sortBy) { - case START_TIME -> 
String.valueOf(snapshotInfo.startTime()); - case NAME -> snapshotInfo.snapshotId().getName(); - case DURATION -> String.valueOf(snapshotInfo.endTime() - snapshotInfo.startTime()); - case INDICES -> String.valueOf(snapshotInfo.indices().size()); - case SHARDS -> String.valueOf(snapshotInfo.totalShards()); - case FAILED_SHARDS -> String.valueOf(snapshotInfo.failedShards()); - case REPOSITORY -> snapshotInfo.repository(); - }; - return new After(afterValue, snapshotInfo.repository(), snapshotInfo.snapshotId().getName()); - } - - public After(String value, String repoName, String snapshotName) { - this.value = value; - this.repoName = repoName; - this.snapshotName = snapshotName; - } - - public String value() { - return value; - } - - public String snapshotName() { - return snapshotName; - } - - public String repoName() { - return repoName; - } - - public String asQueryParam() { - return Base64.getUrlEncoder().encodeToString((value + "," + repoName + "," + snapshotName).getBytes(StandardCharsets.UTF_8)); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(value); - out.writeString(repoName); - out.writeString(snapshotName); - } - } - @Override public String getDescription() { final StringBuilder stringBuilder = new StringBuilder("repositories["); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java index eadbaa8aa0952..68877f6144693 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java @@ -109,10 +109,10 @@ public GetSnapshotsRequestBuilder setVerbose(boolean verbose) { } public GetSnapshotsRequestBuilder setAfter(String after) { - return setAfter(after == null ? null : GetSnapshotsRequest.After.fromQueryParam(after)); + return setAfter(after == null ? null : SnapshotSortKey.decodeAfterQueryParam(after)); } - public GetSnapshotsRequestBuilder setAfter(@Nullable GetSnapshotsRequest.After after) { + public GetSnapshotsRequestBuilder setAfter(@Nullable SnapshotSortKey.After after) { request.after(after); return this; } @@ -122,7 +122,7 @@ public GetSnapshotsRequestBuilder setFromSortValue(@Nullable String fromSortValu return this; } - public GetSnapshotsRequestBuilder setSort(GetSnapshotsRequest.SortBy sort) { + public GetSnapshotsRequestBuilder setSort(SnapshotSortKey sort) { request.sort(sort); return this; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/SnapshotSortKey.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/SnapshotSortKey.java new file mode 100644 index 0000000000000..14735d13ae68e --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/SnapshotSortKey.java @@ -0,0 +1,270 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
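/*
 * Round-trip sketch of the ?after pagination cursor used by the get-snapshots API, as encoded by
 * After.asQueryParam() above and by SnapshotSortKey.encodeAfterQueryParam()/decodeAfterQueryParam()
 * below: the sort-key value, repository name and snapshot name are joined with commas and
 * URL-safe base64 encoded so they can travel as a single opaque query parameter.
 */
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Base64;

class AfterCursorSketch {

    static String encode(String sortValue, String repoName, String snapshotName) {
        final String raw = sortValue + "," + repoName + "," + snapshotName;
        return Base64.getUrlEncoder().encodeToString(raw.getBytes(StandardCharsets.UTF_8));
    }

    static String[] decode(String param) {
        final String[] parts = new String(Base64.getUrlDecoder().decode(param), StandardCharsets.UTF_8).split(",");
        if (parts.length != 3) {
            throw new IllegalArgumentException("invalid ?after parameter [" + param + "]");
        }
        return parts;
    }

    public static void main(String[] args) {
        final String cursor = encode("1700000000000", "my-repo", "snap-0042");
        System.out.println(cursor);                          // opaque, URL-safe token
        System.out.println(Arrays.toString(decode(cursor))); // [1700000000000, my-repo, snap-0042]
    }
}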
+ */ + +package org.elasticsearch.action.admin.cluster.snapshots.get; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Predicates; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.snapshots.SnapshotInfo; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Base64; +import java.util.Comparator; +import java.util.function.Predicate; +import java.util.function.ToLongFunction; + +/** + * Sort key for snapshots e.g. returned from the get-snapshots API. All values break ties using {@link SnapshotInfo#snapshotId} (i.e. by + * name). + */ +public enum SnapshotSortKey { + /** + * Sort by snapshot start time. + */ + START_TIME("start_time", Comparator.comparingLong(SnapshotInfo::startTime)) { + @Override + protected String getSortKeyValue(SnapshotInfo snapshotInfo) { + return Long.toString(snapshotInfo.startTime()); + } + + @Override + protected Predicate innerGetAfterPredicate(After after, SortOrder sortOrder) { + return after.longValuePredicate(SnapshotInfo::startTime, sortOrder); + } + }, + + /** + * Sort by snapshot name. + */ + NAME("name", Comparator.comparing(sni -> sni.snapshotId().getName())) { + @Override + protected String getSortKeyValue(SnapshotInfo snapshotInfo) { + return snapshotInfo.snapshotId().getName(); + } + + @Override + protected Predicate innerGetAfterPredicate(After after, SortOrder sortOrder) { + // TODO: cover via pre-flight predicate + final String snapshotName = after.snapshotName(); + final String repoName = after.repoName(); + return sortOrder == SortOrder.ASC + ? (info -> compareName(snapshotName, repoName, info) < 0) + : (info -> compareName(snapshotName, repoName, info) > 0); + } + }, + + /** + * Sort by snapshot duration (end time minus start time). + */ + DURATION("duration", Comparator.comparingLong(sni -> sni.endTime() - sni.startTime())) { + @Override + protected String getSortKeyValue(SnapshotInfo snapshotInfo) { + return Long.toString(snapshotInfo.endTime() - snapshotInfo.startTime()); + } + + @Override + protected Predicate innerGetAfterPredicate(After after, SortOrder sortOrder) { + return after.longValuePredicate(info -> info.endTime() - info.startTime(), sortOrder); + } + }, + + /** + * Sort by number of indices in the snapshot. + */ + INDICES("index_count", Comparator.comparingInt(sni -> sni.indices().size())) { + @Override + protected String getSortKeyValue(SnapshotInfo snapshotInfo) { + return Integer.toString(snapshotInfo.indices().size()); + } + + @Override + protected Predicate innerGetAfterPredicate(After after, SortOrder sortOrder) { + // TODO: cover via pre-flight predicate + return after.longValuePredicate(info -> info.indices().size(), sortOrder); + } + }, + + /** + * Sort by number of shards in the snapshot. + */ + SHARDS("shard_count", Comparator.comparingInt(SnapshotInfo::totalShards)) { + @Override + protected String getSortKeyValue(SnapshotInfo snapshotInfo) { + return Integer.toString(snapshotInfo.totalShards()); + } + + @Override + protected Predicate innerGetAfterPredicate(After after, SortOrder sortOrder) { + return after.longValuePredicate(SnapshotInfo::totalShards, sortOrder); + } + }, + + /** + * Sort by number of failed shards in the snapshot. 
+ */ + FAILED_SHARDS("failed_shard_count", Comparator.comparingInt(SnapshotInfo::failedShards)) { + @Override + protected String getSortKeyValue(SnapshotInfo snapshotInfo) { + return Integer.toString(snapshotInfo.failedShards()); + } + + @Override + protected Predicate innerGetAfterPredicate(After after, SortOrder sortOrder) { + return after.longValuePredicate(SnapshotInfo::failedShards, sortOrder); + } + }, + + /** + * Sort by repository name. + */ + REPOSITORY("repository", Comparator.comparing(SnapshotInfo::repository)) { + @Override + protected String getSortKeyValue(SnapshotInfo snapshotInfo) { + return snapshotInfo.repository(); + } + + @Override + protected Predicate innerGetAfterPredicate(After after, SortOrder sortOrder) { + // TODO: cover via pre-flight predicate + final String snapshotName = after.snapshotName(); + final String repoName = after.repoName(); + return sortOrder == SortOrder.ASC + ? (info -> compareRepositoryName(snapshotName, repoName, info) < 0) + : (info -> compareRepositoryName(snapshotName, repoName, info) > 0); + } + + private static int compareRepositoryName(String name, String repoName, SnapshotInfo info) { + final int res = repoName.compareTo(info.repository()); + if (res != 0) { + return res; + } + return name.compareTo(info.snapshotId().getName()); + } + }; + + private final String name; + private final Comparator ascendingSnapshotInfoComparator; + private final Comparator descendingSnapshotInfoComparator; + + SnapshotSortKey(String name, Comparator snapshotInfoComparator) { + this.name = name; + this.ascendingSnapshotInfoComparator = snapshotInfoComparator.thenComparing(SnapshotInfo::snapshotId); + this.descendingSnapshotInfoComparator = ascendingSnapshotInfoComparator.reversed(); + } + + @Override + public String toString() { + return name; + } + + /** + * @return a {@link Comparator} which sorts {@link SnapshotInfo} instances according to this sort key. + */ + public final Comparator getSnapshotInfoComparator(SortOrder sortOrder) { + return switch (sortOrder) { + case ASC -> ascendingSnapshotInfoComparator; + case DESC -> descendingSnapshotInfoComparator; + }; + } + + /** + * @return an {@link After} which can be included in a {@link GetSnapshotsRequest} (e.g. to be sent to a remote node) and ultimately + * converted into a predicate to filter out {@link SnapshotInfo} items which were returned on earlier pages of results. See also + * {@link #encodeAfterQueryParam} and {@link #getAfterPredicate}. + */ + public static After decodeAfterQueryParam(String param) { + final String[] parts = new String(Base64.getUrlDecoder().decode(param), StandardCharsets.UTF_8).split(","); + if (parts.length != 3) { + throw new IllegalArgumentException("invalid ?after parameter [" + param + "]"); + } + return new After(parts[0], parts[1], parts[2]); + } + + /** + * @return an encoded representation of the value of the sort key for the given {@link SnapshotInfo}, including the values of the + * snapshot name and repo name for tiebreaking purposes, which can be returned to the user so they can pass it back to the + * {@code ?after} param of a subsequent call to the get-snapshots API in order to retrieve the next page of results. 
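/*
 * Sketch of how each SnapshotSortKey builds its comparators (see the enum constructor above): the
 * key-specific comparator is extended with a tiebreak on the snapshot name so the ordering is
 * total, and the descending comparator is simply the ascending one reversed. The tiny Snap record
 * below is illustrative only.
 */
import java.util.Comparator;
import java.util.List;

class SortKeyComparatorSketch {

    record Snap(String name, long startTime) {}

    public static void main(String[] args) {
        final Comparator<Snap> ascending = Comparator.comparingLong(Snap::startTime).thenComparing(Snap::name);
        final Comparator<Snap> descending = ascending.reversed();

        final List<Snap> snaps = List.of(new Snap("b", 10), new Snap("a", 10), new Snap("c", 5));
        System.out.println(snaps.stream().sorted(ascending).map(Snap::name).toList());  // [c, a, b]
        System.out.println(snaps.stream().sorted(descending).map(Snap::name).toList()); // [b, a, c]
    }
}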
+ */ + public final String encodeAfterQueryParam(SnapshotInfo snapshotInfo) { + final var rawValue = getSortKeyValue(snapshotInfo) + "," + snapshotInfo.repository() + "," + snapshotInfo.snapshotId().getName(); + return Base64.getUrlEncoder().encodeToString(rawValue.getBytes(StandardCharsets.UTF_8)); + } + + /** + * @return a string representation of the value of the sort key for the given {@link SnapshotInfo}, which should be the last item in the + * response, which is combined with the snapshot and repository names, encoded, and returned to the user so they can pass it back to + * the {@code ?after} param of a subsequent call to the get-snapshots API in order to retrieve the next page of results. + */ + protected abstract String getSortKeyValue(SnapshotInfo snapshotInfo); + + /** + * @return a predicate to filter out {@link SnapshotInfo} items that match the user's query but which sort earlier than the given + * {@link After} value (i.e. they were returned on earlier pages of results). If {@code after} is {@code null} then the returned + * predicate matches all snapshots. + */ + public final Predicate getAfterPredicate(@Nullable After after, SortOrder sortOrder) { + return after == null ? Predicates.always() : innerGetAfterPredicate(after, sortOrder); + } + + /** + * @return a predicate to filter out {@link SnapshotInfo} items that match the user's query but which sort earlier than the given + * {@link After} value (i.e. they were returned on earlier pages of results). The {@code after} parameter is not {@code null}. + */ + protected abstract Predicate innerGetAfterPredicate(After after, SortOrder sortOrder); + + private static int compareName(String name, String repoName, SnapshotInfo info) { + final int res = name.compareTo(info.snapshotId().getName()); + if (res != 0) { + return res; + } + return repoName.compareTo(info.repository()); + } + + public static SnapshotSortKey of(String name) { + return switch (name) { + case "start_time" -> START_TIME; + case "name" -> NAME; + case "duration" -> DURATION; + case "index_count" -> INDICES; + case "shard_count" -> SHARDS; + case "failed_shard_count" -> FAILED_SHARDS; + case "repository" -> REPOSITORY; + default -> throw new IllegalArgumentException("unknown sort key [" + name + "]"); + }; + } + + public record After(String value, String repoName, String snapshotName) implements Writeable { + + After(StreamInput in) throws IOException { + this(in.readString(), in.readString(), in.readString()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(value); + out.writeString(repoName); + out.writeString(snapshotName); + } + + Predicate longValuePredicate(ToLongFunction extractor, SortOrder sortOrder) { + final var after = Long.parseLong(value); + return sortOrder == SortOrder.ASC ? 
info -> { + final long val = extractor.applyAsLong(info); + return after < val || (after == val && compareName(snapshotName, repoName, info) < 0); + } : info -> { + final long val = extractor.applyAsLong(info); + return after > val || (after == val && compareName(snapshotName, repoName, info) > 0); + }; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java index ca910a8d94078..28586c7a6410b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java @@ -10,7 +10,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.cluster.repositories.get.TransportGetRepositoriesAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.action.support.master.TransportMasterNodeAction; @@ -22,17 +21,24 @@ import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.util.concurrent.AbstractThrottledTaskRunner; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ListenableFuture; +import org.elasticsearch.common.util.concurrent.ThrottledIterator; import org.elasticsearch.core.Nullable; -import org.elasticsearch.repositories.GetSnapshotInfoContext; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.repositories.RepositoryMissingException; +import org.elasticsearch.repositories.ResolvedRepositories; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.snapshots.SnapshotId; @@ -41,13 +47,13 @@ import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; -import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -56,6 +62,7 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiPredicate; +import java.util.function.BooleanSupplier; import java.util.function.Predicate; import java.util.function.ToLongFunction; import java.util.stream.Stream; @@ -65,6 +72,8 @@ */ public class TransportGetSnapshotsAction extends TransportMasterNodeAction { + private static final Logger logger = 
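/*
 * Sketch of the "strictly after the cursor" filtering that longValuePredicate() above implements
 * for numeric sort keys: keep an item only if its value lies beyond the cursor value, or equals it
 * but sorts later in the (snapshot name, repository name) tiebreak order. The Snap record and the
 * field names are illustrative only.
 */
import java.util.function.Predicate;
import java.util.function.ToLongFunction;

class AfterPredicateSketch {

    record Snap(String repo, String name, long startTime) {}

    static int tieBreak(String afterName, String afterRepo, Snap snap) {
        final int byName = afterName.compareTo(snap.name());
        return byName != 0 ? byName : afterRepo.compareTo(snap.repo());
    }

    static Predicate<Snap> after(long afterValue, String afterName, String afterRepo, boolean ascending, ToLongFunction<Snap> extractor) {
        return ascending
            ? snap -> afterValue < extractor.applyAsLong(snap)
                || (afterValue == extractor.applyAsLong(snap) && tieBreak(afterName, afterRepo, snap) < 0)
            : snap -> afterValue > extractor.applyAsLong(snap)
                || (afterValue == extractor.applyAsLong(snap) && tieBreak(afterName, afterRepo, snap) > 0);
    }

    public static void main(String[] args) {
        final Predicate<Snap> nextPage = after(10, "a", "repo", true, Snap::startTime);
        System.out.println(nextPage.test(new Snap("repo", "a", 10))); // false: this is the cursor itself
        System.out.println(nextPage.test(new Snap("repo", "b", 10))); // true: same value, later name
        System.out.println(nextPage.test(new Snap("repo", "z", 9)));  // false: earlier value
    }
}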
LogManager.getLogger(TransportGetSnapshotsAction.class); + private final RepositoriesService repositoriesService; @Inject @@ -109,598 +118,435 @@ protected void masterOperation( ) { assert task instanceof CancellableTask : task + " not cancellable"; - getMultipleReposSnapshotInfo( + new GetSnapshotsOperation( + (CancellableTask) task, + ResolvedRepositories.resolve(state, request.repositories()), request.isSingleRepositoryRequest() == false, - SnapshotsInProgress.get(state), - TransportGetRepositoriesAction.getRepositories(state, request.repositories()), request.snapshots(), request.ignoreUnavailable(), - request.verbose(), - (CancellableTask) task, + SnapshotPredicates.fromRequest(request), request.sort(), - request.after(), - request.offset(), - request.size(), request.order(), request.fromSortValue(), - SnapshotPredicates.fromRequest(request), - request.includeIndexNames(), - listener - ); + request.offset(), + request.after(), + request.size(), + SnapshotsInProgress.get(state), + request.verbose(), + request.includeIndexNames() + ).getMultipleReposSnapshotInfo(listener); } /** - * Filters the list of repositories that a request will fetch snapshots from in the special case of sorting by repository - * name and having a non-null value for {@link GetSnapshotsRequest#fromSortValue()} on the request to exclude repositories outside - * the sort value range if possible. + * A single invocation of the get-snapshots API. + *

+ * Decides which repositories to query, picks a collection of candidate {@link SnapshotId} values from each {@link RepositoryData}, + * chosen according to the request parameters, loads the relevant {@link SnapshotInfo} blobs, and finally sorts and filters the + * results. */ - private static List maybeFilterRepositories( - List repositories, - GetSnapshotsRequest.SortBy sortBy, - SortOrder order, - @Nullable String fromSortValue - ) { - if (sortBy != GetSnapshotsRequest.SortBy.REPOSITORY || fromSortValue == null) { - return repositories; - } - final Predicate predicate = order == SortOrder.ASC - ? repositoryMetadata -> fromSortValue.compareTo(repositoryMetadata.name()) <= 0 - : repositoryMetadata -> fromSortValue.compareTo(repositoryMetadata.name()) >= 0; - return repositories.stream().filter(predicate).toList(); - } + private class GetSnapshotsOperation { + private final CancellableTask cancellableTask; + + // repositories + private final List repositories; + private final boolean isMultiRepoRequest; + + // snapshots selection + private final String[] snapshots; + private final boolean ignoreUnavailable; + private final SnapshotPredicates predicates; + + // snapshot ordering/pagination + private final SnapshotSortKey sortBy; + private final SortOrder order; + @Nullable + private final String fromSortValue; + private final int offset; + @Nullable + private final SnapshotSortKey.After after; + private final int size; + + // current state + private final SnapshotsInProgress snapshotsInProgress; + + // output detail + private final boolean verbose; + private final boolean indices; + + // snapshot info throttling + private final GetSnapshotInfoExecutor getSnapshotInfoExecutor; + + // results + private final Map failuresByRepository = ConcurrentCollections.newConcurrentMap(); + private final Queue> allSnapshotInfos = ConcurrentCollections.newQueue(); + private final AtomicInteger remaining = new AtomicInteger(); + private final AtomicInteger totalCount = new AtomicInteger(); + + GetSnapshotsOperation( + CancellableTask cancellableTask, + ResolvedRepositories resolvedRepositories, + boolean isMultiRepoRequest, + String[] snapshots, + boolean ignoreUnavailable, + SnapshotPredicates predicates, + SnapshotSortKey sortBy, + SortOrder order, + String fromSortValue, + int offset, + SnapshotSortKey.After after, + int size, + SnapshotsInProgress snapshotsInProgress, + boolean verbose, + boolean indices + ) { + this.cancellableTask = cancellableTask; + this.repositories = resolvedRepositories.repositoryMetadata(); + this.isMultiRepoRequest = isMultiRepoRequest; + this.snapshots = snapshots; + this.ignoreUnavailable = ignoreUnavailable; + this.predicates = predicates; + this.sortBy = sortBy; + this.order = order; + this.fromSortValue = fromSortValue; + this.offset = offset; + this.after = after; + this.size = size; + this.snapshotsInProgress = snapshotsInProgress; + this.verbose = verbose; + this.indices = indices; + + this.getSnapshotInfoExecutor = new GetSnapshotInfoExecutor( + threadPool.info(ThreadPool.Names.SNAPSHOT_META).getMax(), + cancellableTask::isCancelled + ); - private void getMultipleReposSnapshotInfo( - boolean isMultiRepoRequest, - SnapshotsInProgress snapshotsInProgress, - TransportGetRepositoriesAction.RepositoriesResult repositoriesResult, - String[] snapshots, - boolean ignoreUnavailable, - boolean verbose, - CancellableTask cancellableTask, - GetSnapshotsRequest.SortBy sortBy, - @Nullable GetSnapshotsRequest.After after, - int offset, - int size, - SortOrder order, - String 
fromSortValue, - SnapshotPredicates predicates, - boolean indices, - ActionListener listener - ) { - // Process the missing repositories - final Map failures = ConcurrentCollections.newConcurrentMap(); - for (String missingRepo : repositoriesResult.missing()) { - failures.put(missingRepo, new RepositoryMissingException(missingRepo)); + for (final var missingRepo : resolvedRepositories.missing()) { + failuresByRepository.put(missingRepo, new RepositoryMissingException(missingRepo)); + } } - final Queue> allSnapshotInfos = ConcurrentCollections.newQueue(); - final var remaining = new AtomicInteger(); - final var totalCount = new AtomicInteger(); - - List repositories = maybeFilterRepositories(repositoriesResult.metadata(), sortBy, order, fromSortValue); - try (var listeners = new RefCountingListener(listener.map(ignored -> { - cancellableTask.ensureNotCancelled(); - final var sortedSnapshotsInRepos = sortSnapshots( - allSnapshotInfos.stream().flatMap(Collection::stream), - totalCount.get(), - sortBy, - after, - offset, - size, - order - ); - final var snapshotInfos = sortedSnapshotsInRepos.snapshotInfos(); - assert indices || snapshotInfos.stream().allMatch(snapshotInfo -> snapshotInfo.indices().isEmpty()); - final int finalRemaining = sortedSnapshotsInRepos.remaining() + remaining.get(); - return new GetSnapshotsResponse( - snapshotInfos, - failures, - finalRemaining > 0 - ? GetSnapshotsRequest.After.from(snapshotInfos.get(snapshotInfos.size() - 1), sortBy).asQueryParam() - : null, - totalCount.get(), - finalRemaining - ); - }))) { - for (final RepositoryMetadata repository : repositories) { - final String repoName = repository.name(); - getSingleRepoSnapshotInfo( - snapshotsInProgress, - repoName, - snapshots, - predicates, - ignoreUnavailable, - verbose, - cancellableTask, - sortBy, - after, - order, - indices, - listeners.acquire((SnapshotsInRepo snapshotsInRepo) -> { + void getMultipleReposSnapshotInfo(ActionListener listener) { + try (var listeners = new RefCountingListener(listener.map(ignored -> { + cancellableTask.ensureNotCancelled(); + final var sortedSnapshotsInRepos = sortSnapshots( + allSnapshotInfos.stream().flatMap(Collection::stream), + totalCount.get(), + offset, + size + ); + final var snapshotInfos = sortedSnapshotsInRepos.snapshotInfos(); + assert indices || snapshotInfos.stream().allMatch(snapshotInfo -> snapshotInfo.indices().isEmpty()); + final int finalRemaining = sortedSnapshotsInRepos.remaining() + remaining.get(); + return new GetSnapshotsResponse( + snapshotInfos, + failuresByRepository, + finalRemaining > 0 ? 
sortBy.encodeAfterQueryParam(snapshotInfos.get(snapshotInfos.size() - 1)) : null, + totalCount.get(), + finalRemaining + ); + }))) { + for (final RepositoryMetadata repository : repositories) { + final String repoName = repository.name(); + if (skipRepository(repoName)) { + // TODO we should still count the matching snapshots in totalCount + continue; + } + + getSingleRepoSnapshotInfo(repoName, listeners.acquire((SnapshotsInRepo snapshotsInRepo) -> { allSnapshotInfos.add(snapshotsInRepo.snapshotInfos()); remaining.addAndGet(snapshotsInRepo.remaining()); totalCount.addAndGet(snapshotsInRepo.totalCount()); }).delegateResponse((l, e) -> { if (isMultiRepoRequest && e instanceof ElasticsearchException elasticsearchException) { - failures.put(repoName, elasticsearchException); + failuresByRepository.put(repoName, elasticsearchException); l.onResponse(SnapshotsInRepo.EMPTY); } else { l.onFailure(e); } - }) - ); + })); + } } } - } - private void getSingleRepoSnapshotInfo( - SnapshotsInProgress snapshotsInProgress, - String repo, - String[] snapshots, - SnapshotPredicates predicates, - boolean ignoreUnavailable, - boolean verbose, - CancellableTask task, - GetSnapshotsRequest.SortBy sortBy, - @Nullable final GetSnapshotsRequest.After after, - SortOrder order, - boolean indices, - ActionListener listener - ) { - final Map allSnapshotIds = new HashMap<>(); - final List currentSnapshots = new ArrayList<>(); - for (SnapshotInfo snapshotInfo : currentSnapshots(snapshotsInProgress, repo)) { - Snapshot snapshot = snapshotInfo.snapshot(); - allSnapshotIds.put(snapshot.getSnapshotId().getName(), snapshot); - currentSnapshots.add(snapshotInfo.maybeWithoutIndices(indices)); + private boolean skipRepository(String repositoryName) { + if (sortBy == SnapshotSortKey.REPOSITORY && fromSortValue != null) { + // If we are sorting by repository name with an offset given by fromSortValue, skip earlier repositories + return order == SortOrder.ASC ? 
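/*
 * Plain-JDK sketch (not the RefCountingListener machinery above) of the failure-handling policy in
 * getMultipleReposSnapshotInfo(): when several repositories are queried, a failure in one of them
 * is recorded by name in the failures section of the response while the other repositories still
 * contribute results; a single-repository request fails as a whole. The sequential loop and all
 * names here are illustrative simplifications.
 */
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;

class MultiRepoAggregationSketch {

    static List<String> fetchAll(
        Map<String, Callable<List<String>>> fetchersByRepository,
        boolean isMultiRepoRequest,
        Map<String, Exception> failuresByRepository
    ) throws Exception {
        final List<String> allSnapshots = new ArrayList<>();
        for (Map.Entry<String, Callable<List<String>>> entry : fetchersByRepository.entrySet()) {
            try {
                allSnapshots.addAll(entry.getValue().call());
            } catch (Exception e) {
                if (isMultiRepoRequest) {
                    failuresByRepository.put(entry.getKey(), e); // record and keep going
                } else {
                    throw e; // a single-repository request surfaces the failure directly
                }
            }
        }
        return allSnapshots;
    }

    public static void main(String[] args) throws Exception {
        final Map<String, Callable<List<String>>> fetchers = new LinkedHashMap<>();
        fetchers.put("good-repo", () -> List.of("snap-1", "snap-2"));
        fetchers.put("broken-repo", () -> { throw new IllegalStateException("repository unreadable"); });

        final Map<String, Exception> failures = new LinkedHashMap<>();
        System.out.println(fetchAll(fetchers, true, failures)); // [snap-1, snap-2]
        System.out.println(failures.keySet());                  // [broken-repo]
    }
}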
fromSortValue.compareTo(repositoryName) > 0 : fromSortValue.compareTo(repositoryName) < 0; + } else { + return false; + } } - final ListenableFuture repositoryDataListener = new ListenableFuture<>(); - if (isCurrentSnapshotsOnly(snapshots)) { - repositoryDataListener.onResponse(null); - } else { - repositoriesService.getRepositoryData(repo, repositoryDataListener); - } + private void getSingleRepoSnapshotInfo(String repo, ActionListener listener) { + final Map allSnapshotIds = new HashMap<>(); + final List currentSnapshots = new ArrayList<>(); + for (final SnapshotInfo snapshotInfo : currentSnapshots(repo)) { + Snapshot snapshot = snapshotInfo.snapshot(); + allSnapshotIds.put(snapshot.getSnapshotId().getName(), snapshot); + currentSnapshots.add(snapshotInfo.maybeWithoutIndices(indices)); + } - repositoryDataListener.addListener( - listener.delegateFailureAndWrap( - (l, repositoryData) -> loadSnapshotInfos( - snapshotsInProgress, - repo, - snapshots, - ignoreUnavailable, - verbose, - allSnapshotIds, - currentSnapshots, - repositoryData, - task, - sortBy, - after, - order, - predicates, - indices, - l - ) - ) - ); - } + final ListenableFuture repositoryDataListener = new ListenableFuture<>(); + if (isCurrentSnapshotsOnly()) { + repositoryDataListener.onResponse(null); + } else { + repositoriesService.getRepositoryData(repo, repositoryDataListener); + } - /** - * Returns a list of currently running snapshots from repository sorted by snapshot creation date - * - * @param snapshotsInProgress snapshots in progress in the cluster state - * @param repositoryName repository name - * @return list of snapshots - */ - private static List currentSnapshots(SnapshotsInProgress snapshotsInProgress, String repositoryName) { - List snapshotList = new ArrayList<>(); - List entries = SnapshotsService.currentSnapshots( - snapshotsInProgress, - repositoryName, - Collections.emptyList() - ); - for (SnapshotsInProgress.Entry entry : entries) { - snapshotList.add(SnapshotInfo.inProgress(entry)); + repositoryDataListener.addListener( + listener.delegateFailureAndWrap( + (l, repositoryData) -> loadSnapshotInfos(repo, allSnapshotIds, currentSnapshots, repositoryData, l) + ) + ); } - return snapshotList; - } - private void loadSnapshotInfos( - SnapshotsInProgress snapshotsInProgress, - String repo, - String[] snapshots, - boolean ignoreUnavailable, - boolean verbose, - Map allSnapshotIds, - List currentSnapshots, - @Nullable RepositoryData repositoryData, - CancellableTask task, - GetSnapshotsRequest.SortBy sortBy, - @Nullable final GetSnapshotsRequest.After after, - SortOrder order, - SnapshotPredicates predicates, - boolean indices, - ActionListener listener - ) { - if (task.notifyIfCancelled(listener)) { - return; + /** + * Returns a list of currently running snapshots from repository sorted by snapshot creation date + * + * @param repositoryName repository name + * @return list of snapshots + */ + private List currentSnapshots(String repositoryName) { + List snapshotList = new ArrayList<>(); + List entries = SnapshotsService.currentSnapshots( + snapshotsInProgress, + repositoryName, + Collections.emptyList() + ); + for (SnapshotsInProgress.Entry entry : entries) { + snapshotList.add(SnapshotInfo.inProgress(entry)); + } + return snapshotList; } - if (repositoryData != null) { - for (SnapshotId snapshotId : repositoryData.getSnapshotIds()) { - if (predicates.test(snapshotId, repositoryData)) { - allSnapshotIds.put(snapshotId.getName(), new Snapshot(repo, snapshotId)); + private void loadSnapshotInfos( + String 
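/*
 * Self-contained version of the skipRepository() check above: when results are sorted by repository
 * name and the request carries a from_sort_value, repositories that sort strictly before (ascending)
 * or strictly after (descending) that value cannot contribute to the requested page, so their
 * snapshots are never loaded at all.
 */
class SkipRepositorySketch {

    static boolean skipRepository(String repositoryName, String fromSortValue, boolean ascending) {
        if (fromSortValue == null) {
            return false;
        }
        return ascending ? fromSortValue.compareTo(repositoryName) > 0 : fromSortValue.compareTo(repositoryName) < 0;
    }

    public static void main(String[] args) {
        System.out.println(skipRepository("aaa-repo", "mmm-repo", true));  // true: sorts before the cursor
        System.out.println(skipRepository("zzz-repo", "mmm-repo", true));  // false: still in range
        System.out.println(skipRepository("zzz-repo", "mmm-repo", false)); // true: sorts after the cursor (descending)
    }
}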
repo, + Map allSnapshotIds, + List currentSnapshots, + @Nullable RepositoryData repositoryData, + ActionListener listener + ) { + if (cancellableTask.notifyIfCancelled(listener)) { + return; + } + + if (repositoryData != null) { + for (SnapshotId snapshotId : repositoryData.getSnapshotIds()) { + if (predicates.test(snapshotId, repositoryData)) { + allSnapshotIds.put(snapshotId.getName(), new Snapshot(repo, snapshotId)); + } } } - } - final Set toResolve = new HashSet<>(); - if (TransportGetRepositoriesAction.isMatchAll(snapshots)) { - toResolve.addAll(allSnapshotIds.values()); - } else { - final List includePatterns = new ArrayList<>(); - final List excludePatterns = new ArrayList<>(); - boolean hasCurrent = false; - boolean seenWildcard = false; - for (String snapshotOrPattern : snapshots) { - if (seenWildcard && snapshotOrPattern.length() > 1 && snapshotOrPattern.startsWith("-")) { - excludePatterns.add(snapshotOrPattern.substring(1)); - } else { - if (Regex.isSimpleMatchPattern(snapshotOrPattern)) { - seenWildcard = true; - includePatterns.add(snapshotOrPattern); - } else if (GetSnapshotsRequest.CURRENT_SNAPSHOT.equalsIgnoreCase(snapshotOrPattern)) { - hasCurrent = true; - seenWildcard = true; + final Set toResolve = new HashSet<>(); + if (ResolvedRepositories.isMatchAll(snapshots)) { + toResolve.addAll(allSnapshotIds.values()); + } else { + final List includePatterns = new ArrayList<>(); + final List excludePatterns = new ArrayList<>(); + boolean hasCurrent = false; + boolean seenWildcard = false; + for (String snapshotOrPattern : snapshots) { + if (seenWildcard && snapshotOrPattern.length() > 1 && snapshotOrPattern.startsWith("-")) { + excludePatterns.add(snapshotOrPattern.substring(1)); } else { - if (ignoreUnavailable == false && allSnapshotIds.containsKey(snapshotOrPattern) == false) { - throw new SnapshotMissingException(repo, snapshotOrPattern); + if (Regex.isSimpleMatchPattern(snapshotOrPattern)) { + seenWildcard = true; + includePatterns.add(snapshotOrPattern); + } else if (GetSnapshotsRequest.CURRENT_SNAPSHOT.equalsIgnoreCase(snapshotOrPattern)) { + hasCurrent = true; + seenWildcard = true; + } else { + if (ignoreUnavailable == false && allSnapshotIds.containsKey(snapshotOrPattern) == false) { + throw new SnapshotMissingException(repo, snapshotOrPattern); + } + includePatterns.add(snapshotOrPattern); } - includePatterns.add(snapshotOrPattern); } } - } - final String[] includes = includePatterns.toArray(Strings.EMPTY_ARRAY); - final String[] excludes = excludePatterns.toArray(Strings.EMPTY_ARRAY); - for (Map.Entry entry : allSnapshotIds.entrySet()) { - final Snapshot snapshot = entry.getValue(); - if (toResolve.contains(snapshot) == false - && Regex.simpleMatch(includes, entry.getKey()) - && Regex.simpleMatch(excludes, entry.getKey()) == false) { - toResolve.add(snapshot); - } - } - if (hasCurrent) { - for (SnapshotInfo snapshotInfo : currentSnapshots) { - final Snapshot snapshot = snapshotInfo.snapshot(); - if (Regex.simpleMatch(excludes, snapshot.getSnapshotId().getName()) == false) { + final String[] includes = includePatterns.toArray(Strings.EMPTY_ARRAY); + final String[] excludes = excludePatterns.toArray(Strings.EMPTY_ARRAY); + for (Map.Entry entry : allSnapshotIds.entrySet()) { + final Snapshot snapshot = entry.getValue(); + if (toResolve.contains(snapshot) == false + && Regex.simpleMatch(includes, entry.getKey()) + && Regex.simpleMatch(excludes, entry.getKey()) == false) { toResolve.add(snapshot); } } + if (hasCurrent) { + for (SnapshotInfo snapshotInfo : 
currentSnapshots) { + final Snapshot snapshot = snapshotInfo.snapshot(); + if (Regex.simpleMatch(excludes, snapshot.getSnapshotId().getName()) == false) { + toResolve.add(snapshot); + } + } + } + if (toResolve.isEmpty() && ignoreUnavailable == false && isCurrentSnapshotsOnly() == false) { + throw new SnapshotMissingException(repo, snapshots[0]); + } } - if (toResolve.isEmpty() && ignoreUnavailable == false && isCurrentSnapshotsOnly(snapshots) == false) { - throw new SnapshotMissingException(repo, snapshots[0]); - } - } - if (verbose) { - snapshots( - snapshotsInProgress, - repo, - toResolve.stream().map(Snapshot::getSnapshotId).toList(), - ignoreUnavailable, - task, - sortBy, - after, - order, - predicates, - indices, - listener - ); - } else { - assert predicates.isMatchAll() : "filtering is not supported in non-verbose mode"; - final SnapshotsInRepo snapshotInfos; - if (repositoryData != null) { - // want non-current snapshots as well, which are found in the repository data - snapshotInfos = buildSimpleSnapshotInfos(toResolve, repo, repositoryData, currentSnapshots, sortBy, after, order, indices); + if (verbose) { + snapshots(repo, toResolve.stream().map(Snapshot::getSnapshotId).toList(), listener); } else { - // only want current snapshots - snapshotInfos = sortSnapshots( - currentSnapshots.stream().map(SnapshotInfo::basic).toList(), - sortBy, - after, - 0, - GetSnapshotsRequest.NO_LIMIT, - order - ); - } - listener.onResponse(snapshotInfos); - } - } - - /** - * Returns a list of snapshots from repository sorted by snapshot creation date - * - * @param snapshotsInProgress snapshots in progress in the cluster state - * @param repositoryName repository name - * @param snapshotIds snapshots for which to fetch snapshot information - * @param ignoreUnavailable if true, snapshots that could not be read will only be logged with a warning, - * @param indices if false, drop the list of indices from each result - */ - private void snapshots( - SnapshotsInProgress snapshotsInProgress, - String repositoryName, - Collection snapshotIds, - boolean ignoreUnavailable, - CancellableTask task, - GetSnapshotsRequest.SortBy sortBy, - @Nullable GetSnapshotsRequest.After after, - SortOrder order, - SnapshotPredicates predicate, - boolean indices, - ActionListener listener - ) { - if (task.notifyIfCancelled(listener)) { - return; - } - final Set snapshotSet = new HashSet<>(); - final Set snapshotIdsToIterate = new HashSet<>(snapshotIds); - // first, look at the snapshots in progress - final List entries = SnapshotsService.currentSnapshots( - snapshotsInProgress, - repositoryName, - snapshotIdsToIterate.stream().map(SnapshotId::getName).toList() - ); - for (SnapshotsInProgress.Entry entry : entries) { - if (snapshotIdsToIterate.remove(entry.snapshot().getSnapshotId())) { - final SnapshotInfo snapshotInfo = SnapshotInfo.inProgress(entry); - if (predicate.test(snapshotInfo)) { - snapshotSet.add(snapshotInfo.maybeWithoutIndices(indices)); + assert predicates.isMatchAll() : "filtering is not supported in non-verbose mode"; + final SnapshotsInRepo snapshotInfos; + if (repositoryData != null) { + // want non-current snapshots as well, which are found in the repository data + snapshotInfos = buildSimpleSnapshotInfos(toResolve, repo, repositoryData, currentSnapshots); + } else { + // only want current snapshots + snapshotInfos = sortSnapshotsWithNoOffsetOrLimit(currentSnapshots.stream().map(SnapshotInfo::basic).toList()); } + listener.onResponse(snapshotInfos); } } - // then, look in the repository if there's any 
matching snapshots left - final List snapshotInfos; - if (snapshotIdsToIterate.isEmpty()) { - snapshotInfos = Collections.emptyList(); - } else { - snapshotInfos = Collections.synchronizedList(new ArrayList<>()); - } - final ActionListener allDoneListener = listener.safeMap(v -> { - final ArrayList snapshotList = new ArrayList<>(snapshotInfos); - snapshotList.addAll(snapshotSet); - return sortSnapshots(snapshotList, sortBy, after, 0, GetSnapshotsRequest.NO_LIMIT, order); - }); - if (snapshotIdsToIterate.isEmpty()) { - allDoneListener.onResponse(null); - return; - } - final Repository repository; - try { - repository = repositoriesService.repository(repositoryName); - } catch (RepositoryMissingException e) { - listener.onFailure(e); - return; - } - repository.getSnapshotInfo( - new GetSnapshotInfoContext(snapshotIdsToIterate, ignoreUnavailable == false, task::isCancelled, (context, snapshotInfo) -> { - if (predicate.test(snapshotInfo)) { - snapshotInfos.add(snapshotInfo.maybeWithoutIndices(indices)); - } - }, allDoneListener) - ); - } - - private static boolean isCurrentSnapshotsOnly(String[] snapshots) { - return (snapshots.length == 1 && GetSnapshotsRequest.CURRENT_SNAPSHOT.equalsIgnoreCase(snapshots[0])); - } - private static SnapshotsInRepo buildSimpleSnapshotInfos( - final Set toResolve, - final String repoName, - final RepositoryData repositoryData, - final List currentSnapshots, - final GetSnapshotsRequest.SortBy sortBy, - @Nullable final GetSnapshotsRequest.After after, - final SortOrder order, - boolean indices - ) { - List snapshotInfos = new ArrayList<>(); - for (SnapshotInfo snapshotInfo : currentSnapshots) { - if (toResolve.remove(snapshotInfo.snapshot())) { - snapshotInfos.add(snapshotInfo.basic()); + /** + * Returns a list of snapshots from repository sorted by snapshot creation date + * + * @param repositoryName repository name + * @param snapshotIds snapshots for which to fetch snapshot information + */ + private void snapshots(String repositoryName, Collection snapshotIds, ActionListener listener) { + if (cancellableTask.notifyIfCancelled(listener)) { + return; } - } - Map> snapshotsToIndices = new HashMap<>(); - if (indices) { - for (IndexId indexId : repositoryData.getIndices().values()) { - for (SnapshotId snapshotId : repositoryData.getSnapshots(indexId)) { - if (toResolve.contains(new Snapshot(repoName, snapshotId))) { - snapshotsToIndices.computeIfAbsent(snapshotId, (k) -> new ArrayList<>()).add(indexId.getName()); + final List snapshots = new ArrayList<>(snapshotIds.size()); + final Set snapshotIdsToIterate = new HashSet<>(snapshotIds); + // first, look at the snapshots in progress + final List entries = SnapshotsService.currentSnapshots( + snapshotsInProgress, + repositoryName, + snapshotIdsToIterate.stream().map(SnapshotId::getName).toList() + ); + for (SnapshotsInProgress.Entry entry : entries) { + if (snapshotIdsToIterate.remove(entry.snapshot().getSnapshotId())) { + final SnapshotInfo snapshotInfo = SnapshotInfo.inProgress(entry); + if (predicates.test(snapshotInfo)) { + snapshots.add(snapshotInfo.maybeWithoutIndices(indices)); } } } - } - for (Snapshot snapshot : toResolve) { - snapshotInfos.add( - new SnapshotInfo( - snapshot, - snapshotsToIndices.getOrDefault(snapshot.getSnapshotId(), Collections.emptyList()), - Collections.emptyList(), - Collections.emptyList(), - repositoryData.getSnapshotState(snapshot.getSnapshotId()) + // then, look in the repository if there's any matching snapshots left + try ( + var listeners = new RefCountingListener( + // no need 
to synchronize access to snapshots: Repository#getSnapshotInfo fails fast but we're on the success path here + listener.safeMap(v -> sortSnapshotsWithNoOffsetOrLimit(snapshots)) ) - ); - } - return sortSnapshots(snapshotInfos, sortBy, after, 0, GetSnapshotsRequest.NO_LIMIT, order); - } - - private static final Comparator BY_START_TIME = Comparator.comparingLong(SnapshotInfo::startTime) - .thenComparing(SnapshotInfo::snapshotId); - - private static final Comparator BY_DURATION = Comparator.comparingLong( - sni -> sni.endTime() - sni.startTime() - ).thenComparing(SnapshotInfo::snapshotId); - - private static final Comparator BY_INDICES_COUNT = Comparator.comparingInt(sni -> sni.indices().size()) - .thenComparing(SnapshotInfo::snapshotId); - - private static final Comparator BY_SHARDS_COUNT = Comparator.comparingInt(SnapshotInfo::totalShards) - .thenComparing(SnapshotInfo::snapshotId); - - private static final Comparator BY_FAILED_SHARDS_COUNT = Comparator.comparingInt(SnapshotInfo::failedShards) - .thenComparing(SnapshotInfo::snapshotId); - - private static final Comparator BY_NAME = Comparator.comparing(sni -> sni.snapshotId().getName()); - - private static final Comparator BY_REPOSITORY = Comparator.comparing(SnapshotInfo::repository) - .thenComparing(SnapshotInfo::snapshotId); - - private static long getDuration(SnapshotId snapshotId, RepositoryData repositoryData) { - final RepositoryData.SnapshotDetails details = repositoryData.getSnapshotDetails(snapshotId); - if (details == null) { - return -1; - } - final long startTime = details.getStartTimeMillis(); - if (startTime == -1) { - return -1; - } - final long endTime = details.getEndTimeMillis(); - if (endTime == -1) { - return -1; - } - return endTime - startTime; - } + ) { + if (snapshotIdsToIterate.isEmpty()) { + return; + } - private static long getStartTime(SnapshotId snapshotId, RepositoryData repositoryData) { - final RepositoryData.SnapshotDetails details = repositoryData.getSnapshotDetails(snapshotId); - return details == null ? 
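/*
 * Plain-JDK sketch of the throttling that GetSnapshotInfoExecutor/ThrottledIterator provide above:
 * load snapshot metadata with at most maxConcurrent loads in flight at once, so a request for many
 * snapshots cannot monopolise the SNAPSHOT_META thread pool. The loader function and all names here
 * are illustrative, not the Elasticsearch API.
 */
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;

class ThrottledFetchSketch {

    static <T, R> List<R> fetchThrottled(List<T> ids, int maxConcurrent, Function<T, R> loader) throws InterruptedException {
        final Semaphore slots = new Semaphore(maxConcurrent);
        final Queue<R> results = new ConcurrentLinkedQueue<>();
        final ExecutorService executor = Executors.newCachedThreadPool();
        try {
            for (T id : ids) {
                slots.acquire(); // blocks until one of the maxConcurrent slots is free
                executor.execute(() -> {
                    try {
                        results.add(loader.apply(id));
                    } finally {
                        slots.release();
                    }
                });
            }
            slots.acquire(maxConcurrent); // wait for the remaining in-flight loads to finish
        } finally {
            executor.shutdown();
            executor.awaitTermination(30, TimeUnit.SECONDS);
        }
        return List.copyOf(results);
    }

    public static void main(String[] args) throws InterruptedException {
        final List<Integer> snapshotIds = List.of(1, 2, 3, 4, 5, 6, 7, 8);
        final List<String> infos = fetchThrottled(snapshotIds, 2, id -> "snapshot-info-" + id);
        System.out.println(infos.size()); // 8, loaded at most two at a time
    }
}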
-1 : details.getStartTimeMillis(); - } + final Repository repository; + try { + repository = repositoriesService.repository(repositoryName); + } catch (RepositoryMissingException e) { + listeners.acquire().onFailure(e); + return; + } - private static int indexCount(SnapshotId snapshotId, RepositoryData repositoryData) { - // TODO: this could be made more efficient by caching this number in RepositoryData - int indexCount = 0; - for (IndexId idx : repositoryData.getIndices().values()) { - if (repositoryData.getSnapshots(idx).contains(snapshotId)) { - indexCount++; + // only need to synchronize accesses related to reading SnapshotInfo from the repo + final List syncSnapshots = Collections.synchronizedList(snapshots); + + ThrottledIterator.run( + Iterators.failFast(snapshotIdsToIterate.iterator(), () -> cancellableTask.isCancelled() || listeners.isFailing()), + (ref, snapshotId) -> { + final var refListener = ActionListener.runBefore(listeners.acquire(), ref::close); + getSnapshotInfoExecutor.getSnapshotInfo(repository, snapshotId, new ActionListener<>() { + @Override + public void onResponse(SnapshotInfo snapshotInfo) { + if (predicates.test(snapshotInfo)) { + syncSnapshots.add(snapshotInfo.maybeWithoutIndices(indices)); + } + refListener.onResponse(null); + } + + @Override + public void onFailure(Exception e) { + if (ignoreUnavailable) { + logger.warn(Strings.format("failed to fetch snapshot info for [%s:%s]", repository, snapshotId), e); + refListener.onResponse(null); + } else { + refListener.onFailure(e); + } + } + }); + }, + getSnapshotInfoExecutor.getMaxRunningTasks(), + () -> {}, + () -> {} + ); } } - return indexCount; - } - private static SnapshotsInRepo sortSnapshots( - List snapshotInfos, - GetSnapshotsRequest.SortBy sortBy, - @Nullable GetSnapshotsRequest.After after, - int offset, - int size, - SortOrder order - ) { - return sortSnapshots(snapshotInfos.stream(), snapshotInfos.size(), sortBy, after, offset, size, order); - } - - private static SnapshotsInRepo sortSnapshots( - Stream infos, - int totalCount, - GetSnapshotsRequest.SortBy sortBy, - @Nullable GetSnapshotsRequest.After after, - int offset, - int size, - SortOrder order - ) { - final Comparator comparator = switch (sortBy) { - case START_TIME -> BY_START_TIME; - case NAME -> BY_NAME; - case DURATION -> BY_DURATION; - case INDICES -> BY_INDICES_COUNT; - case SHARDS -> BY_SHARDS_COUNT; - case FAILED_SHARDS -> BY_FAILED_SHARDS_COUNT; - case REPOSITORY -> BY_REPOSITORY; - }; - - if (after != null) { - assert offset == 0 : "can't combine after and offset but saw [" + after + "] and offset [" + offset + "]"; - infos = infos.filter(buildAfterPredicate(sortBy, after, order)); - } - infos = infos.sorted(order == SortOrder.DESC ? comparator.reversed() : comparator).skip(offset); - final List allSnapshots = infos.toList(); - final List snapshots; - if (size != GetSnapshotsRequest.NO_LIMIT) { - snapshots = allSnapshots.stream().limit(size + 1).toList(); - } else { - snapshots = allSnapshots; + private boolean isCurrentSnapshotsOnly() { + return snapshots.length == 1 && GetSnapshotsRequest.CURRENT_SNAPSHOT.equalsIgnoreCase(snapshots[0]); } - final List resultSet = size != GetSnapshotsRequest.NO_LIMIT && size < snapshots.size() - ? 
snapshots.subList(0, size) - : snapshots; - return new SnapshotsInRepo(resultSet, totalCount, allSnapshots.size() - resultSet.size()); - } - private static Predicate buildAfterPredicate( - GetSnapshotsRequest.SortBy sortBy, - GetSnapshotsRequest.After after, - SortOrder order - ) { - final String snapshotName = after.snapshotName(); - final String repoName = after.repoName(); - final String value = after.value(); - return switch (sortBy) { - case START_TIME -> filterByLongOffset(SnapshotInfo::startTime, Long.parseLong(value), snapshotName, repoName, order); - case NAME -> - // TODO: cover via pre-flight predicate - order == SortOrder.ASC - ? (info -> compareName(snapshotName, repoName, info) < 0) - : (info -> compareName(snapshotName, repoName, info) > 0); - case DURATION -> filterByLongOffset( - info -> info.endTime() - info.startTime(), - Long.parseLong(value), - snapshotName, - repoName, - order - ); - case INDICES -> - // TODO: cover via pre-flight predicate - filterByLongOffset(info -> info.indices().size(), Integer.parseInt(value), snapshotName, repoName, order); - case SHARDS -> filterByLongOffset(SnapshotInfo::totalShards, Integer.parseInt(value), snapshotName, repoName, order); - case FAILED_SHARDS -> filterByLongOffset(SnapshotInfo::failedShards, Integer.parseInt(value), snapshotName, repoName, order); - case REPOSITORY -> - // TODO: cover via pre-flight predicate - order == SortOrder.ASC - ? (info -> compareRepositoryName(snapshotName, repoName, info) < 0) - : (info -> compareRepositoryName(snapshotName, repoName, info) > 0); - }; - } - - private static Predicate filterByLongOffset( - ToLongFunction extractor, - long after, - String snapshotName, - String repoName, - SortOrder order - ) { - return order == SortOrder.ASC ? info -> { - final long val = extractor.applyAsLong(info); - return after < val || (after == val && compareName(snapshotName, repoName, info) < 0); - } : info -> { - final long val = extractor.applyAsLong(info); - return after > val || (after == val && compareName(snapshotName, repoName, info) > 0); - }; - } + private SnapshotsInRepo buildSimpleSnapshotInfos( + final Set toResolve, + final String repoName, + final RepositoryData repositoryData, + final List currentSnapshots + ) { + List snapshotInfos = new ArrayList<>(); + for (SnapshotInfo snapshotInfo : currentSnapshots) { + if (toResolve.remove(snapshotInfo.snapshot())) { + snapshotInfos.add(snapshotInfo.basic()); + } + } + Map> snapshotsToIndices = new HashMap<>(); + if (indices) { + for (IndexId indexId : repositoryData.getIndices().values()) { + for (SnapshotId snapshotId : repositoryData.getSnapshots(indexId)) { + if (toResolve.contains(new Snapshot(repoName, snapshotId))) { + snapshotsToIndices.computeIfAbsent(snapshotId, (k) -> new ArrayList<>()).add(indexId.getName()); + } + } + } + } + for (Snapshot snapshot : toResolve) { + snapshotInfos.add( + new SnapshotInfo( + snapshot, + snapshotsToIndices.getOrDefault(snapshot.getSnapshotId(), Collections.emptyList()), + Collections.emptyList(), + Collections.emptyList(), + repositoryData.getSnapshotState(snapshot.getSnapshotId()) + ) + ); + } + return sortSnapshotsWithNoOffsetOrLimit(snapshotInfos); + } - private static int compareRepositoryName(String name, String repoName, SnapshotInfo info) { - final int res = repoName.compareTo(info.repository()); - if (res != 0) { - return res; + private SnapshotsInRepo sortSnapshotsWithNoOffsetOrLimit(List snapshotInfos) { + return sortSnapshots(snapshotInfos.stream(), snapshotInfos.size(), 0, 
GetSnapshotsRequest.NO_LIMIT); } - return name.compareTo(info.snapshotId().getName()); - } - private static int compareName(String name, String repoName, SnapshotInfo info) { - final int res = name.compareTo(info.snapshotId().getName()); - if (res != 0) { - return res; + private SnapshotsInRepo sortSnapshots(Stream snapshotInfoStream, int totalCount, int offset, int size) { + final var resultsStream = snapshotInfoStream.filter(sortBy.getAfterPredicate(after, order)) + .sorted(sortBy.getSnapshotInfoComparator(order)) + .skip(offset); + if (size == GetSnapshotsRequest.NO_LIMIT) { + return new SnapshotsInRepo(resultsStream.toList(), totalCount, 0); + } else { + final var allocateSize = Math.min(size, 1000); // ignore excessively-large sizes in request params + final var results = new ArrayList(allocateSize); + var remaining = 0; + for (var iterator = resultsStream.iterator(); iterator.hasNext();) { + final var snapshotInfo = iterator.next(); + if (results.size() < size) { + results.add(snapshotInfo); + } else { + remaining += 1; + } + } + return new SnapshotsInRepo(results, totalCount, remaining); + } } - return repoName.compareTo(info.repository()); } /** @@ -815,7 +661,7 @@ private static boolean matchPolicy(String[] includes, String[] excludes, boolean return excludes.length == 0 || Regex.simpleMatch(excludes, policy) == false; } - private static SnapshotPredicates getSortValuePredicate(String fromSortValue, GetSnapshotsRequest.SortBy sortBy, SortOrder order) { + private static SnapshotPredicates getSortValuePredicate(String fromSortValue, SnapshotSortKey sortBy, SortOrder order) { if (fromSortValue == null) { return MATCH_ALL; } @@ -881,9 +727,70 @@ private static Predicate filterByLongOffset(ToLongFunction after <= extractor.applyAsLong(info) : info -> after >= extractor.applyAsLong(info); } + private static long getDuration(SnapshotId snapshotId, RepositoryData repositoryData) { + final RepositoryData.SnapshotDetails details = repositoryData.getSnapshotDetails(snapshotId); + if (details == null) { + return -1; + } + final long startTime = details.getStartTimeMillis(); + if (startTime == -1) { + return -1; + } + final long endTime = details.getEndTimeMillis(); + if (endTime == -1) { + return -1; + } + return endTime - startTime; + } + + private static long getStartTime(SnapshotId snapshotId, RepositoryData repositoryData) { + final RepositoryData.SnapshotDetails details = repositoryData.getSnapshotDetails(snapshotId); + return details == null ? -1 : details.getStartTimeMillis(); + } + + private static int indexCount(SnapshotId snapshotId, RepositoryData repositoryData) { + // TODO: this could be made more efficient by caching this number in RepositoryData + int indexCount = 0; + for (IndexId idx : repositoryData.getIndices().values()) { + if (repositoryData.getSnapshots(idx).contains(snapshotId)) { + indexCount++; + } + } + return indexCount; + } } private record SnapshotsInRepo(List snapshotInfos, int totalCount, int remaining) { private static final SnapshotsInRepo EMPTY = new SnapshotsInRepo(List.of(), 0, 0); } + + /** + * Throttling executor for retrieving {@link SnapshotInfo} instances from the repository without spamming the SNAPSHOT_META threadpool + * and starving other users of access to it. Similar to {@link Repository#getSnapshotInfo} but allows for finer-grained control over + * which snapshots are retrieved. 
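/*
 * Sketch of the sorting/pagination pipeline implemented by sortSnapshots() above: drop everything
 * at or before the ?after cursor, sort, skip the numeric offset, keep at most size results, and
 * count how many matching snapshots remain beyond the returned page (that count is what drives the
 * "next" cursor in the response). The names and the PageResult record are illustrative only.
 */
import java.util.ArrayList;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.function.Predicate;
import java.util.stream.Stream;

class PaginationSketch {

    record PageResult<T>(List<T> page, int remaining) {}

    static <T> PageResult<T> paginate(Stream<T> items, Predicate<T> afterPredicate, Comparator<T> comparator, int offset, int size) {
        final Iterator<T> sorted = items.filter(afterPredicate).sorted(comparator).skip(offset).iterator();
        final List<T> page = new ArrayList<>();
        int remaining = 0;
        while (sorted.hasNext()) {
            final T item = sorted.next();
            if (page.size() < size) {
                page.add(item);
            } else {
                remaining++; // counted but not returned; tells the caller another page exists
            }
        }
        return new PageResult<>(page, remaining);
    }

    public static void main(String[] args) {
        final PageResult<Integer> result = paginate(Stream.of(5, 3, 9, 1, 7), x -> x > 1, Comparator.naturalOrder(), 1, 2);
        System.out.println(result.page());      // [5, 7]
        System.out.println(result.remaining()); // 1 (only 9 is left after this page)
    }
}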
+ */ + private static class GetSnapshotInfoExecutor extends AbstractThrottledTaskRunner> { + private final int maxRunningTasks; + private final BooleanSupplier isCancelledSupplier; + + GetSnapshotInfoExecutor(int maxRunningTasks, BooleanSupplier isCancelledSupplier) { + super(GetSnapshotsAction.NAME, maxRunningTasks, EsExecutors.DIRECT_EXECUTOR_SERVICE, ConcurrentCollections.newBlockingQueue()); + this.maxRunningTasks = maxRunningTasks; + this.isCancelledSupplier = isCancelledSupplier; + } + + int getMaxRunningTasks() { + return maxRunningTasks; + } + + void getSnapshotInfo(Repository repository, SnapshotId snapshotId, ActionListener listener) { + enqueueTask(listener.delegateFailure((l, ref) -> { + if (isCancelledSupplier.getAsBoolean()) { + l.onFailure(new TaskCancelledException("task cancelled")); + } else { + repository.getSnapshotInfo(snapshotId, ActionListener.releaseAfter(l, ref)); + } + })); + } + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index 4be6c6af3d7db..973ae9098047f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -29,7 +29,6 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; -import org.elasticsearch.repositories.GetSnapshotInfoContext; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; @@ -318,45 +317,39 @@ private void loadRepositoryData( delegate.onResponse(new SnapshotsStatusResponse(Collections.unmodifiableList(builder))); } else { final List threadSafeBuilder = Collections.synchronizedList(builder); - repositoriesService.repository(repositoryName) - .getSnapshotInfo(new GetSnapshotInfoContext(snapshotIdsToLoad, true, task::isCancelled, (context, snapshotInfo) -> { - List shardStatusBuilder = new ArrayList<>(); - final Map shardStatuses; - try { - shardStatuses = snapshotShards(repositoryName, repositoryData, task, snapshotInfo); - } catch (Exception e) { - // stops all further fetches of snapshotInfo since context is fail-fast - context.onFailure(e); - return; - } - for (final var shardStatus : shardStatuses.entrySet()) { - IndexShardSnapshotStatus.Copy lastSnapshotStatus = shardStatus.getValue(); - shardStatusBuilder.add(new SnapshotIndexShardStatus(shardStatus.getKey(), lastSnapshotStatus)); - } - final SnapshotsInProgress.State state = switch (snapshotInfo.state()) { - case FAILED -> SnapshotsInProgress.State.FAILED; - case SUCCESS, PARTIAL -> - // Translating both PARTIAL and SUCCESS to SUCCESS for now - // TODO: add the differentiation on the metadata level in the next major release - SnapshotsInProgress.State.SUCCESS; - default -> throw new IllegalArgumentException("Unexpected snapshot state " + snapshotInfo.state()); - }; - final long startTime = snapshotInfo.startTime(); - final long endTime = snapshotInfo.endTime(); - assert endTime >= startTime || (endTime == 0L && snapshotInfo.state().completed() == false) - : "Inconsistent timestamps found in SnapshotInfo [" + snapshotInfo + "]"; - threadSafeBuilder.add( - new SnapshotStatus( - new 
Snapshot(repositoryName, snapshotInfo.snapshotId()), - state, - Collections.unmodifiableList(shardStatusBuilder), - snapshotInfo.includeGlobalState(), - startTime, - // Use current time to calculate overall runtime for in-progress snapshots that have endTime == 0 - (endTime == 0 ? threadPool.absoluteTimeInMillis() : endTime) - startTime - ) - ); - }, delegate.map(v -> new SnapshotsStatusResponse(List.copyOf(threadSafeBuilder))))); + repositoriesService.repository(repositoryName).getSnapshotInfo(snapshotIdsToLoad, true, task::isCancelled, snapshotInfo -> { + List shardStatusBuilder = new ArrayList<>(); + final Map shardStatuses; + shardStatuses = snapshotShards(repositoryName, repositoryData, task, snapshotInfo); + // an exception here stops further fetches of snapshotInfo since context is fail-fast + for (final var shardStatus : shardStatuses.entrySet()) { + IndexShardSnapshotStatus.Copy lastSnapshotStatus = shardStatus.getValue(); + shardStatusBuilder.add(new SnapshotIndexShardStatus(shardStatus.getKey(), lastSnapshotStatus)); + } + final SnapshotsInProgress.State state = switch (snapshotInfo.state()) { + case FAILED -> SnapshotsInProgress.State.FAILED; + case SUCCESS, PARTIAL -> + // Translating both PARTIAL and SUCCESS to SUCCESS for now + // TODO: add the differentiation on the metadata level in the next major release + SnapshotsInProgress.State.SUCCESS; + default -> throw new IllegalArgumentException("Unexpected snapshot state " + snapshotInfo.state()); + }; + final long startTime = snapshotInfo.startTime(); + final long endTime = snapshotInfo.endTime(); + assert endTime >= startTime || (endTime == 0L && snapshotInfo.state().completed() == false) + : "Inconsistent timestamps found in SnapshotInfo [" + snapshotInfo + "]"; + threadSafeBuilder.add( + new SnapshotStatus( + new Snapshot(repositoryName, snapshotInfo.snapshotId()), + state, + Collections.unmodifiableList(shardStatusBuilder), + snapshotInfo.includeGlobalState(), + startTime, + // Use current time to calculate overall runtime for in-progress snapshots that have endTime == 0 + (endTime == 0 ? threadPool.absoluteTimeInMillis() : endTime) - startTime + ) + ); + }, delegate.map(v -> new SnapshotsStatusResponse(List.copyOf(threadSafeBuilder)))); } })); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java index 29bffa3949258..c6431c7a593cd 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.core.Predicates; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; @@ -89,7 +90,7 @@ protected void masterOperation( final CancellableTask cancellableTask = (CancellableTask) task; final Predicate acceptableClusterStatePredicate = request.waitForMetadataVersion() == null - ? clusterState -> true + ? 
Predicates.always() : clusterState -> clusterState.metadata().version() >= request.waitForMetadataVersion(); final Predicate acceptableClusterStateOrFailedPredicate = request.local() diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java index 3e8e6fbfde75c..4f7525c700fc2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.Predicates; import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.indices.SystemIndices.SystemIndexAccessLevel; @@ -160,7 +161,7 @@ private static void checkSystemIndexAccess( ) { final Predicate systemIndexAccessAllowPredicate; if (systemIndexAccessLevel == SystemIndexAccessLevel.NONE) { - systemIndexAccessAllowPredicate = indexName -> false; + systemIndexAccessAllowPredicate = Predicates.never(); } else if (systemIndexAccessLevel == SystemIndexAccessLevel.RESTRICTED) { systemIndexAccessAllowPredicate = systemIndices.getProductSystemIndexNamePredicate(threadContext); } else { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java index 1c2598d70998a..a550350c20f6b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java @@ -9,7 +9,9 @@ package org.elasticsearch.action.admin.indices.get; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.info.ClusterInfoRequest; +import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.ArrayUtils; @@ -92,7 +94,15 @@ public static Feature[] fromRequest(RestRequest request) { private transient boolean includeDefaults = false; public GetIndexRequest() { - + super( + DataStream.isFailureStoreEnabled() + ? 
IndicesOptions.builder(IndicesOptions.strictExpandOpen()) + .failureStoreOptions( + IndicesOptions.FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(true) + ) + .build() + : IndicesOptions.strictExpandOpen() + ); } public GetIndexRequest(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java index 45532d8024f87..a2787e1a55fd7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -66,7 +66,24 @@ public class PutMappingRequest extends AcknowledgedRequest im private String[] indices; - private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, true); + private IndicesOptions indicesOptions = IndicesOptions.builder() + .concreteTargetOptions(IndicesOptions.ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) + .wildcardOptions( + IndicesOptions.WildcardOptions.builder() + .matchOpen(true) + .matchClosed(true) + .includeHidden(false) + .allowEmptyExpressions(false) + .resolveAliases(true) + ) + .gatekeeperOptions( + IndicesOptions.GatekeeperOptions.builder() + .allowClosedIndices(true) + .allowAliasToMultipleIndices(true) + .ignoreThrottled(false) + .allowFailureIndices(false) + ) + .build(); private String source; private String origin = ""; diff --git a/server/src/main/java/org/elasticsearch/action/bulk/Retry.java b/server/src/main/java/org/elasticsearch/action/bulk/Retry.java index 33fb81a6520cb..62ef9a08f0070 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/Retry.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/Retry.java @@ -16,6 +16,7 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.Predicates; import org.elasticsearch.core.TimeValue; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.Scheduler; @@ -104,14 +105,14 @@ static class RetryHandler extends DelegatingActionListener true)); + addResponses(bulkItemResponses, Predicates.always()); finishHim(); } else { if (canRetry(bulkItemResponses)) { addResponses(bulkItemResponses, (r -> r.isFailed() == false)); retry(createBulkRequestForRetry(bulkItemResponses)); } else { - addResponses(bulkItemResponses, (r -> true)); + addResponses(bulkItemResponses, Predicates.always()); finishHim(); } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/Retry2.java b/server/src/main/java/org/elasticsearch/action/bulk/Retry2.java index 784ba1eb95d5d..999bd6af925a6 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/Retry2.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/Retry2.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.core.Predicates; import org.elasticsearch.core.TimeValue; import org.elasticsearch.rest.RestStatus; @@ -183,7 +184,7 @@ public void onResponse(BulkResponse bulkItemResponses) { bulkItemResponses.getItems().length ); // we're done here, include all responses - addResponses(bulkItemResponses, (r -> true)); + 
addResponses(bulkItemResponses, Predicates.always()); listener.onResponse(getAccumulatedResponse()); } else { if (canRetry(bulkItemResponses)) { @@ -201,7 +202,7 @@ public void onResponse(BulkResponse bulkItemResponses) { bulkItemResponses.getTook(), bulkItemResponses.getItems().length ); - addResponses(bulkItemResponses, (r -> true)); + addResponses(bulkItemResponses, Predicates.always()); listener.onResponse(getAccumulatedResponse()); } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 3e661c2efe72f..a2445e95a572f 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -291,7 +291,7 @@ public void onTimeout(TimeValue timeout) { } private void forkAndExecute(Task task, BulkRequest bulkRequest, String executorName, ActionListener releasingListener) { - threadPool.executor(Names.WRITE).execute(new ActionRunnable<>(releasingListener) { + threadPool.executor(executorName).execute(new ActionRunnable<>(releasingListener) { @Override protected void doRun() { doInternalExecute(task, bulkRequest, executorName, releasingListener); diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java index 079c8f8b01ceb..8c469f7dffc4d 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamAutoShardingEvent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -37,6 +38,7 @@ import java.util.Objects; import static org.elasticsearch.TransportVersions.V_8_11_X; +import static org.elasticsearch.cluster.metadata.DataStream.AUTO_SHARDING_FIELD; public class GetDataStreamAction extends ActionType { @@ -179,6 +181,10 @@ public static class DataStreamInfo implements SimpleDiffable, To public static final ParseField TEMPORAL_RANGES = new ParseField("temporal_ranges"); public static final ParseField TEMPORAL_RANGE_START = new ParseField("start"); public static final ParseField TEMPORAL_RANGE_END = new ParseField("end"); + public static final ParseField TIME_SINCE_LAST_AUTO_SHARD_EVENT = new ParseField("time_since_last_auto_shard_event"); + public static final ParseField TIME_SINCE_LAST_AUTO_SHARD_EVENT_MILLIS = new ParseField( + "time_since_last_auto_shard_event_millis" + ); private final DataStream dataStream; private final ClusterHealthStatus dataStreamStatus; @@ -348,6 +354,17 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params, @Nulla if (DataStream.isFailureStoreEnabled()) { builder.field(DataStream.FAILURE_STORE_FIELD.getPreferredName(), dataStream.isFailureStore()); } + if (dataStream.getAutoShardingEvent() != null) { + DataStreamAutoShardingEvent autoShardingEvent = dataStream.getAutoShardingEvent(); + builder.startObject(AUTO_SHARDING_FIELD.getPreferredName()); + autoShardingEvent.toXContent(builder, params); + builder.humanReadableField( + 
TIME_SINCE_LAST_AUTO_SHARD_EVENT_MILLIS.getPreferredName(), + TIME_SINCE_LAST_AUTO_SHARD_EVENT.getPreferredName(), + autoShardingEvent.getTimeSinceLastAutoShardingEvent(System::currentTimeMillis) + ); + builder.endObject(); + } if (timeSeries != null) { builder.startObject(TIME_SERIES.getPreferredName()); builder.startArray(TEMPORAL_RANGES.getPreferredName()); diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/AutoShardingResult.java b/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/AutoShardingResult.java new file mode 100644 index 0000000000000..7bbd3291caf3a --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/AutoShardingResult.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.datastreams.autosharding; + +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; + +import java.util.Arrays; + +import static org.elasticsearch.action.datastreams.autosharding.AutoShardingType.COOLDOWN_PREVENTED_DECREASE; +import static org.elasticsearch.action.datastreams.autosharding.AutoShardingType.COOLDOWN_PREVENTED_INCREASE; + +/** + * Represents an auto sharding recommendation. It includes the current and target number of shards together with a remaining cooldown + * period that needs to lapse before the current recommendation should be applied. + *

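+ * An INCREASE_SHARDS or DECREASE_SHARDS result always carries a cooldown of {@link TimeValue#ZERO} (the compact constructor below rejects anything else), while the COOLDOWN_PREVENTED_* types report how long the caller still has to wait before the recommended change may be applied.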
+ * If auto sharding is not applicable for a data stream (e.g. due to + * {@link DataStreamAutoShardingService#DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING}) the target number of shards will be -1 and cool down + * remaining {@link TimeValue#MAX_VALUE}. + */ +public record AutoShardingResult( + AutoShardingType type, + int currentNumberOfShards, + int targetNumberOfShards, + TimeValue coolDownRemaining, + @Nullable Double writeLoad +) { + + static final String COOLDOWN_PREVENTING_TYPES = Arrays.toString( + new AutoShardingType[] { COOLDOWN_PREVENTED_DECREASE, COOLDOWN_PREVENTED_INCREASE } + ); + + public AutoShardingResult { + if (type.equals(AutoShardingType.INCREASE_SHARDS) || type.equals(AutoShardingType.DECREASE_SHARDS)) { + if (coolDownRemaining.equals(TimeValue.ZERO) == false) { + throw new IllegalArgumentException( + "The increase/decrease shards events must have a cooldown period of zero. Use one of [" + + COOLDOWN_PREVENTING_TYPES + + "] types indead" + ); + } + } + } + + public static final AutoShardingResult NOT_APPLICABLE_RESULT = new AutoShardingResult( + AutoShardingType.NOT_APPLICABLE, + -1, + -1, + TimeValue.MAX_VALUE, + null + ); + +} diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/AutoShardingType.java b/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/AutoShardingType.java new file mode 100644 index 0000000000000..50d3027abbc88 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/AutoShardingType.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.datastreams.autosharding; + +/** + * Represents the type of recommendation the auto sharding service provided. + */ +public enum AutoShardingType { + INCREASE_SHARDS, + DECREASE_SHARDS, + COOLDOWN_PREVENTED_INCREASE, + COOLDOWN_PREVENTED_DECREASE, + NO_CHANGE_REQUIRED, + NOT_APPLICABLE +} diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingService.java b/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingService.java new file mode 100644 index 0000000000000..e830f538d222f --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingService.java @@ -0,0 +1,415 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.datastreams.autosharding; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.IndexMetadataStats; +import org.elasticsearch.cluster.metadata.IndexWriteLoad; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.features.FeatureService; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.index.Index; + +import java.util.List; +import java.util.Objects; +import java.util.OptionalDouble; +import java.util.function.Function; +import java.util.function.LongSupplier; + +import static org.elasticsearch.action.datastreams.autosharding.AutoShardingResult.NOT_APPLICABLE_RESULT; + +/** + * Calculates the optimal number of shards the data stream write index should have based on the indexing load. + */ +public class DataStreamAutoShardingService { + + private static final Logger logger = LogManager.getLogger(DataStreamAutoShardingService.class); + public static final String DATA_STREAMS_AUTO_SHARDING_ENABLED = "data_streams.auto_sharding.enabled"; + + public static final NodeFeature DATA_STREAM_AUTO_SHARDING_FEATURE = new NodeFeature("data_stream.auto_sharding"); + + public static final Setting> DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING = Setting.listSetting( + "data_streams.auto_sharding.excludes", + List.of("*"), + Function.identity(), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Represents the minimum amount of time between two scaling events if the next event will increase the number of shards. + * We've chosen a value of 4.5minutes by default, just lower than the data stream lifecycle poll interval so we can increase shards with + * every DSL run, but we don't want it to be lower/0 as data stream lifecycle might run more often than the poll interval in case of + * a master failover. + */ + public static final Setting DATA_STREAMS_AUTO_SHARDING_INCREASE_SHARDS_COOLDOWN = Setting.timeSetting( + "data_streams.auto_sharding.increase_shards.cooldown", + TimeValue.timeValueSeconds(270), + TimeValue.timeValueSeconds(0), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Represents the minimum amount of time between two scaling events if the next event will reduce the number of shards. + */ + public static final Setting DATA_STREAMS_AUTO_SHARDING_DECREASE_SHARDS_COOLDOWN = Setting.timeSetting( + "data_streams.auto_sharding.decrease_shards.cooldown", + TimeValue.timeValueDays(3), + TimeValue.timeValueSeconds(0), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Represents the minimum number of write threads we expect a node to have in the environments where auto sharding will be enabled. 
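+ * Together with {@link #CLUSTER_AUTO_SHARDING_MAX_WRITE_THREADS} this bounds the shard count computed by {@code computeOptimalNumberOfShards} further down in this class: with the defaults of 2 and 32, a write-index load of 4.0 yields max(min(ceil(4.0 / 1.0), 3), ceil(4.0 / 16.0)) = 3 shards, while a load of 49.0 yields max(3, ceil(49.0 / 16.0)) = 4 shards.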
+ */ + public static final Setting CLUSTER_AUTO_SHARDING_MIN_WRITE_THREADS = Setting.intSetting( + "cluster.auto_sharding.min_write_threads", + 2, + 1, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Represents the maximum number of write threads we expect a node to have in the environments where auto sharding will be enabled. + */ + public static final Setting CLUSTER_AUTO_SHARDING_MAX_WRITE_THREADS = Setting.intSetting( + "cluster.auto_sharding.max_write_threads", + 32, + 1, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + private final ClusterService clusterService; + private final boolean isAutoShardingEnabled; + private final FeatureService featureService; + private final LongSupplier nowSupplier; + private volatile TimeValue increaseShardsCooldown; + private volatile TimeValue reduceShardsCooldown; + private volatile int minWriteThreads; + private volatile int maxWriteThreads; + private volatile List dataStreamExcludePatterns; + + public DataStreamAutoShardingService( + Settings settings, + ClusterService clusterService, + FeatureService featureService, + LongSupplier nowSupplier + ) { + this.clusterService = clusterService; + this.isAutoShardingEnabled = settings.getAsBoolean(DATA_STREAMS_AUTO_SHARDING_ENABLED, false); + this.increaseShardsCooldown = DATA_STREAMS_AUTO_SHARDING_INCREASE_SHARDS_COOLDOWN.get(settings); + this.reduceShardsCooldown = DATA_STREAMS_AUTO_SHARDING_DECREASE_SHARDS_COOLDOWN.get(settings); + this.minWriteThreads = CLUSTER_AUTO_SHARDING_MIN_WRITE_THREADS.get(settings); + this.maxWriteThreads = CLUSTER_AUTO_SHARDING_MAX_WRITE_THREADS.get(settings); + this.dataStreamExcludePatterns = DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING.get(settings); + this.featureService = featureService; + this.nowSupplier = nowSupplier; + } + + public void init() { + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(DATA_STREAMS_AUTO_SHARDING_INCREASE_SHARDS_COOLDOWN, this::updateIncreaseShardsCooldown); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(DATA_STREAMS_AUTO_SHARDING_DECREASE_SHARDS_COOLDOWN, this::updateReduceShardsCooldown); + clusterService.getClusterSettings().addSettingsUpdateConsumer(CLUSTER_AUTO_SHARDING_MIN_WRITE_THREADS, this::updateMinWriteThreads); + clusterService.getClusterSettings().addSettingsUpdateConsumer(CLUSTER_AUTO_SHARDING_MAX_WRITE_THREADS, this::updateMaxWriteThreads); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING, this::updateDataStreamExcludePatterns); + } + + /** + * Computes the optimal number of shards for the provided data stream according to the write index's indexing load (to check if we must + * increase the number of shards, whilst the heuristics for decreasing the number of shards _might_ use the provided write indexing + * load). 
+ * The result type will indicate the recommendation of the auto sharding service: + * - not applicable if the data stream is excluded from auto sharding as configured by + * {@link #DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING} or if the auto sharding functionality is disabled according to + * {@link #DATA_STREAMS_AUTO_SHARDING_ENABLED}, or if the cluster doesn't have the feature available + * - increase the number of shards if the optimal number of shards it deems necessary for the provided data stream is GT the current number + * of shards + * - decrease the number of shards if the optimal number of shards it deems necessary for the provided data stream is LT the current + * number of shards + * + * If the recommendation is to INCREASE/DECREASE shards the reported cooldown period will be TimeValue.ZERO. + * If the auto sharding service thinks the number of shards must be changed but it can't recommend a change due to the cooldown + * period not lapsing, the result will be of type {@link AutoShardingType#COOLDOWN_PREVENTED_INCREASE} or + * {@link AutoShardingType#COOLDOWN_PREVENTED_DECREASE} with the remaining cooldown configured and the number of shards that should + * be configured for the data stream once the remaining cooldown lapses as the target number of shards. + * + * The NOT_APPLICABLE type result will report a cooldown period of TimeValue.MAX_VALUE. + * + * The NO_CHANGE_REQUIRED type will always report a cooldown period of TimeValue.ZERO (as + * there'll be no new auto sharding event). + */ + public AutoShardingResult calculate(ClusterState state, DataStream dataStream, @Nullable Double writeIndexLoad) { + Metadata metadata = state.metadata(); + if (isAutoShardingEnabled == false) { + logger.debug("Data stream auto sharding service is not enabled."); + return NOT_APPLICABLE_RESULT; + } + + if (featureService.clusterHasFeature(state, DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE) == false) { + logger.debug( + "Data stream auto sharding service cannot compute the optimal number of shards for data stream [{}] because the cluster " + + "doesn't have the auto sharding feature", + dataStream.getName() + ); + return NOT_APPLICABLE_RESULT; + } + + if (dataStreamExcludePatterns.stream().anyMatch(pattern -> Regex.simpleMatch(pattern, dataStream.getName()))) { + logger.debug( + "Data stream [{}] is excluded from auto sharding via the [{}] setting", + dataStream.getName(), + DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING.getKey() + ); + return NOT_APPLICABLE_RESULT; + } + + if (writeIndexLoad == null) { + logger.debug( + "Data stream auto sharding service cannot compute the optimal number of shards for data stream [{}] as the write index " + + "load is not available", + dataStream.getName() + ); + return NOT_APPLICABLE_RESULT; + } + return innerCalculate(metadata, dataStream, writeIndexLoad, nowSupplier); + } + + private AutoShardingResult innerCalculate(Metadata metadata, DataStream dataStream, double writeIndexLoad, LongSupplier nowSupplier) { + // increasing the number of shards is calculated solely based on the index load of the write index + IndexMetadata writeIndex = metadata.index(dataStream.getWriteIndex()); + assert writeIndex != null : "the data stream write index must exist in the provided cluster metadata"; + AutoShardingResult increaseShardsResult = getIncreaseShardsResult(dataStream, writeIndexLoad, nowSupplier, writeIndex); + return Objects.requireNonNullElseGet( + increaseShardsResult, + () -> 
getDecreaseShardsResult( + metadata, + dataStream, + writeIndexLoad, + nowSupplier, + writeIndex, + getRemainingDecreaseShardsCooldown(metadata, dataStream) + ) + ); + + } + + @Nullable + private AutoShardingResult getIncreaseShardsResult( + DataStream dataStream, + double writeIndexLoad, + LongSupplier nowSupplier, + IndexMetadata writeIndex + ) { + // increasing the number of shards is calculated solely based on the index load of the write index + long optimalShardCount = computeOptimalNumberOfShards(minWriteThreads, maxWriteThreads, writeIndexLoad); + if (optimalShardCount > writeIndex.getNumberOfShards()) { + TimeValue timeSinceLastAutoShardingEvent = dataStream.getAutoShardingEvent() != null + ? dataStream.getAutoShardingEvent().getTimeSinceLastAutoShardingEvent(nowSupplier) + : TimeValue.MAX_VALUE; + + TimeValue coolDownRemaining = TimeValue.timeValueMillis( + Math.max(0L, increaseShardsCooldown.millis() - timeSinceLastAutoShardingEvent.millis()) + ); + logger.debug( + "data stream autosharding service recommends increasing the number of shards from [{}] to [{}] after [{}] cooldown for " + + "data stream [{}]", + writeIndex.getNumberOfShards(), + optimalShardCount, + coolDownRemaining, + dataStream.getName() + ); + return new AutoShardingResult( + coolDownRemaining.equals(TimeValue.ZERO) ? AutoShardingType.INCREASE_SHARDS : AutoShardingType.COOLDOWN_PREVENTED_INCREASE, + writeIndex.getNumberOfShards(), + Math.toIntExact(optimalShardCount), + coolDownRemaining, + writeIndexLoad + ); + } + return null; + } + + /** + * Calculates the amount of time remaining before we can consider reducing the number of shards. + * This reference for the remaining time math is either the time since the last auto sharding event (if available) or otherwise the + * oldest index in the data stream. + */ + private TimeValue getRemainingDecreaseShardsCooldown(Metadata metadata, DataStream dataStream) { + Index oldestBackingIndex = dataStream.getIndices().get(0); + IndexMetadata oldestIndexMeta = metadata.getIndexSafe(oldestBackingIndex); + + return dataStream.getAutoShardingEvent() == null + // without a pre-existing auto sharding event we wait until the oldest index has been created longer than the decrease_shards + // cool down period "ago" so we don't immediately reduce the number of shards after a data stream is created + ? 
TimeValue.timeValueMillis( + Math.max(0L, oldestIndexMeta.getCreationDate() + reduceShardsCooldown.millis() - nowSupplier.getAsLong()) + ) + : TimeValue.timeValueMillis( + Math.max( + 0L, + reduceShardsCooldown.millis() - dataStream.getAutoShardingEvent() + .getTimeSinceLastAutoShardingEvent(nowSupplier) + .millis() + ) + ); + } + + private AutoShardingResult getDecreaseShardsResult( + Metadata metadata, + DataStream dataStream, + double writeIndexLoad, + LongSupplier nowSupplier, + IndexMetadata writeIndex, + TimeValue remainingReduceShardsCooldown + ) { + double maxIndexLoadWithinCoolingPeriod = getMaxIndexLoadWithinCoolingPeriod( + metadata, + dataStream, + writeIndexLoad, + reduceShardsCooldown, + nowSupplier + ); + + logger.trace( + "calculating the optimal number of shards for a potential decrease in number of shards for data stream [{}] with the" + + " max indexing load [{}] over the decrease shards cool down period", + dataStream.getName(), + maxIndexLoadWithinCoolingPeriod + ); + long optimalShardCount = computeOptimalNumberOfShards(minWriteThreads, maxWriteThreads, maxIndexLoadWithinCoolingPeriod); + if (optimalShardCount < writeIndex.getNumberOfShards()) { + logger.debug( + "data stream autosharding service recommends decreasing the number of shards from [{}] to [{}] after [{}] cooldown for " + + "data stream [{}]", + writeIndex.getNumberOfShards(), + optimalShardCount, + remainingReduceShardsCooldown, + dataStream.getName() + ); + + // we should reduce the number of shards + return new AutoShardingResult( + remainingReduceShardsCooldown.equals(TimeValue.ZERO) + ? AutoShardingType.DECREASE_SHARDS + : AutoShardingType.COOLDOWN_PREVENTED_DECREASE, + writeIndex.getNumberOfShards(), + Math.toIntExact(optimalShardCount), + remainingReduceShardsCooldown, + maxIndexLoadWithinCoolingPeriod + ); + } + + logger.trace( + "data stream autosharding service recommends maintaining the number of shards [{}] for data stream [{}]", + writeIndex.getNumberOfShards(), + dataStream.getName() + ); + return new AutoShardingResult( + AutoShardingType.NO_CHANGE_REQUIRED, + writeIndex.getNumberOfShards(), + writeIndex.getNumberOfShards(), + TimeValue.ZERO, + maxIndexLoadWithinCoolingPeriod + ); + } + + // Visible for testing + static long computeOptimalNumberOfShards(int minNumberWriteThreads, int maxNumberWriteThreads, double indexingLoad) { + return Math.max( + Math.min(roundUp(indexingLoad / (minNumberWriteThreads / 2.0)), 3), + roundUp(indexingLoad / (maxNumberWriteThreads / 2.0)) + ); + } + + private static long roundUp(double value) { + return (long) Math.ceil(value); + } + + // Visible for testing + /** + * Calculates the maximum write index load observed for the provided data stream across all the backing indices that were created + * during the provide {@param coolingPeriod} (note: to cover the entire cooling period, the backing index created before the cooling + * period is also considered). 
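+ * For example, if the write index currently reports a load of 2.0 but a backing index that rolled over within the cooling period peaked at a summed per-shard write load of 3.5, this method returns 3.5, so the decrease path sizes the data stream for its recent peak rather than for the quieter present.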
+ */ + static double getMaxIndexLoadWithinCoolingPeriod( + Metadata metadata, + DataStream dataStream, + double writeIndexLoad, + TimeValue coolingPeriod, + LongSupplier nowSupplier + ) { + // for reducing the number of shards we look at more than just the write index + List writeLoadsWithinCoolingPeriod = DataStream.getIndicesWithinMaxAgeRange( + dataStream, + metadata::getIndexSafe, + coolingPeriod, + nowSupplier + ) + .stream() + .filter(index -> index.equals(dataStream.getWriteIndex()) == false) + .map(metadata::index) + .filter(Objects::nonNull) + .map(IndexMetadata::getStats) + .filter(Objects::nonNull) + .map(IndexMetadataStats::writeLoad) + .filter(Objects::nonNull) + .toList(); + + // assume the current write index load is the highest observed and look back to find the actual maximum + double maxIndexLoadWithinCoolingPeriod = writeIndexLoad; + for (IndexWriteLoad writeLoad : writeLoadsWithinCoolingPeriod) { + double totalIndexLoad = 0; + for (int shardId = 0; shardId < writeLoad.numberOfShards(); shardId++) { + final OptionalDouble writeLoadForShard = writeLoad.getWriteLoadForShard(shardId); + totalIndexLoad += writeLoadForShard.orElse(0); + } + + if (totalIndexLoad > maxIndexLoadWithinCoolingPeriod) { + maxIndexLoadWithinCoolingPeriod = totalIndexLoad; + } + } + return maxIndexLoadWithinCoolingPeriod; + } + + void updateIncreaseShardsCooldown(TimeValue scaleUpCooldown) { + this.increaseShardsCooldown = scaleUpCooldown; + } + + void updateReduceShardsCooldown(TimeValue scaleDownCooldown) { + this.reduceShardsCooldown = scaleDownCooldown; + } + + void updateMinWriteThreads(int minNumberWriteThreads) { + this.minWriteThreads = minNumberWriteThreads; + } + + void updateMaxWriteThreads(int maxNumberWriteThreads) { + this.maxWriteThreads = maxNumberWriteThreads; + } + + private void updateDataStreamExcludePatterns(List newExcludePatterns) { + this.dataStreamExcludePatterns = newExcludePatterns; + } +} diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java index 095a5ec8f5594..856571c305615 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Predicates; import org.elasticsearch.index.mapper.TimeSeriesParams; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.InstantiatingObjectParser; @@ -567,7 +568,7 @@ private String[] filterIndices(int length, Predicate pred) { } FieldCapabilities build(boolean withIndices) { - final String[] indices = withIndices ? filterIndices(totalIndices, ic -> true) : null; + final String[] indices = withIndices ? filterIndices(totalIndices, Predicates.always()) : null; // Iff this field is searchable in some indices AND non-searchable in others // we record the list of non-searchable indices @@ -603,7 +604,7 @@ FieldCapabilities build(boolean withIndices) { // Collect all indices that have this field. 
If it is marked differently in different indices, we cannot really // make a decisions which index is "right" and which index is "wrong" so collecting all indices where this field // is present is probably the only sensible thing to do here - metricConflictsIndices = Objects.requireNonNullElseGet(indices, () -> filterIndices(totalIndices, ic -> true)); + metricConflictsIndices = Objects.requireNonNullElseGet(indices, () -> filterIndices(totalIndices, Predicates.always())); } else { metricConflictsIndices = null; } diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFetcher.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFetcher.java index 363f50542c4dc..8025923dbdd33 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFetcher.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFetcher.java @@ -120,11 +120,8 @@ private FieldCapabilitiesIndexResponse doFetch( // even if the mapping is the same if we return only fields with values we need // to make sure that we consider all the shard-mappings pair, that is why we // calculate a different hash for this particular case. - StringBuilder sb = new StringBuilder(indexService.getShard(shardId.getId()).getShardUuid()); - if (mapping != null) { - sb.append(mapping.getSha256()); - } - indexMappingHash = sb.toString(); + final String shardUuid = indexService.getShard(shardId.getId()).getShardUuid(); + indexMappingHash = mapping == null ? shardUuid : shardUuid + mapping.getSha256(); } if (indexMappingHash != null) { final Map existing = indexMappingHashToResponses.get(indexMappingHash); @@ -160,16 +157,19 @@ static Map retrieveFieldCaps( ) { boolean includeParentObjects = checkIncludeParents(filters); - Predicate filter = buildFilter(indexFieldfilter, filters, types, context); + Predicate filter = buildFilter(filters, types, context); boolean isTimeSeriesIndex = context.getIndexSettings().getTimestampBounds() != null; + var fieldInfos = indexShard.getFieldInfos(); + includeEmptyFields = includeEmptyFields || enableFieldHasValue == false; Map responseMap = new HashMap<>(); for (String field : context.getAllFieldNames()) { if (fieldNameFilter.test(field) == false) { continue; } MappedFieldType ft = context.getFieldType(field); - boolean includeField = includeEmptyFields || enableFieldHasValue == false || ft.fieldHasValue(indexShard.getFieldInfos()); - if (includeField && filter.test(ft)) { + if ((includeEmptyFields || ft.fieldHasValue(fieldInfos)) + && (indexFieldfilter.test(ft.name()) || context.isMetadataField(ft.name())) + && (filter == null || filter.test(ft))) { IndexFieldCapabilities fieldCap = new IndexFieldCapabilities( field, ft.familyTypeName(), @@ -245,17 +245,12 @@ private static boolean alwaysMatches(QueryBuilder indexFilter) { return indexFilter == null || indexFilter instanceof MatchAllQueryBuilder; } - private static Predicate buildFilter( - Predicate fieldFilter, - String[] filters, - String[] fieldTypes, - SearchExecutionContext context - ) { + private static Predicate buildFilter(String[] filters, String[] fieldTypes, SearchExecutionContext context) { // security filters don't exclude metadata fields - Predicate fcf = ft -> fieldFilter.test(ft.name()) || context.isMetadataField(ft.name()); + Predicate fcf = null; if (fieldTypes.length > 0) { Set acceptedTypes = Set.of(fieldTypes); - fcf = fcf.and(ft -> acceptedTypes.contains(ft.familyTypeName())); + fcf = ft -> acceptedTypes.contains(ft.familyTypeName()); } 
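+        // fcf may still be null here: each named filter below either becomes the first predicate or is ANDed on, and retrieveFieldCaps treats a null predicate as match-all and skips the test entirely.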
for (String filter : filters) { if ("parent".equals(filter) || "-parent".equals(filter)) { @@ -268,7 +263,7 @@ private static Predicate buildFilter( case "-multifield" -> ft -> context.isMultiField(ft.name()) == false; default -> throw new IllegalArgumentException("Unknown field caps filter [" + filter + "]"); }; - fcf = fcf.and(next); + fcf = fcf == null ? next : fcf.and(next); } return fcf; } diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java index 722808af879d6..cc72dd80dceac 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java @@ -24,7 +24,7 @@ import java.util.Map; import java.util.Objects; -final class FieldCapabilitiesIndexResponse implements Writeable { +public final class FieldCapabilitiesIndexResponse implements Writeable { private static final TransportVersion MAPPING_HASH_VERSION = TransportVersions.V_8_2_0; private final String indexName; @@ -34,7 +34,7 @@ final class FieldCapabilitiesIndexResponse implements Writeable { private final boolean canMatch; private final transient TransportVersion originVersion; - FieldCapabilitiesIndexResponse( + public FieldCapabilitiesIndexResponse( String indexName, @Nullable String indexMappingHash, Map responseMap, diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java index 3a9d403ffb565..4b1c256bdeb71 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java @@ -75,7 +75,7 @@ public FieldCapabilitiesRequest() {} *

* Note that when using the high-level REST client, results are always merged (this flag is always considered 'true'). */ - boolean isMergeResults() { + public boolean isMergeResults() { return mergeResults; } @@ -85,7 +85,7 @@ boolean isMergeResults() { *

* Note that when using the high-level REST client, results are always merged (this flag is always considered 'true'). */ - void setMergeResults(boolean mergeResults) { + public void setMergeResults(boolean mergeResults) { this.mergeResults = mergeResults; } diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java index 84388864166dc..4946f6ca7835d 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java @@ -57,7 +57,7 @@ public FieldCapabilitiesResponse(String[] indices, Map indexResponses, List failures) { + public FieldCapabilitiesResponse(List indexResponses, List failures) { this(Strings.EMPTY_ARRAY, Collections.emptyMap(), indexResponses, failures); } @@ -117,7 +117,7 @@ public List getFailures() { /** * Returns the actual per-index field caps responses */ - List getIndexResponses() { + public List getIndexResponses() { return indexResponses; } diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/ResponseRewriter.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/ResponseRewriter.java index 38b0287522207..c4e9b1bce6d81 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/ResponseRewriter.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/ResponseRewriter.java @@ -10,6 +10,7 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; +import org.elasticsearch.core.Predicates; import java.util.HashMap; import java.util.Map; @@ -49,7 +50,7 @@ private static Function buildTra String[] filters, String[] allowedTypes ) { - Predicate test = ifc -> true; + Predicate test = Predicates.always(); Set objects = null; Set nestedObjects = null; if (allowedTypes.length > 0) { diff --git a/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java b/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java index d3d19fe1714ba..db26da382d3e1 100644 --- a/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java +++ b/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java @@ -296,11 +296,11 @@ private void tryGetFromTranslog(GetRequest request, IndexShard indexShard, Disco } static DiscoveryNode getCurrentNodeOfPrimary(ClusterState clusterState, ShardId shardId) { - var shardRoutingTable = clusterState.routingTable().shardRoutingTable(shardId); - if (shardRoutingTable.primaryShard() == null || shardRoutingTable.primaryShard().active() == false) { + final var primaryShard = clusterState.routingTable().shardRoutingTable(shardId).primaryShard(); + if (primaryShard.active() == false) { throw new NoShardAvailableActionException(shardId, "primary shard is not active"); } - DiscoveryNode node = clusterState.nodes().get(shardRoutingTable.primaryShard().currentNodeId()); + DiscoveryNode node = clusterState.nodes().get(primaryShard.currentNodeId()); assert node != null; return node; } diff --git a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java index 149cdb9206b34..48c2f1890ba08 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java @@ -44,66 +44,68 @@ final class 
ExpandSearchPhase extends SearchPhase { * Returns true iff the search request has inner hits and needs field collapsing */ private boolean isCollapseRequest() { - final SearchRequest searchRequest = context.getRequest(); - return searchRequest.source() != null - && searchRequest.source().collapse() != null - && searchRequest.source().collapse().getInnerHits().isEmpty() == false; + final var searchSource = context.getRequest().source(); + return searchSource != null && searchSource.collapse() != null && searchSource.collapse().getInnerHits().isEmpty() == false; } @Override public void run() { - if (isCollapseRequest() && searchHits.getHits().length > 0) { - SearchRequest searchRequest = context.getRequest(); - CollapseBuilder collapseBuilder = searchRequest.source().collapse(); - final List innerHitBuilders = collapseBuilder.getInnerHits(); - MultiSearchRequest multiRequest = new MultiSearchRequest(); - if (collapseBuilder.getMaxConcurrentGroupRequests() > 0) { - multiRequest.maxConcurrentSearchRequests(collapseBuilder.getMaxConcurrentGroupRequests()); + if (isCollapseRequest() == false || searchHits.getHits().length == 0) { + onPhaseDone(); + } else { + doRun(); + } + } + + private void doRun() { + SearchRequest searchRequest = context.getRequest(); + CollapseBuilder collapseBuilder = searchRequest.source().collapse(); + final List innerHitBuilders = collapseBuilder.getInnerHits(); + MultiSearchRequest multiRequest = new MultiSearchRequest(); + if (collapseBuilder.getMaxConcurrentGroupRequests() > 0) { + multiRequest.maxConcurrentSearchRequests(collapseBuilder.getMaxConcurrentGroupRequests()); + } + for (SearchHit hit : searchHits.getHits()) { + BoolQueryBuilder groupQuery = new BoolQueryBuilder(); + Object collapseValue = hit.field(collapseBuilder.getField()).getValue(); + if (collapseValue != null) { + groupQuery.filter(QueryBuilders.matchQuery(collapseBuilder.getField(), collapseValue)); + } else { + groupQuery.mustNot(QueryBuilders.existsQuery(collapseBuilder.getField())); + } + QueryBuilder origQuery = searchRequest.source().query(); + if (origQuery != null) { + groupQuery.must(origQuery); + } + for (InnerHitBuilder innerHitBuilder : innerHitBuilders) { + CollapseBuilder innerCollapseBuilder = innerHitBuilder.getInnerCollapseBuilder(); + SearchSourceBuilder sourceBuilder = buildExpandSearchSourceBuilder(innerHitBuilder, innerCollapseBuilder).query(groupQuery) + .postFilter(searchRequest.source().postFilter()) + .runtimeMappings(searchRequest.source().runtimeMappings()); + SearchRequest groupRequest = new SearchRequest(searchRequest); + groupRequest.source(sourceBuilder); + multiRequest.add(groupRequest); } + } + context.getSearchTransport().sendExecuteMultiSearch(multiRequest, context.getTask(), ActionListener.wrap(response -> { + Iterator it = response.iterator(); for (SearchHit hit : searchHits.getHits()) { - BoolQueryBuilder groupQuery = new BoolQueryBuilder(); - Object collapseValue = hit.field(collapseBuilder.getField()).getValue(); - if (collapseValue != null) { - groupQuery.filter(QueryBuilders.matchQuery(collapseBuilder.getField(), collapseValue)); - } else { - groupQuery.mustNot(QueryBuilders.existsQuery(collapseBuilder.getField())); - } - QueryBuilder origQuery = searchRequest.source().query(); - if (origQuery != null) { - groupQuery.must(origQuery); - } for (InnerHitBuilder innerHitBuilder : innerHitBuilders) { - CollapseBuilder innerCollapseBuilder = innerHitBuilder.getInnerCollapseBuilder(); - SearchSourceBuilder sourceBuilder = 
buildExpandSearchSourceBuilder(innerHitBuilder, innerCollapseBuilder).query( - groupQuery - ).postFilter(searchRequest.source().postFilter()).runtimeMappings(searchRequest.source().runtimeMappings()); - SearchRequest groupRequest = new SearchRequest(searchRequest); - groupRequest.source(sourceBuilder); - multiRequest.add(groupRequest); - } - } - context.getSearchTransport().sendExecuteMultiSearch(multiRequest, context.getTask(), ActionListener.wrap(response -> { - Iterator it = response.iterator(); - for (SearchHit hit : searchHits.getHits()) { - for (InnerHitBuilder innerHitBuilder : innerHitBuilders) { - MultiSearchResponse.Item item = it.next(); - if (item.isFailure()) { - context.onPhaseFailure(this, "failed to expand hits", item.getFailure()); - return; - } - SearchHits innerHits = item.getResponse().getHits(); - if (hit.getInnerHits() == null) { - hit.setInnerHits(Maps.newMapWithExpectedSize(innerHitBuilders.size())); - } - hit.getInnerHits().put(innerHitBuilder.getName(), innerHits); - innerHits.mustIncRef(); + MultiSearchResponse.Item item = it.next(); + if (item.isFailure()) { + context.onPhaseFailure(this, "failed to expand hits", item.getFailure()); + return; } + SearchHits innerHits = item.getResponse().getHits(); + if (hit.getInnerHits() == null) { + hit.setInnerHits(Maps.newMapWithExpectedSize(innerHitBuilders.size())); + } + hit.getInnerHits().put(innerHitBuilder.getName(), innerHits); + innerHits.mustIncRef(); } - onPhaseDone(); - }, context::onFailure)); - } else { + } onPhaseDone(); - } + }, context::onFailure)); } private static SearchSourceBuilder buildExpandSearchSourceBuilder(InnerHitBuilder options, CollapseBuilder innerCollapseBuilder) { diff --git a/server/src/main/java/org/elasticsearch/action/search/FetchLookupFieldsPhase.java b/server/src/main/java/org/elasticsearch/action/search/FetchLookupFieldsPhase.java index 9c50d534ac4ce..0605e23fc343c 100644 --- a/server/src/main/java/org/elasticsearch/action/search/FetchLookupFieldsPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/FetchLookupFieldsPhase.java @@ -75,6 +75,10 @@ public void run() { context.sendSearchResponse(searchResponse, queryResults); return; } + doRun(clusters); + } + + private void doRun(List clusters) { final MultiSearchRequest multiSearchRequest = new MultiSearchRequest(); for (Cluster cluster : clusters) { // Do not prepend the clusterAlias to the targetIndex if the search request is already on the remote cluster. 
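For context on the ExpandSearchPhase change above, the following standalone sketch mirrors the per-hit group query that doRun() builds when expanding collapsed hits. It is illustrative only and not part of this patch: the class and method names are invented for the example, and it assumes the Elasticsearch server query builders are on the classpath.

import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

final class CollapseExpandSketch {
    // One group query per collapsed hit: pin the collapse value (or its absence) and
    // re-apply the original top-level query, as the phase does before adding one
    // SearchRequest per inner-hit builder to the MultiSearchRequest.
    static BoolQueryBuilder groupQuery(String collapseField, Object collapseValue, QueryBuilder originalQuery) {
        BoolQueryBuilder groupQuery = new BoolQueryBuilder();
        if (collapseValue != null) {
            groupQuery.filter(QueryBuilders.matchQuery(collapseField, collapseValue));
        } else {
            groupQuery.mustNot(QueryBuilders.existsQuery(collapseField));
        }
        if (originalQuery != null) {
            groupQuery.must(originalQuery);
        }
        return groupQuery;
    }
}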
diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java index bad0ed488d03b..793a5bfe4e9d4 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java @@ -73,7 +73,10 @@ public void run() { sendResponse(reducedQueryPhase, fetchResults); return; } + doRun(scoreDocs, reducedQueryPhase); + } + private void doRun(ScoreDoc[] scoreDocs, SearchPhaseController.ReducedQueryPhase reducedQueryPhase) { final List[] docIdsToLoad = SearchPhaseController.fillDocIdsToLoad(queryResults.length(), scoreDocs); final ScoreDoc[] lastEmittedDocPerShard = SearchPhaseController.getLastEmittedDocPerShard( reducedQueryPhase, diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index d80322b2954c6..0922e15999e8c 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -51,6 +51,7 @@ import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Predicates; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; @@ -198,8 +199,8 @@ private Map buildPerIndexOriginalIndices( String[] aliases = indexNameExpressionResolver.indexAliases( clusterState, index, - aliasMetadata -> true, - dataStreamAlias -> true, + Predicates.always(), + Predicates.always(), true, indicesAndAliases ); diff --git a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java index 6e94ea11c652d..3b03b1cf0a4f6 100644 --- a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java +++ b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java @@ -8,8 +8,11 @@ package org.elasticsearch.action.support; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.core.Nullable; @@ -40,12 +43,27 @@ * target does not exist. * @param wildcardOptions, applies only to wildcard expressions and defines how the wildcards will be expanded and if it will * be acceptable to have expressions that results to no indices. - * @param generalOptions, applies to all the resolved indices and defines if throttled will be included and if certain type of - * aliases or indices are allowed, or they will throw an error. + * @param gatekeeperOptions, applies to all the resolved indices and defines if throttled will be included and if certain type of + * aliases or indices are allowed, or they will throw an error. It acts as a gatekeeper when an action + * does not support certain options. 
+ * @param failureStoreOptions, applies to all indices already matched and controls the type of indices that will be returned. Currently, + * there are two types, data stream failure indices (only certain data streams have them) and data stream + * backing indices or stand-alone indices. */ -public record IndicesOptions(ConcreteTargetOptions concreteTargetOptions, WildcardOptions wildcardOptions, GeneralOptions generalOptions) - implements - ToXContentFragment { +public record IndicesOptions( + ConcreteTargetOptions concreteTargetOptions, + WildcardOptions wildcardOptions, + GatekeeperOptions gatekeeperOptions, + FailureStoreOptions failureStoreOptions +) implements ToXContentFragment { + + public IndicesOptions( + ConcreteTargetOptions concreteTargetOptions, + WildcardOptions wildcardOptions, + GatekeeperOptions gatekeeperOptions + ) { + this(concreteTargetOptions, wildcardOptions, gatekeeperOptions, FailureStoreOptions.DEFAULT); + } public static IndicesOptions.Builder builder() { return new Builder(); @@ -286,20 +304,28 @@ public static Builder builder(WildcardOptions wildcardOptions) { } /** - * These options apply on all indices that have been selected by the other Options. It can either filter the response or - * define what type of indices or aliases are not allowed which will result in an error response. + * The "gatekeeper" options apply to all indices that have been selected by the other Options. It contains two types of flags: + * - The "allow*" flags, whose purpose is to enable actions to define certain conditions that need to apply to the concrete indices + * they accept. For example, single-index actions will set allowAliasToMultipleIndices to false, while search will not accept a + * closed index etc. These options are not configurable by the end-user. + * - The ignoreThrottled flag, which is a deprecated flag that will filter out frozen indices. * @param allowAliasToMultipleIndices, allow aliases to multiple indices, true by default. * @param allowClosedIndices, allow closed indices, true by default. - * @param ignoreThrottled, filters out throttled (aka frozen indices), defaults to true. + * @param allowFailureIndices, allow failure indices in the response, true by default + * @param ignoreThrottled, filters out throttled (aka frozen indices), defaults to true. This is deprecated and the only option + * that only filters and never throws an error.
*/ - public record GeneralOptions(boolean allowAliasToMultipleIndices, boolean allowClosedIndices, @Deprecated boolean ignoreThrottled) - implements - ToXContentFragment { + public record GatekeeperOptions( + boolean allowAliasToMultipleIndices, + boolean allowClosedIndices, + boolean allowFailureIndices, + @Deprecated boolean ignoreThrottled + ) implements ToXContentFragment { public static final String IGNORE_THROTTLED = "ignore_throttled"; - public static final GeneralOptions DEFAULT = new GeneralOptions(true, true, false); + public static final GatekeeperOptions DEFAULT = new GatekeeperOptions(true, true, true, false); - public static GeneralOptions parseParameter(Object ignoreThrottled, GeneralOptions defaultOptions) { + public static GatekeeperOptions parseParameter(Object ignoreThrottled, GatekeeperOptions defaultOptions) { if (ignoreThrottled == null && defaultOptions != null) { return defaultOptions; } @@ -316,15 +342,17 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public static class Builder { private boolean allowAliasToMultipleIndices; private boolean allowClosedIndices; + private boolean allowFailureIndices; private boolean ignoreThrottled; public Builder() { this(DEFAULT); } - Builder(GeneralOptions options) { + Builder(GatekeeperOptions options) { allowAliasToMultipleIndices = options.allowAliasToMultipleIndices; allowClosedIndices = options.allowClosedIndices; + allowFailureIndices = options.allowFailureIndices; ignoreThrottled = options.ignoreThrottled; } @@ -346,6 +374,15 @@ public Builder allowClosedIndices(boolean allowClosedIndices) { return this; } + /** + * Failure indices are accepted when true, otherwise the resolution will throw an error. + * Defaults to true. + */ + public Builder allowFailureIndices(boolean allowFailureIndices) { + this.allowFailureIndices = allowFailureIndices; + return this; + } + /** * Throttled indices will not be included in the result. Defaults to false. */ @@ -354,8 +391,8 @@ public Builder ignoreThrottled(boolean ignoreThrottled) { return this; } - public GeneralOptions build() { - return new GeneralOptions(allowAliasToMultipleIndices, allowClosedIndices, ignoreThrottled); + public GatekeeperOptions build() { + return new GatekeeperOptions(allowAliasToMultipleIndices, allowClosedIndices, allowFailureIndices, ignoreThrottled); } } @@ -363,8 +400,102 @@ public static Builder builder() { return new Builder(); } - public static Builder builder(GeneralOptions generalOptions) { - return new Builder(generalOptions); + public static Builder builder(GatekeeperOptions gatekeeperOptions) { + return new Builder(gatekeeperOptions); + } + } + + /** + * Applies to all indices already matched and controls the type of indices that will be returned. There are two types, data stream + * failure indices (only certain data streams have them) and data stream backing indices or stand-alone indices. + * @param includeRegularIndices, when true regular or data stream backing indices will be retrieved. + * @param includeFailureIndices, when true data stream failure indices will be included. 
+ */ + public record FailureStoreOptions(boolean includeRegularIndices, boolean includeFailureIndices) + implements + Writeable, + ToXContentFragment { + + public static final String FAILURE_STORE = "failure_store"; + public static final String INCLUDE_ALL = "true"; + public static final String INCLUDE_ONLY_REGULAR_INDICES = "false"; + public static final String INCLUDE_ONLY_FAILURE_INDICES = "only"; + + public static final FailureStoreOptions DEFAULT = new FailureStoreOptions(true, false); + + public static FailureStoreOptions read(StreamInput in) throws IOException { + return new FailureStoreOptions(in.readBoolean(), in.readBoolean()); + } + + public static FailureStoreOptions parseParameters(Object failureStoreValue, FailureStoreOptions defaultOptions) { + if (failureStoreValue == null) { + return defaultOptions; + } + FailureStoreOptions.Builder builder = defaultOptions == null + ? new FailureStoreOptions.Builder() + : new FailureStoreOptions.Builder(defaultOptions); + return switch (failureStoreValue.toString()) { + case INCLUDE_ALL -> builder.includeRegularIndices(true).includeFailureIndices(true).build(); + case INCLUDE_ONLY_REGULAR_INDICES -> builder.includeRegularIndices(true).includeFailureIndices(false).build(); + case INCLUDE_ONLY_FAILURE_INDICES -> builder.includeRegularIndices(false).includeFailureIndices(true).build(); + default -> throw new IllegalArgumentException("No valid " + FAILURE_STORE + " value [" + failureStoreValue + "]"); + }; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.field(FAILURE_STORE, displayValue()); + } + + public String displayValue() { + if (includeRegularIndices && includeFailureIndices) { + return INCLUDE_ALL; + } else if (includeRegularIndices) { + return INCLUDE_ONLY_REGULAR_INDICES; + } + return INCLUDE_ONLY_FAILURE_INDICES; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeBoolean(includeRegularIndices); + out.writeBoolean(includeFailureIndices); + } + + public static class Builder { + private boolean includeRegularIndices; + private boolean includeFailureIndices; + + public Builder() { + this(DEFAULT); + } + + Builder(FailureStoreOptions options) { + includeRegularIndices = options.includeRegularIndices; + includeFailureIndices = options.includeFailureIndices; + } + + public Builder includeRegularIndices(boolean includeRegularIndices) { + this.includeRegularIndices = includeRegularIndices; + return this; + } + + public Builder includeFailureIndices(boolean includeFailureIndices) { + this.includeFailureIndices = includeFailureIndices; + return this; + } + + public FailureStoreOptions build() { + return new FailureStoreOptions(includeRegularIndices, includeFailureIndices); + } + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(FailureStoreOptions failureStoreOptions) { + return new Builder(failureStoreOptions); } } @@ -400,9 +531,10 @@ private enum Option { EXCLUDE_ALIASES, ALLOW_EMPTY_WILDCARD_EXPRESSIONS, ERROR_WHEN_ALIASES_TO_MULTIPLE_INDICES, - ERROR_WHEN_CLOSED_INDICES, - IGNORE_THROTTLED + IGNORE_THROTTLED, + + ALLOW_FAILURE_INDICES // Added in 8.14 } private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(IndicesOptions.class); @@ -415,7 +547,8 @@ private enum Option { public static final IndicesOptions DEFAULT = new IndicesOptions( ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS, WildcardOptions.DEFAULT, - 
GeneralOptions.DEFAULT + GatekeeperOptions.DEFAULT, + FailureStoreOptions.DEFAULT ); public static final IndicesOptions STRICT_EXPAND_OPEN = IndicesOptions.builder() @@ -428,7 +561,14 @@ private enum Option { .allowEmptyExpressions(true) .resolveAliases(true) ) - .generalOptions(GeneralOptions.builder().allowAliasToMultipleIndices(true).allowClosedIndices(true).ignoreThrottled(false)) + .gatekeeperOptions( + GatekeeperOptions.builder() + .allowAliasToMultipleIndices(true) + .allowClosedIndices(true) + .allowFailureIndices(true) + .ignoreThrottled(false) + ) + .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -440,7 +580,14 @@ private enum Option { .allowEmptyExpressions(true) .resolveAliases(true) ) - .generalOptions(GeneralOptions.builder().allowAliasToMultipleIndices(true).allowClosedIndices(true).ignoreThrottled(false)) + .gatekeeperOptions( + GatekeeperOptions.builder() + .allowAliasToMultipleIndices(true) + .allowClosedIndices(true) + .allowFailureIndices(true) + .ignoreThrottled(false) + ) + .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN_HIDDEN = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -452,7 +599,14 @@ private enum Option { .allowEmptyExpressions(true) .resolveAliases(true) ) - .generalOptions(GeneralOptions.builder().allowAliasToMultipleIndices(true).allowClosedIndices(true).ignoreThrottled(false)) + .gatekeeperOptions( + GatekeeperOptions.builder() + .allowAliasToMultipleIndices(true) + .allowClosedIndices(true) + .allowFailureIndices(true) + .ignoreThrottled(false) + ) + .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -464,14 +618,28 @@ private enum Option { .allowEmptyExpressions(true) .resolveAliases(true) ) - .generalOptions(GeneralOptions.builder().allowAliasToMultipleIndices(true).allowClosedIndices(true).ignoreThrottled(false)) + .gatekeeperOptions( + GatekeeperOptions.builder() + .allowAliasToMultipleIndices(true) + .allowClosedIndices(true) + .allowFailureIndices(true) + .ignoreThrottled(false) + ) + .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN_CLOSED_HIDDEN = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) .wildcardOptions( WildcardOptions.builder().matchOpen(true).matchClosed(true).includeHidden(true).allowEmptyExpressions(true).resolveAliases(true) ) - .generalOptions(GeneralOptions.builder().allowAliasToMultipleIndices(true).allowClosedIndices(true).ignoreThrottled(false)) + .gatekeeperOptions( + GatekeeperOptions.builder() + .allowAliasToMultipleIndices(true) + .allowClosedIndices(true) + .allowFailureIndices(true) + .ignoreThrottled(false) + ) + .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_CLOSED = IndicesOptions.builder() 
.concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -483,14 +651,28 @@ private enum Option { .allowEmptyExpressions(true) .resolveAliases(true) ) - .generalOptions(GeneralOptions.builder().allowAliasToMultipleIndices(true).allowClosedIndices(true).ignoreThrottled(false)) + .gatekeeperOptions( + GatekeeperOptions.builder() + .allowAliasToMultipleIndices(true) + .allowClosedIndices(true) + .allowFailureIndices(true) + .ignoreThrottled(false) + ) + .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_CLOSED_HIDDEN = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) .wildcardOptions( WildcardOptions.builder().matchOpen(true).matchClosed(true).includeHidden(true).allowEmptyExpressions(true).resolveAliases(true) ) - .generalOptions(GeneralOptions.builder().allowAliasToMultipleIndices(true).allowClosedIndices(true).ignoreThrottled(false)) + .gatekeeperOptions( + GatekeeperOptions.builder() + .allowAliasToMultipleIndices(true) + .allowClosedIndices(true) + .allowFailureIndices(true) + .ignoreThrottled(false) + ) + .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_FORBID_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -502,7 +684,14 @@ private enum Option { .allowEmptyExpressions(true) .resolveAliases(true) ) - .generalOptions(GeneralOptions.builder().allowClosedIndices(false).allowAliasToMultipleIndices(true).ignoreThrottled(false)) + .gatekeeperOptions( + GatekeeperOptions.builder() + .allowClosedIndices(false) + .allowAliasToMultipleIndices(true) + .allowFailureIndices(true) + .ignoreThrottled(false) + ) + .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_HIDDEN_FORBID_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -514,7 +703,14 @@ private enum Option { .allowEmptyExpressions(true) .resolveAliases(true) ) - .generalOptions(GeneralOptions.builder().allowClosedIndices(false).allowAliasToMultipleIndices(true).ignoreThrottled(false)) + .gatekeeperOptions( + GatekeeperOptions.builder() + .allowClosedIndices(false) + .allowAliasToMultipleIndices(true) + .allowFailureIndices(true) + .ignoreThrottled(false) + ) + .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_FORBID_CLOSED_IGNORE_THROTTLED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -526,7 +722,14 @@ private enum Option { .allowEmptyExpressions(true) .resolveAliases(true) ) - .generalOptions(GeneralOptions.builder().ignoreThrottled(true).allowClosedIndices(false).allowAliasToMultipleIndices(true)) + .gatekeeperOptions( + GatekeeperOptions.builder() + .ignoreThrottled(true) + .allowClosedIndices(false) + .allowFailureIndices(true) + .allowAliasToMultipleIndices(true) + ) + .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) .build(); public static final IndicesOptions STRICT_SINGLE_INDEX_NO_EXPAND_FORBID_CLOSED = IndicesOptions.builder() 
.concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -538,7 +741,14 @@ private enum Option { .allowEmptyExpressions(true) .resolveAliases(true) ) - .generalOptions(GeneralOptions.builder().allowAliasToMultipleIndices(false).allowClosedIndices(false).ignoreThrottled(false)) + .gatekeeperOptions( + GatekeeperOptions.builder() + .allowAliasToMultipleIndices(false) + .allowClosedIndices(false) + .allowFailureIndices(true) + .ignoreThrottled(false) + ) + .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) .build(); public static final IndicesOptions STRICT_NO_EXPAND_FORBID_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -550,7 +760,14 @@ private enum Option { .allowEmptyExpressions(true) .resolveAliases(true) ) - .generalOptions(GeneralOptions.builder().allowClosedIndices(false).allowAliasToMultipleIndices(true).ignoreThrottled(false)) + .gatekeeperOptions( + GatekeeperOptions.builder() + .allowClosedIndices(false) + .allowAliasToMultipleIndices(true) + .allowFailureIndices(true) + .ignoreThrottled(false) + ) + .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) .build(); /** @@ -604,14 +821,21 @@ public boolean expandWildcardsHidden() { * @return Whether execution on closed indices is allowed. */ public boolean forbidClosedIndices() { - return generalOptions.allowClosedIndices() == false; + return gatekeeperOptions.allowClosedIndices() == false; + } + + /** + * @return Whether execution on failure indices is allowed. + */ + public boolean allowFailureIndices() { + return gatekeeperOptions.allowFailureIndices(); } /** * @return whether aliases pointing to multiple indices are allowed */ public boolean allowAliasesToMultipleIndices() { - return generalOptions().allowAliasToMultipleIndices(); + return gatekeeperOptions().allowAliasToMultipleIndices(); } /** @@ -625,7 +849,21 @@ public boolean ignoreAliases() { * @return whether indices that are marked as throttled should be ignored */ public boolean ignoreThrottled() { - return generalOptions().ignoreThrottled(); + return gatekeeperOptions().ignoreThrottled(); + } + + /** + * @return whether regular indices (stand-alone or backing indices) will be included in the response + */ + public boolean includeRegularIndices() { + return failureStoreOptions().includeRegularIndices(); + } + + /** + * @return whether failure indices (only supported by certain data streams) will be included in the response + */ + public boolean includeFailureIndices() { + return failureStoreOptions().includeFailureIndices(); } public void writeIndicesOptions(StreamOutput out) throws IOException { @@ -648,6 +886,11 @@ public void writeIndicesOptions(StreamOutput out) throws IOException { if (ignoreUnavailable()) { backwardsCompatibleOptions.add(Option.ALLOW_UNAVAILABLE_CONCRETE_TARGETS); } + if (out.getTransportVersion().onOrAfter(TransportVersions.ADD_FAILURE_STORE_INDICES_OPTIONS)) { + if (allowFailureIndices()) { + backwardsCompatibleOptions.add(Option.ALLOW_FAILURE_INDICES); + } + } out.writeEnumSet(backwardsCompatibleOptions); EnumSet states = EnumSet.noneOf(WildcardStates.class); @@ -661,6 +904,9 @@ public void writeIndicesOptions(StreamOutput out) throws IOException { states.add(WildcardStates.HIDDEN); } out.writeEnumSet(states); + if (out.getTransportVersion().onOrAfter(TransportVersions.ADD_FAILURE_STORE_INDICES_OPTIONS)) { + failureStoreOptions.writeTo(out); + } }
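For readers of the new API, here is a short caller-side sketch (not part of this PR) that opts in to failure-store indices using only the builder methods introduced above; the qualified nested-type names are an assumption about how a caller outside IndicesOptions would reference them:

import org.elasticsearch.action.support.IndicesOptions;

// Hypothetical usage of the new GatekeeperOptions / FailureStoreOptions builders.
class FailureStoreOptionsUsageSketch {
    static IndicesOptions optionsIncludingFailureStore() {
        return IndicesOptions.builder()
            .gatekeeperOptions(
                IndicesOptions.GatekeeperOptions.builder()
                    .allowFailureIndices(true)      // resolution is allowed to return failure indices
                    .ignoreThrottled(false)
            )
            .failureStoreOptions(
                IndicesOptions.FailureStoreOptions.builder()
                    .includeRegularIndices(true)    // together with the next flag this is the "true" value
                    .includeFailureIndices(true)
            )
            .build();
    }
}

With both include flags set, FailureStoreOptions.displayValue() reports "true", matching the failure_store request parameter values accepted by parseParameters ("true", "false", "only").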
public static IndicesOptions readIndicesOptions(StreamInput in) throws IOException { @@ -670,24 +916,34 @@ public static IndicesOptions readIndicesOptions(StreamInput in) throws IOExcepti options.contains(Option.ALLOW_EMPTY_WILDCARD_EXPRESSIONS), options.contains(Option.EXCLUDE_ALIASES) ); - GeneralOptions generalOptions = GeneralOptions.builder() + boolean allowFailureIndices = true; + if (in.getTransportVersion().onOrAfter(TransportVersions.ADD_FAILURE_STORE_INDICES_OPTIONS)) { + allowFailureIndices = options.contains(Option.ALLOW_FAILURE_INDICES); + } + GatekeeperOptions gatekeeperOptions = GatekeeperOptions.builder() .allowClosedIndices(options.contains(Option.ERROR_WHEN_CLOSED_INDICES) == false) .allowAliasToMultipleIndices(options.contains(Option.ERROR_WHEN_ALIASES_TO_MULTIPLE_INDICES) == false) + .allowFailureIndices(allowFailureIndices) .ignoreThrottled(options.contains(Option.IGNORE_THROTTLED)) .build(); + FailureStoreOptions failureStoreOptions = in.getTransportVersion().onOrAfter(TransportVersions.ADD_FAILURE_STORE_INDICES_OPTIONS) + ? FailureStoreOptions.read(in) + : FailureStoreOptions.DEFAULT; return new IndicesOptions( options.contains(Option.ALLOW_UNAVAILABLE_CONCRETE_TARGETS) ? ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS : ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS, wildcardOptions, - generalOptions + gatekeeperOptions, + failureStoreOptions ); } public static class Builder { private ConcreteTargetOptions concreteTargetOptions; private WildcardOptions wildcardOptions; - private GeneralOptions generalOptions; + private GatekeeperOptions gatekeeperOptions; + private FailureStoreOptions failureStoreOptions; Builder() { this(DEFAULT); @@ -696,7 +952,8 @@ public static class Builder { Builder(IndicesOptions indicesOptions) { concreteTargetOptions = indicesOptions.concreteTargetOptions; wildcardOptions = indicesOptions.wildcardOptions; - generalOptions = indicesOptions.generalOptions; + gatekeeperOptions = indicesOptions.gatekeeperOptions; + failureStoreOptions = indicesOptions.failureStoreOptions; } public Builder concreteTargetOptions(ConcreteTargetOptions concreteTargetOptions) { @@ -714,18 +971,28 @@ public Builder wildcardOptions(WildcardOptions.Builder wildcardOptions) { return this; } - public Builder generalOptions(GeneralOptions generalOptions) { - this.generalOptions = generalOptions; + public Builder gatekeeperOptions(GatekeeperOptions gatekeeperOptions) { + this.gatekeeperOptions = gatekeeperOptions; + return this; + } + + public Builder gatekeeperOptions(GatekeeperOptions.Builder generalOptions) { + this.gatekeeperOptions = generalOptions.build(); return this; } - public Builder generalOptions(GeneralOptions.Builder generalOptions) { - this.generalOptions = generalOptions.build(); + public Builder failureStoreOptions(FailureStoreOptions failureStoreOptions) { + this.failureStoreOptions = failureStoreOptions; + return this; + } + + public Builder failureStoreOptions(FailureStoreOptions.Builder failureStoreOptions) { + this.failureStoreOptions = failureStoreOptions.build(); return this; } public IndicesOptions build() { - return new IndicesOptions(concreteTargetOptions, wildcardOptions, generalOptions); + return new IndicesOptions(concreteTargetOptions, wildcardOptions, gatekeeperOptions, failureStoreOptions); } } @@ -819,7 +1086,7 @@ public static IndicesOptions fromOptions( .resolveAliases(ignoreAliases == false) .allowEmptyExpressions(allowNoIndices) .build(); - final GeneralOptions generalOptions = GeneralOptions.builder() + final GatekeeperOptions 
gatekeeperOptions = GatekeeperOptions.builder() .allowAliasToMultipleIndices(allowAliasesToMultipleIndices) .allowClosedIndices(forbidClosedIndices == false) .ignoreThrottled(ignoreThrottled) @@ -827,12 +1094,13 @@ public static IndicesOptions fromOptions( return new IndicesOptions( ignoreUnavailable ? ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS : ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS, wildcards, - generalOptions + gatekeeperOptions, + FailureStoreOptions.DEFAULT ); } public static IndicesOptions fromRequest(RestRequest request, IndicesOptions defaultSettings) { - if (request.hasParam(GeneralOptions.IGNORE_THROTTLED)) { + if (request.hasParam(GatekeeperOptions.IGNORE_THROTTLED)) { DEPRECATION_LOGGER.warn(DeprecationCategory.API, "ignore_throttled_param", IGNORE_THROTTLED_DEPRECATION_MESSAGE); } @@ -840,19 +1108,36 @@ public static IndicesOptions fromRequest(RestRequest request, IndicesOptions def request.param(WildcardOptions.EXPAND_WILDCARDS), request.param(ConcreteTargetOptions.IGNORE_UNAVAILABLE), request.param(WildcardOptions.ALLOW_NO_INDICES), - request.param(GeneralOptions.IGNORE_THROTTLED), + request.param(GatekeeperOptions.IGNORE_THROTTLED), + DataStream.isFailureStoreEnabled() + ? request.param(FailureStoreOptions.FAILURE_STORE) + : FailureStoreOptions.INCLUDE_ONLY_REGULAR_INDICES, defaultSettings ); } public static IndicesOptions fromMap(Map map, IndicesOptions defaultSettings) { + if (DataStream.isFailureStoreEnabled()) { + return fromParameters( + map.containsKey(WildcardOptions.EXPAND_WILDCARDS) ? map.get(WildcardOptions.EXPAND_WILDCARDS) : map.get("expandWildcards"), + map.containsKey(ConcreteTargetOptions.IGNORE_UNAVAILABLE) + ? map.get(ConcreteTargetOptions.IGNORE_UNAVAILABLE) + : map.get("ignoreUnavailable"), + map.containsKey(WildcardOptions.ALLOW_NO_INDICES) ? map.get(WildcardOptions.ALLOW_NO_INDICES) : map.get("allowNoIndices"), + map.containsKey(GatekeeperOptions.IGNORE_THROTTLED) + ? map.get(GatekeeperOptions.IGNORE_THROTTLED) + : map.get("ignoreThrottled"), + map.containsKey(FailureStoreOptions.FAILURE_STORE) ? map.get(FailureStoreOptions.FAILURE_STORE) : map.get("failureStore"), + defaultSettings + ); + } return fromParameters( map.containsKey(WildcardOptions.EXPAND_WILDCARDS) ? map.get(WildcardOptions.EXPAND_WILDCARDS) : map.get("expandWildcards"), map.containsKey(ConcreteTargetOptions.IGNORE_UNAVAILABLE) ? map.get(ConcreteTargetOptions.IGNORE_UNAVAILABLE) : map.get("ignoreUnavailable"), map.containsKey(WildcardOptions.ALLOW_NO_INDICES) ? map.get(WildcardOptions.ALLOW_NO_INDICES) : map.get("allowNoIndices"), - map.containsKey(GeneralOptions.IGNORE_THROTTLED) ? map.get(GeneralOptions.IGNORE_THROTTLED) : map.get("ignoreThrottled"), + map.containsKey(GatekeeperOptions.IGNORE_THROTTLED) ? 
map.get(GatekeeperOptions.IGNORE_THROTTLED) : map.get("ignoreThrottled"), defaultSettings ); } @@ -866,10 +1151,22 @@ public static boolean isIndicesOptions(String name) { || "expandWildcards".equals(name) || ConcreteTargetOptions.IGNORE_UNAVAILABLE.equals(name) || "ignoreUnavailable".equals(name) - || GeneralOptions.IGNORE_THROTTLED.equals(name) + || GatekeeperOptions.IGNORE_THROTTLED.equals(name) || "ignoreThrottled".equals(name) || WildcardOptions.ALLOW_NO_INDICES.equals(name) - || "allowNoIndices".equals(name); + || "allowNoIndices".equals(name) + || (DataStream.isFailureStoreEnabled() && FailureStoreOptions.FAILURE_STORE.equals(name)) + || (DataStream.isFailureStoreEnabled() && "failureStore".equals(name)); + } + + public static IndicesOptions fromParameters( + Object wildcardsString, + Object ignoreUnavailableString, + Object allowNoIndicesString, + Object ignoreThrottled, + IndicesOptions defaultSettings + ) { + return fromParameters(wildcardsString, ignoreUnavailableString, allowNoIndicesString, ignoreThrottled, null, defaultSettings); } public static IndicesOptions fromParameters( @@ -877,20 +1174,29 @@ public static IndicesOptions fromParameters( Object ignoreUnavailableString, Object allowNoIndicesString, Object ignoreThrottled, + Object failureStoreString, IndicesOptions defaultSettings ) { - if (wildcardsString == null && ignoreUnavailableString == null && allowNoIndicesString == null && ignoreThrottled == null) { + if (wildcardsString == null + && ignoreUnavailableString == null + && allowNoIndicesString == null + && ignoreThrottled == null + && failureStoreString == null) { return defaultSettings; } WildcardOptions wildcards = WildcardOptions.parseParameters(wildcardsString, allowNoIndicesString, defaultSettings.wildcardOptions); - GeneralOptions generalOptions = GeneralOptions.parseParameter(ignoreThrottled, defaultSettings.generalOptions); + GatekeeperOptions gatekeeperOptions = GatekeeperOptions.parseParameter(ignoreThrottled, defaultSettings.gatekeeperOptions); + FailureStoreOptions failureStoreOptions = DataStream.isFailureStoreEnabled() + ? 
FailureStoreOptions.parseParameters(failureStoreString, defaultSettings.failureStoreOptions) + : FailureStoreOptions.DEFAULT; // note that allowAliasesToMultipleIndices is not exposed, always true (only for internal use) return IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.fromParameter(ignoreUnavailableString, defaultSettings.concreteTargetOptions)) .wildcardOptions(wildcards) - .generalOptions(generalOptions) + .gatekeeperOptions(gatekeeperOptions) + .failureStoreOptions(failureStoreOptions) .build(); } @@ -898,14 +1204,18 @@ public static IndicesOptions fromParameters( public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { concreteTargetOptions.toXContent(builder, params); wildcardOptions.toXContent(builder, params); - generalOptions.toXContent(builder, params); + gatekeeperOptions.toXContent(builder, params); + if (DataStream.isFailureStoreEnabled()) { + failureStoreOptions.toXContent(builder, params); + } return builder; } private static final ParseField EXPAND_WILDCARDS_FIELD = new ParseField(WildcardOptions.EXPAND_WILDCARDS); private static final ParseField IGNORE_UNAVAILABLE_FIELD = new ParseField(ConcreteTargetOptions.IGNORE_UNAVAILABLE); - private static final ParseField IGNORE_THROTTLED_FIELD = new ParseField(GeneralOptions.IGNORE_THROTTLED).withAllDeprecated(); + private static final ParseField IGNORE_THROTTLED_FIELD = new ParseField(GatekeeperOptions.IGNORE_THROTTLED).withAllDeprecated(); private static final ParseField ALLOW_NO_INDICES_FIELD = new ParseField(WildcardOptions.ALLOW_NO_INDICES); + private static final ParseField FAILURE_STORE_FIELD = new ParseField(FailureStoreOptions.FAILURE_STORE); public static IndicesOptions fromXContent(XContentParser parser) throws IOException { return fromXContent(parser, null); @@ -914,8 +1224,9 @@ public static IndicesOptions fromXContent(XContentParser parser) throws IOExcept public static IndicesOptions fromXContent(XContentParser parser, @Nullable IndicesOptions defaults) throws IOException { boolean parsedWildcardStates = false; WildcardOptions.Builder wildcards = defaults == null ? null : WildcardOptions.builder(defaults.wildcardOptions()); - GeneralOptions.Builder generalOptions = GeneralOptions.builder() - .ignoreThrottled(defaults != null && defaults.generalOptions().ignoreThrottled()); + GatekeeperOptions.Builder generalOptions = GatekeeperOptions.builder() + .ignoreThrottled(defaults != null && defaults.gatekeeperOptions().ignoreThrottled()); + FailureStoreOptions failureStoreOptions = defaults == null ? FailureStoreOptions.DEFAULT : defaults.failureStoreOptions(); Boolean allowNoIndices = defaults == null ? null : defaults.allowNoIndices(); Boolean ignoreUnavailable = defaults == null ? null : defaults.ignoreUnavailable(); Token token = parser.currentToken() == Token.START_OBJECT ? parser.currentToken() : parser.nextToken(); @@ -965,13 +1276,16 @@ public static IndicesOptions fromXContent(XContentParser parser, @Nullable Indic allowNoIndices = parser.booleanValue(); } else if (IGNORE_THROTTLED_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { generalOptions.ignoreThrottled(parser.booleanValue()); - } else { - throw new ElasticsearchParseException( - "could not read indices options. 
unexpected index option [" + currentFieldName + "]" - ); - } + } else if (DataStream.isFailureStoreEnabled() + && FAILURE_STORE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + failureStoreOptions = FailureStoreOptions.parseParameters(parser.text(), failureStoreOptions); + } else { + throw new ElasticsearchParseException( + "could not read indices options. Unexpected index option [" + currentFieldName + "]" + ); + } } else { - throw new ElasticsearchParseException("could not read indices options. unexpected object field [" + currentFieldName + "]"); + throw new ElasticsearchParseException("could not read indices options. Unexpected object field [" + currentFieldName + "]"); } } @@ -994,7 +1308,8 @@ public static IndicesOptions fromXContent(XContentParser parser, @Nullable Indic return IndicesOptions.builder() .concreteTargetOptions(new ConcreteTargetOptions(ignoreUnavailable)) .wildcardOptions(wildcards) - .generalOptions(generalOptions) + .gatekeeperOptions(generalOptions) + .failureStoreOptions(failureStoreOptions) .build(); } @@ -1108,6 +1423,14 @@ public String toString() { + ignoreAliases() + ", ignore_throttled=" + ignoreThrottled() + + (DataStream.isFailureStoreEnabled() + ? ", include_regular_indices=" + + includeRegularIndices() + + ", include_failure_indices=" + + includeFailureIndices() + + ", allow_failure_indices=" + + allowFailureIndices() + : "") + ']'; } } diff --git a/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java b/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java index 22f0da70137af..00384852d1472 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java @@ -28,6 +28,11 @@ public abstract class ClusterInfoRequest true); + private static final Permission BAD_DEFAULT_NUMBER_ONE = new BadDefaultPermission( + new RuntimePermission("stopThread"), + Predicates.always() + ); // default policy file states: // "allows anyone to listen on dynamic ports" diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Natives.java b/server/src/main/java/org/elasticsearch/bootstrap/Natives.java index 2404d5075f844..040c50b2b74e2 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Natives.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Natives.java @@ -10,8 +10,11 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.ReferenceDocs; +import java.lang.invoke.MethodHandles; import java.nio.file.Path; +import java.util.Locale; /** * The Natives class is a wrapper class that checks if the classes necessary for calling native methods are available on @@ -31,12 +34,19 @@ private Natives() {} try { // load one of the main JNA classes to see if the classes are available. this does not ensure that all native // libraries are available, only the ones necessary by JNA to function - Class.forName("com.sun.jna.Native"); + MethodHandles.publicLookup().ensureInitialized(com.sun.jna.Native.class); v = true; - } catch (ClassNotFoundException e) { - logger.warn("JNA not found. 
native methods will be disabled.", e); + } catch (IllegalAccessException e) { + throw new AssertionError(e); } catch (UnsatisfiedLinkError e) { - logger.warn("unable to load JNA native support library, native methods will be disabled.", e); + logger.warn( + String.format( + Locale.ROOT, + "unable to load JNA native support library, native methods will be disabled. See %s", + ReferenceDocs.EXECUTABLE_JNA_TMPDIR + ), + e + ); } JNA_AVAILABLE = v; } diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java index 5f682804a5b88..809e069b0028b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -29,6 +29,7 @@ import org.elasticsearch.cluster.routing.ShardRoutingRoleStrategy; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.AllocationService.RerouteStrategy; +import org.elasticsearch.cluster.routing.allocation.AllocationStatsService; import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator; import org.elasticsearch.cluster.routing.allocation.WriteLoadForecaster; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; @@ -118,6 +119,7 @@ public class ClusterModule extends AbstractModule { final Collection deciderList; final ShardsAllocator shardsAllocator; private final ShardRoutingRoleStrategy shardRoutingRoleStrategy; + private final AllocationStatsService allocationStatsService; public ClusterModule( Settings settings, @@ -154,6 +156,7 @@ public ClusterModule( shardRoutingRoleStrategy ); this.metadataDeleteIndexService = new MetadataDeleteIndexService(settings, clusterService, allocationService); + this.allocationStatsService = new AllocationStatsService(clusterService, clusterInfoService, shardsAllocator, writeLoadForecaster); } static ShardRoutingRoleStrategy getShardRoutingRoleStrategy(List clusterPlugins) { @@ -440,6 +443,7 @@ protected void configure() { bind(AllocationDeciders.class).toInstance(allocationDeciders); bind(ShardsAllocator.class).toInstance(shardsAllocator); bind(ShardRoutingRoleStrategy.class).toInstance(shardRoutingRoleStrategy); + bind(AllocationStatsService.class).toInstance(allocationStatsService); } public void setExistingShardsAllocators(GatewayAllocator gatewayAllocator) { diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java b/server/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java index 74deb90ee411a..40ddafa498ecb 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Predicates; import org.elasticsearch.core.TimeValue; import org.elasticsearch.threadpool.ThreadPool; @@ -33,8 +34,6 @@ public class ClusterStateObserver { public static final Predicate NON_NULL_MASTER_PREDICATE = state -> state.nodes().getMasterNode() != null; - private static final Predicate MATCH_ALL_CHANGES_PREDICATE = state -> true; - private final ClusterApplierService clusterApplierService; private final ThreadPool threadPool; private final ThreadContext contextHolder; @@ -109,11 +108,11 @@ public boolean isTimedOut() { } public void 
waitForNextChange(Listener listener) { - waitForNextChange(listener, MATCH_ALL_CHANGES_PREDICATE); + waitForNextChange(listener, Predicates.always()); } public void waitForNextChange(Listener listener, @Nullable TimeValue timeOutValue) { - waitForNextChange(listener, MATCH_ALL_CHANGES_PREDICATE, timeOutValue); + waitForNextChange(listener, Predicates.always(), timeOutValue); } public void waitForNextChange(Listener listener, Predicate statePredicate) { diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/stateless/AtomicRegisterPreVoteCollector.java b/server/src/main/java/org/elasticsearch/cluster/coordination/stateless/AtomicRegisterPreVoteCollector.java index ae53fa19da655..e9659bde065d7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/stateless/AtomicRegisterPreVoteCollector.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/stateless/AtomicRegisterPreVoteCollector.java @@ -8,6 +8,8 @@ package org.elasticsearch.cluster.coordination.stateless; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.coordination.PreVoteCollector; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -16,6 +18,8 @@ import java.util.concurrent.atomic.AtomicBoolean; public class AtomicRegisterPreVoteCollector extends PreVoteCollector { + private static final Logger logger = LogManager.getLogger(AtomicRegisterPreVoteCollector.class); + private final StoreHeartbeatService heartbeatService; private final Runnable startElection; @@ -27,11 +31,11 @@ public AtomicRegisterPreVoteCollector(StoreHeartbeatService heartbeatService, Ru @Override public Releasable start(ClusterState clusterState, Iterable broadcastNodes) { final var shouldRun = new AtomicBoolean(true); - heartbeatService.runIfNoRecentLeader(() -> { + heartbeatService.checkLeaderHeartbeatAndRun(() -> { if (shouldRun.getAndSet(false)) { startElection.run(); } - }); + }, heartbeat -> logger.info("skipping election since there is a recent heartbeat[{}] from the leader", heartbeat)); return () -> shouldRun.set(false); } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatService.java index 0ea515012a190..d21add7e6954f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatService.java @@ -95,15 +95,15 @@ protected long absoluteTimeInMillis() { return threadPool.absoluteTimeInMillis(); } - void runIfNoRecentLeader(Runnable runnable) { + void checkLeaderHeartbeatAndRun(Runnable noRecentLeaderRunnable, Consumer recentLeaderHeartbeatConsumer) { heartbeatStore.readLatestHeartbeat(new ActionListener<>() { @Override public void onResponse(Heartbeat heartBeat) { if (heartBeat == null || maxTimeSinceLastHeartbeat.millis() <= heartBeat.timeSinceLastHeartbeatInMillis(absoluteTimeInMillis())) { - runnable.run(); + noRecentLeaderRunnable.run(); } else { - logger.trace("runIfNoRecentLeader: found recent leader [{}]", heartBeat); + recentLeaderHeartbeatConsumer.accept(heartBeat); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java b/server/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java index fe6efda67df29..106f4c1e4e387 100644 --- 
a/server/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java @@ -8,6 +8,7 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.common.settings.Setting; @@ -18,6 +19,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.function.Supplier; import static org.elasticsearch.cluster.metadata.MetadataIndexStateService.isIndexVerifiedBeforeClosed; @@ -98,6 +100,13 @@ public boolean expandToAllNodes() { public int getDesiredNumberOfReplicas(IndexMetadata indexMetadata, RoutingAllocation allocation) { assert enabled : "should only be called when enabled"; + // Make sure in stateless auto-expand indices always have 1 replica to ensure all shard roles are always present + if (Objects.equals( + indexMetadata.getSettings().get(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING.getKey()), + "stateless" + )) { + return 1; + } int numMatchingDataNodes = 0; for (DiscoveryNode discoveryNode : allocation.nodes().getDataNodes().values()) { Decision decision = allocation.deciders().shouldAutoExpandToNode(indexMetadata, discoveryNode, allocation); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java index 14de79636be0d..073ba460a4698 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java @@ -70,6 +70,7 @@ public final class DataStream implements SimpleDiffable, ToXContentO public static final FeatureFlag FAILURE_STORE_FEATURE_FLAG = new FeatureFlag("failure_store"); public static final TransportVersion ADDED_FAILURE_STORE_TRANSPORT_VERSION = TransportVersions.V_8_12_0; + public static final TransportVersion ADDED_AUTO_SHARDING_EVENT_VERSION = TransportVersions.DATA_STREAM_AUTO_SHARDING_EVENT; public static boolean isFailureStoreEnabled() { return FAILURE_STORE_FEATURE_FLAG.isEnabled(); @@ -113,6 +114,9 @@ public static boolean isFailureStoreEnabled() { private final boolean rolloverOnWrite; private final boolean failureStore; private final List failureIndices; + private volatile Set failureStoreLookup; + @Nullable + private final DataStreamAutoShardingEvent autoShardingEvent; public DataStream( String name, @@ -126,7 +130,8 @@ public DataStream( IndexMode indexMode, DataStreamLifecycle lifecycle, boolean failureStore, - List failureIndices + List failureIndices, + @Nullable DataStreamAutoShardingEvent autoShardingEvent ) { this( name, @@ -142,7 +147,8 @@ public DataStream( lifecycle, failureStore, failureIndices, - false + false, + autoShardingEvent ); } @@ -159,7 +165,8 @@ public DataStream( DataStreamLifecycle lifecycle, boolean failureStore, List failureIndices, - boolean rolloverOnWrite + boolean rolloverOnWrite, + @Nullable DataStreamAutoShardingEvent autoShardingEvent ) { this( name, @@ -175,7 +182,8 @@ public DataStream( lifecycle, failureStore, failureIndices, - rolloverOnWrite + rolloverOnWrite, + autoShardingEvent ); } @@ -194,7 +202,8 @@ public DataStream( DataStreamLifecycle lifecycle, boolean failureStore, List failureIndices, 
- boolean rolloverOnWrite + boolean rolloverOnWrite, + @Nullable DataStreamAutoShardingEvent autoShardingEvent ) { this.name = name; this.indices = List.copyOf(indices); @@ -213,6 +222,7 @@ public DataStream( this.failureIndices = failureIndices; assert assertConsistent(this.indices); this.rolloverOnWrite = rolloverOnWrite; + this.autoShardingEvent = autoShardingEvent; } // mainly available for testing @@ -227,7 +237,7 @@ public DataStream( boolean allowCustomRouting, IndexMode indexMode ) { - this(name, indices, generation, metadata, hidden, replicated, system, allowCustomRouting, indexMode, null, false, List.of()); + this(name, indices, generation, metadata, hidden, replicated, system, allowCustomRouting, indexMode, null, false, List.of(), null); } private static boolean assertConsistent(List indices) { @@ -273,6 +283,32 @@ public Index getWriteIndex() { return indices.get(indices.size() - 1); } + /** + * @return the write failure index if the failure store is enabled and there is already at least one failure, null otherwise + */ + @Nullable + public Index getFailureStoreWriteIndex() { + return isFailureStore() == false || failureIndices.isEmpty() ? null : failureIndices.get(failureIndices.size() - 1); + } + + /** + * Returns true if the index name provided belongs to a failure store index. + * This method builds a local Set with all the failure store index names and then checks if it contains the name. + * This will perform better if there are multiple indices of this data stream checked. + */ + public boolean isFailureStoreIndex(String indexName) { + if (failureStoreLookup == null) { + // There is a chance this will be calculated twice, but it's a relatively cheap action, + // so it's not worth synchronising + if (failureIndices == null || failureIndices.isEmpty()) { + failureStoreLookup = Set.of(); + } else { + failureStoreLookup = failureIndices.stream().map(Index::getName).collect(Collectors.toSet()); + } + } + return failureStoreLookup.contains(indexName); + } + public boolean rolloverOnWrite() { return rolloverOnWrite; } @@ -412,6 +448,13 @@ public DataStreamLifecycle getLifecycle() { return lifecycle; } + /** + * Returns the latest auto sharding event that happened for this data stream + */ + public DataStreamAutoShardingEvent getAutoShardingEvent() { + return autoShardingEvent; + } + /** * Performs a rollover on a {@code DataStream} instance and returns a new instance containing * the updated list of backing indices and incremented generation. 
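The isFailureStoreIndex() helper above deliberately tolerates a benign race: the lookup Set may be computed more than once, but every computation produces the same value, so no synchronisation is needed. A stripped-down, self-contained sketch of that idiom follows; field and type names are simplified and are not the PR's code:

import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

// Simplified sketch of the lazily built, race-tolerant lookup used by isFailureStoreIndex().
class FailureStoreLookupSketch {
    private final List<String> failureIndexNames;
    private volatile Set<String> lookup; // built on first use; a duplicate computation is harmless

    FailureStoreLookupSketch(List<String> failureIndexNames) {
        this.failureIndexNames = failureIndexNames;
    }

    boolean isFailureStoreIndex(String indexName) {
        Set<String> local = lookup;
        if (local == null) {
            // may run on two threads at once; both produce an equal Set, so the last writer wins safely
            local = failureIndexNames.stream().collect(Collectors.toUnmodifiableSet());
            lookup = local;
        }
        return local.contains(indexName);
    }
}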
@@ -456,7 +499,8 @@ public DataStream unsafeRollover(Index writeIndex, long generation, boolean time indexMode, lifecycle, failureStore, - failureIndices + failureIndices, + autoShardingEvent ); } @@ -534,7 +578,8 @@ public DataStream removeBackingIndex(Index index) { indexMode, lifecycle, failureStore, - failureIndices + failureIndices, + autoShardingEvent ); } @@ -579,7 +624,8 @@ public DataStream replaceBackingIndex(Index existingBackingIndex, Index newBacki indexMode, lifecycle, failureStore, - failureIndices + failureIndices, + autoShardingEvent ); } @@ -639,7 +685,8 @@ public DataStream addBackingIndex(Metadata clusterMetadata, Index index) { indexMode, lifecycle, failureStore, - failureIndices + failureIndices, + autoShardingEvent ); } @@ -658,7 +705,8 @@ public DataStream promoteDataStream() { lifecycle, failureStore, failureIndices, - rolloverOnWrite + rolloverOnWrite, + autoShardingEvent ); } @@ -694,7 +742,8 @@ public DataStream snapshot(Collection indicesInSnapshot) { indexMode, lifecycle, failureStore, - failureIndices + failureIndices, + autoShardingEvent ); } @@ -705,7 +754,7 @@ public DataStream snapshot(Collection indicesInSnapshot) { * is treated differently for the write index (i.e. they first need to be rolled over) */ public List getIndicesPastRetention(Function indexMetadataSupplier, LongSupplier nowSupplier) { - if (lifecycle == null || lifecycle.getEffectiveDataRetention() == null) { + if (lifecycle == null || lifecycle.isEnabled() == false || lifecycle.getEffectiveDataRetention() == null) { return List.of(); } @@ -909,7 +958,10 @@ public DataStream(StreamInput in) throws IOException { in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X) ? in.readOptionalWriteable(DataStreamLifecycle::new) : null, in.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION) ? in.readBoolean() : false, in.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION) ? readIndices(in) : List.of(), - in.getTransportVersion().onOrAfter(TransportVersions.LAZY_ROLLOVER_ADDED) ? in.readBoolean() : false + in.getTransportVersion().onOrAfter(TransportVersions.LAZY_ROLLOVER_ADDED) ? in.readBoolean() : false, + in.getTransportVersion().onOrAfter(DataStream.ADDED_AUTO_SHARDING_EVENT_VERSION) + ? in.readOptionalWriteable(DataStreamAutoShardingEvent::new) + : null ); } @@ -953,6 +1005,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.LAZY_ROLLOVER_ADDED)) { out.writeBoolean(rolloverOnWrite); } + if (out.getTransportVersion().onOrAfter(DataStream.ADDED_AUTO_SHARDING_EVENT_VERSION)) { + out.writeOptionalWriteable(autoShardingEvent); + } } public static final ParseField NAME_FIELD = new ParseField("name"); @@ -969,13 +1024,14 @@ public void writeTo(StreamOutput out) throws IOException { public static final ParseField FAILURE_STORE_FIELD = new ParseField("failure_store"); public static final ParseField FAILURE_INDICES_FIELD = new ParseField("failure_indices"); public static final ParseField ROLLOVER_ON_WRITE_FIELD = new ParseField("rollover_on_write"); + public static final ParseField AUTO_SHARDING_FIELD = new ParseField("auto_sharding"); @SuppressWarnings("unchecked") private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("data_stream", args -> { // Fields behind a feature flag need to be parsed last otherwise the parser will fail when the feature flag is disabled. 
// Until the feature flag is removed we keep them separately to be mindful of this. - boolean failureStoreEnabled = DataStream.isFailureStoreEnabled() && args[11] != null && (boolean) args[11]; - List failureStoreIndices = DataStream.isFailureStoreEnabled() && args[12] != null ? (List) args[12] : List.of(); + boolean failureStoreEnabled = DataStream.isFailureStoreEnabled() && args[12] != null && (boolean) args[12]; + List failureStoreIndices = DataStream.isFailureStoreEnabled() && args[13] != null ? (List) args[13] : List.of(); return new DataStream( (String) args[0], (List) args[1], @@ -989,7 +1045,8 @@ public void writeTo(StreamOutput out) throws IOException { (DataStreamLifecycle) args[9], failureStoreEnabled, failureStoreIndices, - args[10] != null && (boolean) args[10] + args[10] != null && (boolean) args[10], + (DataStreamAutoShardingEvent) args[11] ); }); @@ -1013,6 +1070,11 @@ public void writeTo(StreamOutput out) throws IOException { PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), INDEX_MODE); PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> DataStreamLifecycle.fromXContent(p), LIFECYCLE); PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), ROLLOVER_ON_WRITE_FIELD); + PARSER.declareObject( + ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> DataStreamAutoShardingEvent.fromXContent(p), + AUTO_SHARDING_FIELD + ); // The fields behind the feature flag should always be last. if (DataStream.isFailureStoreEnabled()) { PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), FAILURE_STORE_FIELD); @@ -1067,6 +1129,11 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params, @Nulla lifecycle.toXContent(builder, params, rolloverConfiguration); } builder.field(ROLLOVER_ON_WRITE_FIELD.getPreferredName(), rolloverOnWrite); + if (autoShardingEvent != null) { + builder.startObject(AUTO_SHARDING_FIELD.getPreferredName()); + autoShardingEvent.toXContent(builder, params); + builder.endObject(); + } builder.endObject(); return builder; } @@ -1088,7 +1155,8 @@ public boolean equals(Object o) { && Objects.equals(lifecycle, that.lifecycle) && failureStore == that.failureStore && failureIndices.equals(that.failureIndices) - && rolloverOnWrite == that.rolloverOnWrite; + && rolloverOnWrite == that.rolloverOnWrite + && Objects.equals(autoShardingEvent, that.autoShardingEvent); } @Override @@ -1106,7 +1174,8 @@ public int hashCode() { lifecycle, failureStore, failureIndices, - rolloverOnWrite + rolloverOnWrite, + autoShardingEvent ); } @@ -1169,6 +1238,34 @@ public DataStream getParentDataStream() { "strict_date_optional_time_nanos||strict_date_optional_time||epoch_millis" ); + /** + * Returns the indices created within the {@param maxIndexAge} interval. Note that this strives to cover + * the entire {@param maxIndexAge} interval so one backing index created before the specified age will also + * be returned.
+ */ + public static List getIndicesWithinMaxAgeRange( + DataStream dataStream, + Function indexProvider, + TimeValue maxIndexAge, + LongSupplier nowSupplier + ) { + final List dataStreamIndices = dataStream.getIndices(); + final long currentTimeMillis = nowSupplier.getAsLong(); + // Consider at least 1 index (including the write index) for cases where rollovers happen less often than maxIndexAge + int firstIndexWithinAgeRange = Math.max(dataStreamIndices.size() - 2, 0); + for (int i = 0; i < dataStreamIndices.size(); i++) { + Index index = dataStreamIndices.get(i); + final IndexMetadata indexMetadata = indexProvider.apply(index); + final long indexAge = currentTimeMillis - indexMetadata.getCreationDate(); + if (indexAge < maxIndexAge.getMillis()) { + // We need to consider the previous index too in order to cover the entire max-index-age range. + firstIndexWithinAgeRange = i == 0 ? 0 : i - 1; + break; + } + } + return dataStreamIndices.subList(firstIndexWithinAgeRange, dataStreamIndices.size()); + } + private static Instant getTimeStampFromRaw(Object rawTimestamp) { try { if (rawTimestamp instanceof Long lTimestamp) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamAutoShardingEvent.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamAutoShardingEvent.java new file mode 100644 index 0000000000000..ff143681827ca --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamAutoShardingEvent.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.SimpleDiffable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentFragment; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.function.LongSupplier; + +/** + * Represents the last auto sharding event that occurred for a data stream.
+ */ +public record DataStreamAutoShardingEvent(String triggerIndexName, int targetNumberOfShards, long timestamp) + implements + SimpleDiffable, + ToXContentFragment { + + public static final ParseField TRIGGER_INDEX_NAME = new ParseField("trigger_index_name"); + public static final ParseField TARGET_NUMBER_OF_SHARDS = new ParseField("target_number_of_shards"); + public static final ParseField EVENT_TIME = new ParseField("event_time"); + public static final ParseField EVENT_TIME_MILLIS = new ParseField("event_time_millis"); + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "auto_sharding", + false, + (args, unused) -> new DataStreamAutoShardingEvent((String) args[0], (int) args[1], (long) args[2]) + ); + + static { + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), TRIGGER_INDEX_NAME); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), TARGET_NUMBER_OF_SHARDS); + PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), EVENT_TIME_MILLIS); + } + + public static DataStreamAutoShardingEvent fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + static Diff readDiffFrom(StreamInput in) throws IOException { + return SimpleDiffable.readDiffFrom(DataStreamAutoShardingEvent::new, in); + } + + DataStreamAutoShardingEvent(StreamInput in) throws IOException { + this(in.readString(), in.readVInt(), in.readVLong()); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(TRIGGER_INDEX_NAME.getPreferredName(), triggerIndexName); + builder.field(TARGET_NUMBER_OF_SHARDS.getPreferredName(), targetNumberOfShards); + builder.humanReadableField( + EVENT_TIME_MILLIS.getPreferredName(), + EVENT_TIME.getPreferredName(), + TimeValue.timeValueMillis(timestamp) + ); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(triggerIndexName); + out.writeVInt(targetNumberOfShards); + out.writeVLong(timestamp); + } + + public TimeValue getTimeSinceLastAutoShardingEvent(LongSupplier now) { + return TimeValue.timeValueMillis(Math.max(0L, now.getAsLong() - timestamp)); + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java new file mode 100644 index 0000000000000..f3b88ba6083c3 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java @@ -0,0 +1,148 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.cluster.AbstractNamedDiffable; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.NamedDiff; +import org.elasticsearch.common.collect.Iterators; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Iterator; +import java.util.Objects; + +/** + * A cluster state entry that contains global retention settings that are configurable by the user. These settings include: + * - default retention, applied on any data stream managed by DSL that does not have an explicit retention defined + * - max retention, applied on every data stream managed by DSL + */ +public final class DataStreamGlobalRetention extends AbstractNamedDiffable implements ClusterState.Custom { + + public static final String TYPE = "data-stream-global-retention"; + + public static final ParseField DEFAULT_RETENTION_FIELD = new ParseField("default_retention"); + public static final ParseField MAX_RETENTION_FIELD = new ParseField("max_retention"); + + public static final DataStreamGlobalRetention EMPTY = new DataStreamGlobalRetention(null, null); + + @Nullable + private final TimeValue defaultRetention; + @Nullable + private final TimeValue maxRetention; + + /** + * @param defaultRetention the default retention or null if it's undefined + * @param maxRetention the max retention or null if it's undefined + * @throws IllegalArgumentException when the default retention is greater than the max retention. + */ + public DataStreamGlobalRetention(TimeValue defaultRetention, TimeValue maxRetention) { + if (defaultRetention != null && maxRetention != null && defaultRetention.getMillis() > maxRetention.getMillis()) { + throw new IllegalArgumentException( + "Default global retention [" + + defaultRetention.getStringRep() + + "] cannot be greater than the max global retention [" + + maxRetention.getStringRep() + + "]." + ); + } + this.defaultRetention = defaultRetention; + this.maxRetention = maxRetention; + } + + public static DataStreamGlobalRetention read(StreamInput in) throws IOException { + return new DataStreamGlobalRetention(in.readOptionalTimeValue(), in.readOptionalTimeValue()); + } + + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.ADD_DATA_STREAM_GLOBAL_RETENTION; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalTimeValue(defaultRetention); + out.writeOptionalTimeValue(maxRetention); + } + + public static NamedDiff readDiffFrom(StreamInput in) throws IOException { + return readDiffFrom(ClusterState.Custom.class, TYPE, in); + } + + @Override + public Iterator toXContentChunked(ToXContent.Params ignored) { + return Iterators.single(this::toXContentFragment); + } + + /** + * Adds to the XContentBuilder the two fields when they are not null. 
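A brief sketch of the contract described above, using hypothetical values: the constructor rejects a default retention larger than the max, and (per the class Javadoc) the default applies only to streams without their own retention while the max caps every DSL-managed stream. The effectiveRetention helper below is an illustration of those documented semantics, not a method of this class.

    import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention;
    import org.elasticsearch.core.TimeValue;

    class GlobalRetentionExample {
        // Illustrative only: 30-day default, 90-day cap. Swapping the two values would
        // make the constructor throw IllegalArgumentException (default > max).
        static final DataStreamGlobalRetention GLOBAL = new DataStreamGlobalRetention(
            TimeValue.timeValueDays(30),  // default_retention
            TimeValue.timeValueDays(90)   // max_retention
        );

        // Sketch of the semantics from the class Javadoc: a stream without explicit
        // retention falls back to the default, and nothing exceeds the max.
        static TimeValue effectiveRetention(TimeValue streamRetention) {
            TimeValue effective = streamRetention != null ? streamRetention : GLOBAL.getDefaultRetention();
            TimeValue max = GLOBAL.getMaxRetention();
            if (max != null && (effective == null || effective.getMillis() > max.getMillis())) {
                effective = max;
            }
            return effective;
        }
    }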
+ */ + public XContentBuilder toXContentFragment(XContentBuilder builder, ToXContent.Params params) throws IOException { + if (defaultRetention != null) { + builder.field(DEFAULT_RETENTION_FIELD.getPreferredName(), defaultRetention.getStringRep()); + } + if (maxRetention != null) { + builder.field(MAX_RETENTION_FIELD.getPreferredName(), maxRetention.getStringRep()); + } + return builder; + } + + /** + * Returns the metadata found in the cluster state or null. + */ + public static DataStreamGlobalRetention getFromClusterState(ClusterState clusterState) { + return clusterState.custom(DataStreamGlobalRetention.TYPE); + } + + @Nullable + public TimeValue getDefaultRetention() { + return defaultRetention; + } + + @Nullable + public TimeValue getMaxRetention() { + return maxRetention; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + DataStreamGlobalRetention that = (DataStreamGlobalRetention) o; + return Objects.equals(defaultRetention, that.defaultRetention) && Objects.equals(maxRetention, that.maxRetention); + } + + @Override + public int hashCode() { + return Objects.hash(defaultRetention, maxRetention); + } + + @Override + public String toString() { + return "DataStreamGlobalRetention{" + + "defaultRetention=" + + (defaultRetention == null ? "null" : defaultRetention.getStringRep()) + + ", maxRetention=" + + (maxRetention == null ? "null" : maxRetention.getStringRep()) + + '}'; + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java index 215ed515748ab..b4a3a1eb3502a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java @@ -134,6 +134,16 @@ public boolean isEnabled() { */ @Nullable public TimeValue getEffectiveDataRetention() { + return getDataStreamRetention(); + } + + /** + * The least amount of time the data stream is requesting Elasticsearch to keep its data. + * NOTE: this can be overridden by {@link DataStreamLifecycle#getEffectiveDataRetention()}. + * @return the time period or null; null means that the data should never be deleted. + */ + @Nullable + public TimeValue getDataStreamRetention() { return dataRetention == null ?
null : dataRetention.value; } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index 0446b479b191d..e8e8ca767cc34 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -26,10 +26,12 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Predicates; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.indices.FailureIndexNotSupportedException; import org.elasticsearch.indices.IndexClosedException; import org.elasticsearch.indices.InvalidIndexNameException; import org.elasticsearch.indices.SystemIndices; @@ -59,8 +61,6 @@ public class IndexNameExpressionResolver { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(IndexNameExpressionResolver.class); - private static final Predicate ALWAYS_TRUE = s -> true; - public static final String EXCLUDED_DATA_STREAMS_KEY = "es.excluded_ds"; public static final IndexVersion SYSTEM_INDEX_ENFORCEMENT_INDEX_VERSION = IndexVersions.V_8_0_0; @@ -101,7 +101,7 @@ public String[] concreteIndexNamesWithSystemIndexAccess(ClusterState state, Indi false, request.includeDataStreams(), SystemIndexAccessLevel.BACKWARDS_COMPATIBLE_ONLY, - ALWAYS_TRUE, + Predicates.always(), this.getNetNewSystemIndexPredicate() ); return concreteIndexNames(context, request.indices()); @@ -355,16 +355,19 @@ Index[] concreteIndices(Context context, String... indexExpressions) { + " indices without one being designated as a write index" ); } - if (addIndex(writeIndex, null, context)) { - concreteIndicesResult.add(writeIndex); + if (indexAbstraction.isDataStreamRelated()) { + DataStream dataStream = indicesLookup.get(indexAbstraction.getWriteIndex().getName()).getParentDataStream(); + resolveWriteIndexForDataStreams(context, dataStream, concreteIndicesResult); + } else { + if (addIndex(writeIndex, null, context)) { + concreteIndicesResult.add(writeIndex); + } } } else if (indexAbstraction.getType() == Type.DATA_STREAM && context.isResolveToWriteIndex()) { - Index writeIndex = indexAbstraction.getWriteIndex(); - if (addIndex(writeIndex, null, context)) { - concreteIndicesResult.add(writeIndex); - } + resolveWriteIndexForDataStreams(context, (DataStream) indexAbstraction, concreteIndicesResult); } else { - if (indexAbstraction.getIndices().size() > 1 && context.getOptions().allowAliasesToMultipleIndices() == false) { + if (resolvesToMoreThanOneIndex(indexAbstraction, context) + && context.getOptions().allowAliasesToMultipleIndices() == false) { String[] indexNames = new String[indexAbstraction.getIndices().size()]; int i = 0; for (Index indexName : indexAbstraction.getIndices()) { @@ -380,11 +383,27 @@ Index[] concreteIndices(Context context, String... 
indexExpressions) { ); } - for (Index index : indexAbstraction.getIndices()) { - if (shouldTrackConcreteIndex(context, context.getOptions(), index)) { - concreteIndicesResult.add(index); + if (indexAbstraction.getType() == Type.DATA_STREAM) { + resolveIndicesForDataStream(context, (DataStream) indexAbstraction, concreteIndicesResult); + } else if (indexAbstraction.getType() == Type.ALIAS + && indexAbstraction.isDataStreamRelated() + && DataStream.isFailureStoreEnabled() + && context.getOptions().includeFailureIndices()) { + // Collect the data streams involved + Set aliasDataStreams = new HashSet<>(); + for (Index index : indexAbstraction.getIndices()) { + aliasDataStreams.add(indicesLookup.get(index.getName()).getParentDataStream()); + } + for (DataStream dataStream : aliasDataStreams) { + resolveIndicesForDataStream(context, dataStream, concreteIndicesResult); + } + } else { + for (Index index : indexAbstraction.getIndices()) { + if (shouldTrackConcreteIndex(context, context.getOptions(), index)) { + concreteIndicesResult.add(index); + } + } } - } } } @@ -395,9 +414,70 @@ Index[] concreteIndices(Context context, String... indexExpressions) { return concreteIndicesResult.toArray(Index.EMPTY_ARRAY); } + private static void resolveIndicesForDataStream(Context context, DataStream dataStream, Set concreteIndicesResult) { + if (shouldIncludeRegularIndices(context.getOptions())) { + for (Index index : dataStream.getIndices()) { + if (shouldTrackConcreteIndex(context, context.getOptions(), index)) { + concreteIndicesResult.add(index); + } + } + } + if (shouldIncludeFailureIndices(context.getOptions(), dataStream)) { + // We short-circuit here, if failure indices are not allowed and they can be skipped + if (context.getOptions().allowFailureIndices() || context.getOptions().ignoreUnavailable() == false) { + for (Index index : dataStream.getFailureIndices()) { + if (shouldTrackConcreteIndex(context, context.getOptions(), index)) { + concreteIndicesResult.add(index); + } + } + } + } + } + + private static void resolveWriteIndexForDataStreams(Context context, DataStream dataStream, Set concreteIndicesResult) { + if (shouldIncludeRegularIndices(context.getOptions())) { + Index writeIndex = dataStream.getWriteIndex(); + if (addIndex(writeIndex, null, context)) { + concreteIndicesResult.add(writeIndex); + } + } + if (shouldIncludeFailureIndices(context.getOptions(), dataStream)) { + Index failureStoreWriteIndex = dataStream.getFailureStoreWriteIndex(); + if (failureStoreWriteIndex != null && addIndex(failureStoreWriteIndex, null, context)) { + if (context.options.allowFailureIndices() == false) { + throw new FailureIndexNotSupportedException(failureStoreWriteIndex); + } + concreteIndicesResult.add(failureStoreWriteIndex); + } + } + } + + private static boolean shouldIncludeRegularIndices(IndicesOptions indicesOptions) { + return DataStream.isFailureStoreEnabled() == false || indicesOptions.includeRegularIndices(); + } + + private static boolean shouldIncludeFailureIndices(IndicesOptions indicesOptions, DataStream dataStream) { + return DataStream.isFailureStoreEnabled() && indicesOptions.includeFailureIndices() && dataStream.isFailureStore(); + } + + private static boolean resolvesToMoreThanOneIndex(IndexAbstraction indexAbstraction, Context context) { + if (indexAbstraction.getType() == Type.DATA_STREAM) { + DataStream dataStream = (DataStream) indexAbstraction; + int count = 0; + if (shouldIncludeRegularIndices(context.getOptions())) { + count += dataStream.getIndices().size(); + } + if 
(shouldIncludeFailureIndices(context.getOptions(), dataStream)) { + count += dataStream.getFailureIndices().size(); + } + return count > 1; + } + return indexAbstraction.getIndices().size() > 1; + } + private void checkSystemIndexAccess(Context context, Set concreteIndices) { final Predicate systemIndexAccessPredicate = context.getSystemIndexAccessPredicate(); - if (systemIndexAccessPredicate == ALWAYS_TRUE) { + if (systemIndexAccessPredicate == Predicates.always()) { return; } doCheckSystemIndexAccess(context, concreteIndices, systemIndexAccessPredicate); @@ -486,6 +566,21 @@ private static boolean shouldTrackConcreteIndex(Context context, IndicesOptions // Exclude this one as it's a net-new system index, and we explicitly don't want those. return false; } + if (DataStream.isFailureStoreEnabled()) { + IndexAbstraction indexAbstraction = context.getState().metadata().getIndicesLookup().get(index.getName()); + if (context.options.allowFailureIndices() == false) { + DataStream parentDataStream = indexAbstraction.getParentDataStream(); + if (parentDataStream != null && parentDataStream.isFailureStore()) { + if (parentDataStream.isFailureStoreIndex(index.getName())) { + if (options.ignoreUnavailable()) { + return false; + } else { + throw new FailureIndexNotSupportedException(index); + } + } + } + } + } final IndexMetadata imd = context.state.metadata().index(index); if (imd.getState() == IndexMetadata.State.CLOSE) { if (options.forbidClosedIndices() && options.ignoreUnavailable() == false) { @@ -947,11 +1042,11 @@ public Predicate getSystemIndexAccessPredicate() { final SystemIndexAccessLevel systemIndexAccessLevel = getSystemIndexAccessLevel(); final Predicate systemIndexAccessLevelPredicate; if (systemIndexAccessLevel == SystemIndexAccessLevel.NONE) { - systemIndexAccessLevelPredicate = s -> false; + systemIndexAccessLevelPredicate = Predicates.never(); } else if (systemIndexAccessLevel == SystemIndexAccessLevel.BACKWARDS_COMPATIBLE_ONLY) { systemIndexAccessLevelPredicate = getNetNewSystemIndexPredicate(); } else if (systemIndexAccessLevel == SystemIndexAccessLevel.ALL) { - systemIndexAccessLevelPredicate = ALWAYS_TRUE; + systemIndexAccessLevelPredicate = Predicates.always(); } else { // everything other than allowed should be included in the deprecation message systemIndexAccessLevelPredicate = systemIndices.getProductSystemIndexNamePredicate(threadContext); @@ -981,7 +1076,7 @@ public static class Context { private final Predicate netNewSystemIndexPredicate; Context(ClusterState state, IndicesOptions options, SystemIndexAccessLevel systemIndexAccessLevel) { - this(state, options, systemIndexAccessLevel, ALWAYS_TRUE, s -> false); + this(state, options, systemIndexAccessLevel, Predicates.always(), Predicates.never()); } Context( @@ -1310,7 +1405,7 @@ private static Map filterIndicesLookupForSuffixWildcar /** * Return the {@code Stream} of open and/or closed index names for the given {@param resources}. - * Datastreams and aliases are interpreted to refer to multiple indices, + * Data streams and aliases are interpreted to refer to multiple indices, * then all index resources are filtered by their open/closed status. 
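To summarize the gating applied by the resolver changes above, a simplified standalone sketch: backing indices and failure-store indices are included independently, driven by the request's options. The option flags here are plain booleans standing in for the real IndicesOptions accessors, and the sketch deliberately ignores the exception paths (FailureIndexNotSupportedException) and system-index checks.

    import java.util.ArrayList;
    import java.util.List;

    class FailureStoreResolutionSketch {
        // Simplified stand-ins for the relevant IndicesOptions flags.
        record Options(boolean includeRegularIndices, boolean includeFailureIndices, boolean allowFailureIndices) {}

        static List<String> resolve(List<String> backingIndices, List<String> failureIndices, boolean hasFailureStore, Options options) {
            List<String> result = new ArrayList<>();
            if (options.includeRegularIndices()) {
                result.addAll(backingIndices);
            }
            // Failure indices are only considered when the data stream has a failure store and the
            // request asked for them; when they are disallowed the real code skips or rejects them.
            if (hasFailureStore && options.includeFailureIndices() && options.allowFailureIndices()) {
                result.addAll(failureIndices);
            }
            return result;
        }
    }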
*/ private static Stream expandToOpenClosed(Context context, Stream resources) { @@ -1321,7 +1416,18 @@ private static Stream expandToOpenClosed(Context context, Stream indicesStateStream = indexAbstraction.getIndices().stream().map(context.state.metadata()::index); + Stream indicesStateStream = Stream.of(); + if (shouldIncludeRegularIndices(context.getOptions())) { + indicesStateStream = indexAbstraction.getIndices().stream().map(context.state.metadata()::index); + } + if (indexAbstraction.getType() == Type.DATA_STREAM + && shouldIncludeFailureIndices(context.getOptions(), (DataStream) indexAbstraction)) { + DataStream dataStream = (DataStream) indexAbstraction; + indicesStateStream = Stream.concat( + indicesStateStream, + dataStream.getFailureIndices().stream().map(context.state.metadata()::index) + ); + } if (excludeState != null) { indicesStateStream = indicesStateStream.filter(indexMeta -> indexMeta.getState() != excludeState); } @@ -1363,6 +1469,9 @@ private static List resolveEmptyOrTrivialWildcardWithAllowedSystemIndice } private static String[] resolveEmptyOrTrivialWildcardToAllIndices(IndicesOptions options, Metadata metadata) { + if (shouldIncludeRegularIndices(options) == false) { + return Strings.EMPTY_ARRAY; + } if (options.expandWildcardsOpen() && options.expandWildcardsClosed() && options.expandWildcardsHidden()) { return metadata.getConcreteAllIndices(); } else if (options.expandWildcardsOpen() && options.expandWildcardsClosed()) { @@ -1722,7 +1831,7 @@ public ResolverContext() { } public ResolverContext(long startTime) { - super(null, null, startTime, false, false, false, false, SystemIndexAccessLevel.ALL, name -> false, name -> false); + super(null, null, startTime, false, false, false, false, SystemIndexAccessLevel.ALL, Predicates.never(), Predicates.never()); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index 4d76ead90e12a..b450251ff7e3f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -2597,6 +2597,9 @@ private static void collectIndices( private static boolean assertContainsIndexIfDataStream(DataStream parent, IndexMetadata indexMetadata) { assert parent == null || parent.getIndices().stream().anyMatch(index -> indexMetadata.getIndex().getName().equals(index.getName())) + || (DataStream.isFailureStoreEnabled() + && parent.isFailureStore() + && parent.getFailureIndices().stream().anyMatch(index -> indexMetadata.getIndex().getName().equals(index.getName()))) : "Expected data stream [" + parent.getName() + "] to contain index " + indexMetadata.getIndex(); return true; } @@ -2618,6 +2621,11 @@ private static void collectDataStreams( for (Index i : dataStream.getIndices()) { indexToDataStreamLookup.put(i.getName(), dataStream); } + if (DataStream.isFailureStoreEnabled() && dataStream.isFailureStore()) { + for (Index i : dataStream.getFailureIndices()) { + indexToDataStreamLookup.put(i.getName(), dataStream); + } + } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java index d500a8b8e6876..fd67a8ac7e230 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java +++ 
b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java @@ -25,10 +25,12 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; import org.elasticsearch.index.mapper.MappingLookup; import org.elasticsearch.index.mapper.MetadataFieldMapper; @@ -52,6 +54,8 @@ public class MetadataCreateDataStreamService { private static final Logger logger = LogManager.getLogger(MetadataCreateDataStreamService.class); + public static final String FAILURE_STORE_REFRESH_INTERVAL_SETTING_NAME = "data_streams.failure_store.refresh_interval"; + private final ThreadPool threadPool; private final ClusterService clusterService; private final MetadataCreateIndexService metadataCreateIndexService; @@ -98,6 +102,7 @@ public void createDataStream(CreateDataStreamClusterStateUpdateRequest request, public ClusterState execute(ClusterState currentState) throws Exception { ClusterState clusterState = createDataStream( metadataCreateIndexService, + clusterService.getSettings(), currentState, isDslOnlyMode, request, @@ -124,7 +129,7 @@ public ClusterState createDataStream( ClusterState current, ActionListener rerouteListener ) throws Exception { - return createDataStream(metadataCreateIndexService, current, isDslOnlyMode, request, rerouteListener); + return createDataStream(metadataCreateIndexService, clusterService.getSettings(), current, isDslOnlyMode, request, rerouteListener); } public static final class CreateDataStreamClusterStateUpdateRequest extends ClusterStateUpdateRequest< @@ -184,12 +189,22 @@ long getStartTime() { static ClusterState createDataStream( MetadataCreateIndexService metadataCreateIndexService, + Settings settings, ClusterState currentState, boolean isDslOnlyMode, CreateDataStreamClusterStateUpdateRequest request, ActionListener rerouteListener ) throws Exception { - return createDataStream(metadataCreateIndexService, currentState, isDslOnlyMode, request, List.of(), null, rerouteListener); + return createDataStream( + metadataCreateIndexService, + settings, + currentState, + isDslOnlyMode, + request, + List.of(), + null, + rerouteListener + ); } /** @@ -204,6 +219,7 @@ static ClusterState createDataStream( */ static ClusterState createDataStream( MetadataCreateIndexService metadataCreateIndexService, + Settings settings, ClusterState currentState, boolean isDslOnlyMode, CreateDataStreamClusterStateUpdateRequest request, @@ -260,6 +276,7 @@ static ClusterState createDataStream( String failureStoreIndexName = DataStream.getDefaultFailureStoreName(dataStreamName, 1, request.getStartTime()); currentState = createFailureStoreIndex( metadataCreateIndexService, + settings, currentState, request, dataStreamName, @@ -314,7 +331,8 @@ static ClusterState createDataStream( indexMode, lifecycle == null && isDslOnlyMode ? 
DataStreamLifecycle.DEFAULT : lifecycle, template.getDataStreamTemplate().hasFailureStore(), - failureIndices + failureIndices, + null ); Metadata.Builder builder = Metadata.builder(currentState.metadata()).put(newDataStream); @@ -383,6 +401,7 @@ private static ClusterState createBackingIndex( private static ClusterState createFailureStoreIndex( MetadataCreateIndexService metadataCreateIndexService, + Settings settings, ClusterState currentState, CreateDataStreamClusterStateUpdateRequest request, String dataStreamName, @@ -393,6 +412,16 @@ private static ClusterState createFailureStoreIndex( return currentState; } + var indexSettings = MetadataRolloverService.HIDDEN_INDEX_SETTINGS; + // Optionally set a custom refresh interval for the failure store index. + var refreshInterval = getFailureStoreRefreshInterval(settings); + if (refreshInterval != null) { + indexSettings = Settings.builder() + .put(indexSettings) + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), refreshInterval) + .build(); + } + CreateIndexClusterStateUpdateRequest createIndexRequest = new CreateIndexClusterStateUpdateRequest( "initialize_data_stream", failureStoreIndexName, @@ -401,7 +430,7 @@ private static ClusterState createFailureStoreIndex( .nameResolvedInstant(request.getStartTime()) .performReroute(false) .setMatchingTemplate(template) - .settings(MetadataRolloverService.HIDDEN_INDEX_SETTINGS); + .settings(indexSettings); try { currentState = metadataCreateIndexService.applyCreateIndexRequest( @@ -450,4 +479,7 @@ public static void validateTimestampFieldMapping(MappingLookup mappingLookup) th fieldMapper.validate(mappingLookup); } + public static TimeValue getFailureStoreRefreshInterval(Settings settings) { + return settings.getAsTime(FAILURE_STORE_REFRESH_INTERVAL_SETTING_NAME, null); + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java index 6b81aa230f0d9..4006bc8d1a94a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java @@ -212,7 +212,8 @@ static ClusterState updateDataLifecycle( dataStream.getIndexMode(), lifecycle, dataStream.isFailureStore(), - dataStream.getFailureIndices() + dataStream.getFailureIndices(), + dataStream.getAutoShardingEvent() ) ); } @@ -249,7 +250,8 @@ public static ClusterState setRolloverOnWrite(ClusterState currentState, String dataStream.getLifecycle(), dataStream.isFailureStore(), dataStream.getFailureIndices(), - rolloverOnWrite + rolloverOnWrite, + dataStream.getAutoShardingEvent() ) ); return ClusterState.builder(currentState).metadata(builder.build()).build(); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java index f7fa34d76498a..c40c5a09e99ee 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java @@ -113,7 +113,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { } catch (IOException e) { throw new IllegalStateException(e); } - }, request, metadataCreateIndexService, delegate.reroute()); + }, request, metadataCreateIndexService, clusterService.getSettings(), 
delegate.reroute()); writeIndexRef.set(clusterState.metadata().dataStreams().get(request.aliasName).getWriteIndex().getName()); return clusterState; } @@ -132,6 +132,7 @@ static ClusterState migrateToDataStream( Function mapperSupplier, MigrateToDataStreamClusterStateUpdateRequest request, MetadataCreateIndexService metadataCreateIndexService, + Settings settings, ActionListener listener ) throws Exception { validateRequest(currentState, request); @@ -158,6 +159,7 @@ static ClusterState migrateToDataStream( CreateDataStreamClusterStateUpdateRequest req = new CreateDataStreamClusterStateUpdateRequest(request.aliasName); return createDataStream( metadataCreateIndexService, + settings, currentState, isDslOnlyMode, req, diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java index 8fbdd3790e158..6679f17a0427b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Predicates; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; @@ -54,11 +55,10 @@ public class IndexRoutingTable implements SimpleDiffable { private static final List> PRIORITY_REMOVE_CLAUSES = Stream.>of( shardRouting -> shardRouting.isPromotableToPrimary() == false, - shardRouting -> true + Predicates.always() ) .flatMap( - p1 -> Stream.>of(ShardRouting::unassigned, ShardRouting::initializing, shardRouting -> true) - .map(p1::and) + p1 -> Stream.>of(ShardRouting::unassigned, ShardRouting::initializing, Predicates.always()).map(p1::and) ) .toList(); private final Index index; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java index 8e257ff2c7a54..1e5aaa46c1157 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java @@ -114,7 +114,8 @@ public class IndexShardRoutingTable { allShardsStarted = false; } } - assert primary != null || shards.isEmpty() : shards; + assert shards.isEmpty() == false : "cannot have an empty shard routing table"; + assert primary != null : shards; this.primary = primary; this.replicas = CollectionUtils.wrapUnmodifiableOrEmptySingleton(replicas); this.activeShards = CollectionUtils.wrapUnmodifiableOrEmptySingleton(activeShards); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java index 723d65fbc2a3f..855793e9e9782 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Predicates; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.shard.ShardId; @@ -249,7 +250,7 @@ private GroupShardsIterator 
allSatisfyingPredicateShardsGrouped( } public ShardsIterator allShards(String[] indices) { - return allShardsSatisfyingPredicate(indices, shardRouting -> true, false); + return allShardsSatisfyingPredicate(indices, Predicates.always(), false); } public ShardsIterator allActiveShards(String[] indices) { @@ -257,7 +258,7 @@ public ShardsIterator allActiveShards(String[] indices) { } public ShardsIterator allShardsIncludingRelocationTargets(String[] indices) { - return allShardsSatisfyingPredicate(indices, shardRouting -> true, true); + return allShardsSatisfyingPredicate(indices, Predicates.always(), true); } private ShardsIterator allShardsSatisfyingPredicate( diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsService.java new file mode 100644 index 0000000000000..dbafd916b2a42 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsService.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.routing.allocation; + +import org.elasticsearch.cluster.ClusterInfoService; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.routing.RoutingNode; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalance; +import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceShardsAllocator; +import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.util.Maps; + +import java.util.Map; + +public class AllocationStatsService { + + private final ClusterService clusterService; + private final ClusterInfoService clusterInfoService; + private final DesiredBalanceShardsAllocator desiredBalanceShardsAllocator; + private final WriteLoadForecaster writeLoadForecaster; + + public AllocationStatsService( + ClusterService clusterService, + ClusterInfoService clusterInfoService, + ShardsAllocator shardsAllocator, + WriteLoadForecaster writeLoadForecaster + ) { + this.clusterService = clusterService; + this.clusterInfoService = clusterInfoService; + this.desiredBalanceShardsAllocator = shardsAllocator instanceof DesiredBalanceShardsAllocator allocator ? allocator : null; + this.writeLoadForecaster = writeLoadForecaster; + } + + public Map stats() { + var state = clusterService.state(); + var info = clusterInfoService.getClusterInfo(); + var desiredBalance = desiredBalanceShardsAllocator != null ? 
desiredBalanceShardsAllocator.getDesiredBalance() : null; + + var stats = Maps.newMapWithExpectedSize(state.getRoutingNodes().size()); + for (RoutingNode node : state.getRoutingNodes()) { + int shards = 0; + int undesiredShards = 0; + double forecastedWriteLoad = 0.0; + long forecastedDiskUsage = 0; + long currentDiskUsage = 0; + for (ShardRouting shardRouting : node) { + if (shardRouting.relocating()) { + continue; + } + shards++; + IndexMetadata indexMetadata = state.metadata().getIndexSafe(shardRouting.index()); + if (isDesiredAllocation(desiredBalance, shardRouting) == false) { + undesiredShards++; + } + long shardSize = info.getShardSize(shardRouting.shardId(), shardRouting.primary(), 0); + forecastedWriteLoad += writeLoadForecaster.getForecastedWriteLoad(indexMetadata).orElse(0.0); + forecastedDiskUsage += Math.max(indexMetadata.getForecastedShardSizeInBytes().orElse(0), shardSize); + currentDiskUsage += shardSize; + + } + stats.put( + node.nodeId(), + new NodeAllocationStats( + shards, + desiredBalanceShardsAllocator != null ? undesiredShards : -1, + forecastedWriteLoad, + forecastedDiskUsage, + currentDiskUsage + ) + ); + } + + return stats; + } + + private static boolean isDesiredAllocation(DesiredBalance desiredBalance, ShardRouting shardRouting) { + if (desiredBalance == null) { + return true; + } + var assignment = desiredBalance.getAssignment(shardRouting.shardId()); + if (assignment == null) { + return false; + } + return assignment.nodeIds().contains(shardRouting.currentNodeId()); + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationStats.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationStats.java new file mode 100644 index 0000000000000..57484d6da53c7 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationStats.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
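In the stats loop above, a shard counts as undesired when the desired balance holds an assignment for its shard id that does not name the node the shard currently sits on. A toy version of that check follows, with the desired balance reduced to a plain map keyed by shard id; it is not the real DesiredBalance API.

    import java.util.Map;
    import java.util.Set;

    class UndesiredShardSketch {
        // Toy stand-in for DesiredBalance: shard id -> node ids the balancer wants the copies on.
        static boolean isDesiredAllocation(Map<String, Set<String>> desiredAssignments, String shardId, String currentNodeId) {
            Set<String> desiredNodes = desiredAssignments.get(shardId);
            // A missing assignment counts as undesired; otherwise the current node must be one of the desired ones.
            return desiredNodes != null && desiredNodes.contains(currentNodeId);
        }
    }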
+ */ + +package org.elasticsearch.cluster.routing.allocation; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.xcontent.ToXContentFragment; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; + +public record NodeAllocationStats( + int shards, + int undesiredShards, + double forecastedIngestLoad, + long forecastedDiskUsage, + long currentDiskUsage +) implements Writeable, ToXContentFragment { + + public NodeAllocationStats(StreamInput in) throws IOException { + this(in.readVInt(), in.readVInt(), in.readDouble(), in.readVLong(), in.readVLong()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(shards); + out.writeVInt(undesiredShards); + out.writeDouble(forecastedIngestLoad); + out.writeVLong(forecastedDiskUsage); + out.writeVLong(currentDiskUsage); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject("allocations") + .field("shards", shards) + .field("undesired_shards", undesiredShards) + .field("forecasted_ingest_load", forecastedIngestLoad) + .humanReadableField("forecasted_disk_usage_in_bytes", "forecasted_disk_usage", ByteSizeValue.ofBytes(forecastedDiskUsage)) + .humanReadableField("current_disk_usage_in_bytes", "current_disk_usage", ByteSizeValue.ofBytes(currentDiskUsage)) + .endObject(); + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java index effd5ec110c44..3a26bbcc7b280 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java @@ -77,7 +77,18 @@ public DesiredBalance compute( Predicate isFresh ) { - logger.debug("Recomputing desired balance for [{}]", desiredBalanceInput.index()); + if (logger.isTraceEnabled()) { + logger.trace( + "Recomputing desired balance for [{}]: {}, {}, {}, {}", + desiredBalanceInput.index(), + previousDesiredBalance, + desiredBalanceInput.routingAllocation().routingNodes().toString(), + desiredBalanceInput.routingAllocation().clusterInfo().toString(), + desiredBalanceInput.routingAllocation().snapshotShardSizeInfo().toString() + ); + } else { + logger.debug("Recomputing desired balance for [{}]", desiredBalanceInput.index()); + } final var routingAllocation = desiredBalanceInput.routingAllocation().mutableCloneForSimulation(); final var routingNodes = routingAllocation.routingNodes(); @@ -283,7 +294,6 @@ public DesiredBalance compute( hasChanges = true; clusterInfoSimulator.simulateShardStarted(shardRouting); routingNodes.startShard(logger, shardRouting, changes, 0L); - logger.trace("starting shard {}", shardRouting); } } } diff --git a/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java b/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java index 4c401ab0ad52c..8e370158d166a 100644 --- a/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java +++ b/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java @@ -71,6 +71,7 @@ public enum ReferenceDocs { BOOTSTRAP_CHECK_SECURITY_MINIMAL_SETUP, CONTACT_SUPPORT, 
UNASSIGNED_SHARDS, + EXECUTABLE_JNA_TMPDIR, // this comment keeps the ';' on the next line so every entry above has a trailing ',' which makes the diff for adding new links cleaner ; diff --git a/server/src/main/java/org/elasticsearch/common/bytes/BytesReferenceStreamInput.java b/server/src/main/java/org/elasticsearch/common/bytes/BytesReferenceStreamInput.java index d42e1874b2d58..22bed3ea0b1e9 100644 --- a/server/src/main/java/org/elasticsearch/common/bytes/BytesReferenceStreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/bytes/BytesReferenceStreamInput.java @@ -84,6 +84,24 @@ public long readLong() throws IOException { } } + @Override + public String readString() throws IOException { + final int chars = readArraySize(); + if (slice.hasArray()) { + // attempt reading bytes directly into a string to minimize copying + final String string = tryReadStringFromBytes( + slice.array(), + slice.position() + slice.arrayOffset(), + slice.limit() + slice.arrayOffset(), + chars + ); + if (string != null) { + return string; + } + } + return doReadString(chars); + } + @Override public int readVInt() throws IOException { if (slice.remaining() >= 5) { diff --git a/server/src/main/java/org/elasticsearch/common/collect/Iterators.java b/server/src/main/java/org/elasticsearch/common/collect/Iterators.java index 4b5cef4bbbd45..ea8eadd66acaa 100644 --- a/server/src/main/java/org/elasticsearch/common/collect/Iterators.java +++ b/server/src/main/java/org/elasticsearch/common/collect/Iterators.java @@ -16,6 +16,7 @@ import java.util.NoSuchElementException; import java.util.Objects; import java.util.function.BiPredicate; +import java.util.function.BooleanSupplier; import java.util.function.Consumer; import java.util.function.Function; import java.util.function.IntFunction; @@ -225,6 +226,38 @@ public U next() { } } + /** + * Returns an iterator over the same items as the provided {@code input} except that it stops yielding items (i.e. starts returning + * {@code false} from {@link Iterator#hasNext()} on failure. + */ + public static Iterator failFast(Iterator input, BooleanSupplier isFailingSupplier) { + if (isFailingSupplier.getAsBoolean()) { + return Collections.emptyIterator(); + } else { + return new FailFastIterator<>(input, isFailingSupplier); + } + } + + private static class FailFastIterator implements Iterator { + private final Iterator delegate; + private final BooleanSupplier isFailingSupplier; + + FailFastIterator(Iterator delegate, BooleanSupplier isFailingSupplier) { + this.delegate = delegate; + this.isFailingSupplier = isFailingSupplier; + } + + @Override + public boolean hasNext() { + return isFailingSupplier.getAsBoolean() == false && delegate.hasNext(); + } + + @Override + public T next() { + return delegate.next(); + } + } + public static boolean equals(Iterator iterator1, Iterator iterator2, BiPredicate itemComparer) { if (iterator1 == null) { return iterator2 == null; diff --git a/server/src/main/java/org/elasticsearch/common/inject/Binder.java b/server/src/main/java/org/elasticsearch/common/inject/Binder.java index d1ff5ff4b0d93..97aa924d32cb1 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/Binder.java +++ b/server/src/main/java/org/elasticsearch/common/inject/Binder.java @@ -52,11 +52,7 @@ * * Specifies that a request for a {@code Service} instance with no binding * annotations should be treated as if it were a request for a - * {@code ServiceImpl} instance. 
This overrides the function of any - * {@link ImplementedBy @ImplementedBy} or {@link ProvidedBy @ProvidedBy} - * annotations found on {@code Service}, since Guice will have already - * "moved on" to {@code ServiceImpl} before it reaches the point when it starts - * looking for these annotations. + * {@code ServiceImpl} instance. * *

  *     bind(Service.class).toProvider(ServiceProvider.class);
diff --git a/server/src/main/java/org/elasticsearch/common/inject/Binding.java b/server/src/main/java/org/elasticsearch/common/inject/Binding.java index 9f519e3daca0a..9bc446a867aa7 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/Binding.java +++ b/server/src/main/java/org/elasticsearch/common/inject/Binding.java @@ -31,9 +31,7 @@ *
  *     bind(Service.class).annotatedWith(Red.class).to(ServiceImpl.class);
  *     bindConstant().annotatedWith(ServerHost.class).to(args[0]);
- *
  • Implicitly by the Injector by following a type's {@link ImplementedBy - * pointer} {@link ProvidedBy annotations} or by using its {@link Inject annotated} or - * default constructor.
  • + *
  • Implicitly by the Injector by using its {@link Inject annotated} or default constructor.
  • *
  • By converting a bound instance to a different type.
  • *
  • For {@link Provider providers}, by delegating to the binding for the provided type.
  • * @@ -77,6 +75,6 @@ public interface Binding extends Element { * @param visitor to call back on * @since 2.0 */ - V acceptTargetVisitor(BindingTargetVisitor visitor); + void acceptTargetVisitor(BindingTargetVisitor visitor); } diff --git a/server/src/main/java/org/elasticsearch/common/inject/BindingProcessor.java b/server/src/main/java/org/elasticsearch/common/inject/BindingProcessor.java index 99a9d6fab9c1d..0865bf47090af 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/BindingProcessor.java +++ b/server/src/main/java/org/elasticsearch/common/inject/BindingProcessor.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.inject.internal.LinkedBindingImpl; import org.elasticsearch.common.inject.internal.LinkedProviderBindingImpl; import org.elasticsearch.common.inject.internal.ProviderInstanceBindingImpl; -import org.elasticsearch.common.inject.internal.ProviderMethod; import org.elasticsearch.common.inject.internal.Scoping; import org.elasticsearch.common.inject.internal.UntargettedBindingImpl; import org.elasticsearch.common.inject.spi.BindingTargetVisitor; @@ -62,12 +61,7 @@ public Boolean visit(Binding command) { final Object source = command.getSource(); if (Void.class.equals(command.getKey().getRawType())) { - if (command instanceof ProviderInstanceBinding - && ((ProviderInstanceBinding) command).getProviderInstance() instanceof ProviderMethod) { - errors.voidProviderMethod(); - } else { - errors.missingConstantValues(); - } + errors.missingConstantValues(); return true; } diff --git a/server/src/main/java/org/elasticsearch/common/inject/ConstructionProxyFactory.java b/server/src/main/java/org/elasticsearch/common/inject/ConstructionProxyFactory.java deleted file mode 100644 index 9a0cd367e1650..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/ConstructionProxyFactory.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright (C) 2006 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.elasticsearch.common.inject; - -/** - * Creates {@link ConstructionProxy} instances. - * - * @author crazybob@google.com (Bob Lee) - */ -interface ConstructionProxyFactory { - - /** - * Gets a construction proxy for the given constructor. 
- */ - ConstructionProxy create(); -} diff --git a/server/src/main/java/org/elasticsearch/common/inject/ConstructorBindingImpl.java b/server/src/main/java/org/elasticsearch/common/inject/ConstructorBindingImpl.java index 0c690f7ed9fa1..153c9627d736e 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/ConstructorBindingImpl.java +++ b/server/src/main/java/org/elasticsearch/common/inject/ConstructorBindingImpl.java @@ -54,11 +54,11 @@ public void initialize(InjectorImpl injector, Errors errors) throws ErrorsExcept } @Override - public V acceptTargetVisitor(BindingTargetVisitor visitor) { + public void acceptTargetVisitor(BindingTargetVisitor visitor) { if (factory.constructorInjector == null) { throw new IllegalStateException("not initialized"); } - return visitor.visit(); + visitor.visit(); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/inject/ConstructorInjector.java b/server/src/main/java/org/elasticsearch/common/inject/ConstructorInjector.java index 7b9f4be9c5a99..d38a75e0720d7 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/ConstructorInjector.java +++ b/server/src/main/java/org/elasticsearch/common/inject/ConstructorInjector.java @@ -80,7 +80,6 @@ Object construct(Errors errors, InternalContext context, Class expectedType) constructionContext.setCurrentReference(t); membersInjector.injectMembers(t, errors, context); - membersInjector.notifyListeners(t, errors); return t; } catch (InvocationTargetException userException) { diff --git a/server/src/main/java/org/elasticsearch/common/inject/ConstructorInjectorStore.java b/server/src/main/java/org/elasticsearch/common/inject/ConstructorInjectorStore.java index 29ccae98c7d27..97a495f97cfbd 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/ConstructorInjectorStore.java +++ b/server/src/main/java/org/elasticsearch/common/inject/ConstructorInjectorStore.java @@ -21,6 +21,9 @@ import org.elasticsearch.common.inject.internal.FailableCache; import org.elasticsearch.common.inject.spi.InjectionPoint; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; + /** * Constructor injectors by type. * @@ -65,10 +68,28 @@ private ConstructorInjector createConstructor(TypeLiteral type, Errors ); MembersInjectorImpl membersInjector = injector.membersInjectorStore.get(type, errors); - ConstructionProxyFactory factory = new DefaultConstructionProxyFactory<>(injectionPoint); - errors.throwIfNewErrors(numErrorsBefore); - return new ConstructorInjector<>(factory.create(), constructorParameterInjectors, membersInjector); + @SuppressWarnings("unchecked") // the injection point is for a constructor of T + final Constructor constructor = (Constructor) injectionPoint.getMember(); + return new ConstructorInjector<>(new ConstructionProxy<>() { + @Override + public T newInstance(Object... 
arguments) throws InvocationTargetException { + try { + return constructor.newInstance(arguments); + } catch (InstantiationException e) { + throw new AssertionError(e); // shouldn't happen, we know this is a concrete type + } catch (IllegalAccessException e) { + // a security manager is blocking us, we're hosed + throw new AssertionError("Wrong access modifiers on " + constructor, e); + } + } + + @Override + public InjectionPoint getInjectionPoint() { + return injectionPoint; + } + + }, constructorParameterInjectors, membersInjector); } } diff --git a/server/src/main/java/org/elasticsearch/common/inject/DefaultConstructionProxyFactory.java b/server/src/main/java/org/elasticsearch/common/inject/DefaultConstructionProxyFactory.java deleted file mode 100644 index cc713893abd69..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/DefaultConstructionProxyFactory.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright (C) 2006 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.elasticsearch.common.inject; - -import org.elasticsearch.common.inject.spi.InjectionPoint; - -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; - -/** - * Produces construction proxies that invoke the class constructor. - * - * @author crazybob@google.com (Bob Lee) - */ -class DefaultConstructionProxyFactory implements ConstructionProxyFactory { - - private final InjectionPoint injectionPoint; - - /** - * @param injectionPoint an injection point whose member is a constructor of {@code T}. - */ - DefaultConstructionProxyFactory(InjectionPoint injectionPoint) { - this.injectionPoint = injectionPoint; - } - - @Override - public ConstructionProxy create() { - @SuppressWarnings("unchecked") // the injection point is for a constructor of T - final Constructor constructor = (Constructor) injectionPoint.getMember(); - - return new ConstructionProxy<>() { - @Override - public T newInstance(Object... arguments) throws InvocationTargetException { - try { - return constructor.newInstance(arguments); - } catch (InstantiationException e) { - throw new AssertionError(e); // shouldn't happen, we know this is a concrete type - } catch (IllegalAccessException e) { - // a security manager is blocking us, we're hosed - throw new AssertionError("Wrong access modifiers on " + constructor, e); - } - } - - @Override - public InjectionPoint getInjectionPoint() { - return injectionPoint; - } - - }; - } -} diff --git a/server/src/main/java/org/elasticsearch/common/inject/Exposed.java b/server/src/main/java/org/elasticsearch/common/inject/Exposed.java deleted file mode 100644 index 4f557212da883..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/Exposed.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright (C) 2008 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.elasticsearch.common.inject; - -import java.lang.annotation.Documented; -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.Target; - -import static java.lang.annotation.RetentionPolicy.RUNTIME; - -/** - * Accompanies a {@literal @}{@link org.elasticsearch.common.inject.Provides Provides} method annotation in a - * private module to indicate that the provided binding is exposed. - * - * @author jessewilson@google.com (Jesse Wilson) - * @since 2.0 - */ -@Target(ElementType.METHOD) -@Retention(RUNTIME) -@Documented -public @interface Exposed { -} diff --git a/server/src/main/java/org/elasticsearch/common/inject/ImplementedBy.java b/server/src/main/java/org/elasticsearch/common/inject/ImplementedBy.java deleted file mode 100644 index 652be0f3ed30c..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/ImplementedBy.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (C) 2006 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.elasticsearch.common.inject; - -import java.lang.annotation.Retention; -import java.lang.annotation.Target; - -import static java.lang.annotation.ElementType.TYPE; -import static java.lang.annotation.RetentionPolicy.RUNTIME; - -/** - * A pointer to the default implementation of a type. - * - * @author crazybob@google.com (Bob Lee) - */ -@Retention(RUNTIME) -@Target(TYPE) -public @interface ImplementedBy { - - /** - * The implementation type. 
- */ - Class value(); -} diff --git a/server/src/main/java/org/elasticsearch/common/inject/Inject.java b/server/src/main/java/org/elasticsearch/common/inject/Inject.java index 0a30b7b97a2da..e56c4c21ad39e 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/Inject.java +++ b/server/src/main/java/org/elasticsearch/common/inject/Inject.java @@ -21,7 +21,6 @@ import java.lang.annotation.Target; import static java.lang.annotation.ElementType.CONSTRUCTOR; -import static java.lang.annotation.ElementType.FIELD; import static java.lang.annotation.ElementType.METHOD; import static java.lang.annotation.RetentionPolicy.RUNTIME; @@ -45,7 +44,7 @@ * * @author crazybob@google.com (Bob Lee) */ -@Target({ METHOD, CONSTRUCTOR, FIELD }) +@Target({ METHOD, CONSTRUCTOR }) @Retention(RUNTIME) @Documented public @interface Inject { diff --git a/server/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java b/server/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java index 8d51894bf9907..8614fd99da088 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java +++ b/server/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java @@ -17,15 +17,12 @@ package org.elasticsearch.common.inject; import org.elasticsearch.common.Classes; -import org.elasticsearch.common.inject.internal.Annotations; import org.elasticsearch.common.inject.internal.BindingImpl; import org.elasticsearch.common.inject.internal.Errors; import org.elasticsearch.common.inject.internal.ErrorsException; import org.elasticsearch.common.inject.internal.InstanceBindingImpl; import org.elasticsearch.common.inject.internal.InternalContext; import org.elasticsearch.common.inject.internal.InternalFactory; -import org.elasticsearch.common.inject.internal.LinkedBindingImpl; -import org.elasticsearch.common.inject.internal.LinkedProviderBindingImpl; import org.elasticsearch.common.inject.internal.MatcherAndConverter; import org.elasticsearch.common.inject.internal.Scoping; import org.elasticsearch.common.inject.internal.SourceProvider; @@ -39,7 +36,6 @@ import java.lang.annotation.Annotation; import java.lang.reflect.GenericArrayType; -import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Modifier; import java.lang.reflect.ParameterizedType; import java.lang.reflect.Type; @@ -195,8 +191,8 @@ static InternalFactory> createInternalFactory(Binding provide } @Override - public V acceptTargetVisitor(BindingTargetVisitor, V> visitor) { - return visitor.visit(); + public void acceptTargetVisitor(BindingTargetVisitor, V> visitor) { + visitor.visit(); } @Override @@ -270,8 +266,8 @@ public Provider getProvider() { } @Override - public V acceptTargetVisitor(BindingTargetVisitor visitor) { - return visitor.visit(); + public void acceptTargetVisitor(BindingTargetVisitor visitor) { + visitor.visit(); } @Override @@ -322,20 +318,6 @@ BindingImpl createUnitializedBinding(Key key, Scoping scoping, Object return binding; } - // Handle @ImplementedBy - ImplementedBy implementedBy = rawType.getAnnotation(ImplementedBy.class); - if (implementedBy != null) { - Annotations.checkForMisplacedScopeAnnotations(rawType, source, errors); - return createImplementedByBinding(key, scoping, implementedBy, errors); - } - - // Handle @ProvidedBy. 
- ProvidedBy providedBy = rawType.getAnnotation(ProvidedBy.class); - if (providedBy != null) { - Annotations.checkForMisplacedScopeAnnotations(rawType, source, errors); - return createProvidedByBinding(key, scoping, providedBy, errors); - } - // We can't inject abstract classes. // TODO: Method interceptors could actually enable us to implement // abstract types. Should we remove this restriction? @@ -385,80 +367,6 @@ private BindingImpl> createTypeLiteralBinding(Key(this, key, SourceProvider.UNKNOWN_SOURCE, factory, emptySet(), value); } - /** - * Creates a binding for a type annotated with @ProvidedBy. - */ - BindingImpl createProvidedByBinding(Key key, Scoping scoping, ProvidedBy providedBy, Errors errors) throws ErrorsException { - final Class rawType = key.getTypeLiteral().getRawType(); - final Class> providerType = providedBy.value(); - - // Make sure it's not the same type. TODO: Can we check for deeper loops? - if (providerType == rawType) { - throw errors.recursiveProviderType().toException(); - } - - // Assume the provider provides an appropriate type. We double check at runtime. - @SuppressWarnings("unchecked") - final Key> providerKey = (Key>) Key.get(providerType); - final BindingImpl> providerBinding = getBindingOrThrow(providerKey, errors); - - InternalFactory internalFactory = (errors1, context, dependency) -> { - errors1 = errors1.withSource(providerKey); - Provider provider = providerBinding.getInternalFactory().get(errors1, context, dependency); - try { - Object o = provider.get(); - if (o != null && rawType.isInstance(o) == false) { - throw errors1.subtypeNotProvided(providerType, rawType).toException(); - } - @SuppressWarnings("unchecked") // protected by isInstance() check above - T t = (T) o; - return t; - } catch (RuntimeException e) { - throw errors1.errorInProvider(e).toException(); - } - }; - - return new LinkedProviderBindingImpl<>( - this, - key, - rawType /* source */, - Scopes.scope(this, internalFactory, scoping), - scoping, - providerKey - ); - } - - /** - * Creates a binding for a type annotated with @ImplementedBy. - */ - BindingImpl createImplementedByBinding(Key key, Scoping scoping, ImplementedBy implementedBy, Errors errors) - throws ErrorsException { - Class rawType = key.getTypeLiteral().getRawType(); - Class implementationType = implementedBy.value(); - - // Make sure it's not the same type. TODO: Can we check for deeper cycles? - if (implementationType == rawType) { - throw errors.recursiveImplementationType().toException(); - } - - // Make sure implementationType extends type. - if (rawType.isAssignableFrom(implementationType) == false) { - throw errors.notASubtype(implementationType, rawType).toException(); - } - - @SuppressWarnings("unchecked") // After the preceding check, this cast is safe. - Class subclass = (Class) implementationType; - - // Look up the target binding. - final Key targetKey = Key.get(subclass); - final BindingImpl targetBinding = getBindingOrThrow(targetKey, errors); - - InternalFactory internalFactory = (errors1, context, dependency) -> targetBinding.getInternalFactory() - .get(errors1.withSource(targetKey), context, dependency); - - return new LinkedBindingImpl<>(this, key, rawType /* source */, Scopes.scope(this, internalFactory, scoping), scoping, targetKey); - } - /** * Attempts to create a just-in-time binding for {@code key} in the root injector, falling back to * other ancestor injectors until this injector is tried. 
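Editor's note: with createImplementedByBinding and createProvidedByBinding removed above, this fork no longer derives just-in-time bindings from @ImplementedBy/@ProvidedBy annotations on the bound type; such bindings now have to be declared in a module. A minimal sketch of the explicit equivalent, assuming the retained binder EDSL still mirrors stock Guice's bind(...).to(...) and bind(...).toProvider(...) (all type names below are hypothetical):

    import org.elasticsearch.common.inject.AbstractModule;
    import org.elasticsearch.common.inject.Provider;

    // Service, ServiceImpl, Widget and WidgetProvider are placeholders for this sketch.
    interface Service {}
    class ServiceImpl implements Service {}
    class Widget {}
    class WidgetProvider implements Provider<Widget> {
        @Override
        public Widget get() {
            return new Widget();
        }
    }

    class ExplicitBindingsModule extends AbstractModule {
        @Override
        protected void configure() {
            // roughly what @ImplementedBy(ServiceImpl.class) on Service used to imply:
            bind(Service.class).to(ServiceImpl.class);
            // roughly what @ProvidedBy(WidgetProvider.class) on Widget used to imply:
            bind(Widget.class).toProvider(new WidgetProvider());
        }
    }

Linked and provider bindings themselves survive (LinkedBindingImpl and LinkedProviderBindingImpl are untouched elsewhere in this patch); only the annotation-driven shortcut to them is gone.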
@@ -584,13 +492,6 @@ SingleParameterInjector createParameterInjector(final Dependency depen return new SingleParameterInjector<>(dependency, factory); } - /** - * Invokes a method. - */ - interface MethodInvoker { - Object invoke(Object target, Object... parameters) throws IllegalAccessException, InvocationTargetException; - } - /** * Cached constructor injectors for each type */ diff --git a/server/src/main/java/org/elasticsearch/common/inject/MembersInjector.java b/server/src/main/java/org/elasticsearch/common/inject/MembersInjector.java index 0a4464a373e18..ffaee1648ab5a 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/MembersInjector.java +++ b/server/src/main/java/org/elasticsearch/common/inject/MembersInjector.java @@ -25,17 +25,4 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 */ -public interface MembersInjector { - - /** - * Injects dependencies into the fields and methods of {@code instance}. Ignores the presence or - * absence of an injectable constructor. - *
    - * Whenever Guice creates an instance, it performs this injection automatically (after first - * performing constructor injection), so if you're able to let Guice create all your objects for - * you, you'll never need to use this method. - * - * @param instance to inject members on. May be {@code null}. - */ - void injectMembers(T instance); -} +public interface MembersInjector {} diff --git a/server/src/main/java/org/elasticsearch/common/inject/MembersInjectorImpl.java b/server/src/main/java/org/elasticsearch/common/inject/MembersInjectorImpl.java index b32cddf9be4bc..8c190ef301651 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/MembersInjectorImpl.java +++ b/server/src/main/java/org/elasticsearch/common/inject/MembersInjectorImpl.java @@ -19,7 +19,6 @@ import org.elasticsearch.common.inject.internal.Errors; import org.elasticsearch.common.inject.internal.ErrorsException; import org.elasticsearch.common.inject.internal.InternalContext; -import org.elasticsearch.common.inject.spi.InjectionListener; import java.util.List; @@ -31,28 +30,12 @@ class MembersInjectorImpl implements MembersInjector { private final TypeLiteral typeLiteral; private final InjectorImpl injector; - private final List memberInjectors; - private final List> userMembersInjectors; - private final List> injectionListeners; + private final List memberInjectors; - MembersInjectorImpl(InjectorImpl injector, TypeLiteral typeLiteral, List memberInjectors) { + MembersInjectorImpl(InjectorImpl injector, TypeLiteral typeLiteral, List memberInjectors) { this.injector = injector; this.typeLiteral = typeLiteral; this.memberInjectors = memberInjectors; - this.userMembersInjectors = List.of(); - this.injectionListeners = List.of(); - } - - @Override - public void injectMembers(T instance) { - Errors errors = new Errors(typeLiteral); - try { - injectAndNotify(instance, errors); - } catch (ErrorsException e) { - errors.merge(e.getErrors()); - } - - errors.throwProvisionExceptionIfErrorsExist(); } void injectAndNotify(final T instance, final Errors errors) throws ErrorsException { @@ -64,20 +47,6 @@ void injectAndNotify(final T instance, final Errors errors) throws ErrorsExcepti injectMembers(instance, errors, context); return null; }); - - notifyListeners(instance, errors); - } - - void notifyListeners(T instance, Errors errors) throws ErrorsException { - int numErrorsBefore = errors.size(); - for (InjectionListener injectionListener : injectionListeners) { - try { - injectionListener.afterInjection(instance); - } catch (RuntimeException e) { - errors.errorNotifyingInjectionListener(injectionListener, typeLiteral, e); - } - } - errors.throwIfNewErrors(numErrorsBefore); } void injectMembers(T t, Errors errors, InternalContext context) { @@ -85,16 +54,6 @@ void injectMembers(T t, Errors errors, InternalContext context) { for (int i = 0, size = memberInjectors.size(); i < size; i++) { memberInjectors.get(i).inject(errors, context, t); } - - // optimization: use manual for/each to save allocating an iterator here - for (int i = 0, size = userMembersInjectors.size(); i < size; i++) { - MembersInjector userMembersInjector = userMembersInjectors.get(i); - try { - userMembersInjector.injectMembers(t); - } catch (RuntimeException e) { - errors.errorInUserInjector(userMembersInjector, typeLiteral, e); - } - } } @Override diff --git a/server/src/main/java/org/elasticsearch/common/inject/MembersInjectorStore.java b/server/src/main/java/org/elasticsearch/common/inject/MembersInjectorStore.java index 
9352c84db28f6..925739af25742 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/MembersInjectorStore.java +++ b/server/src/main/java/org/elasticsearch/common/inject/MembersInjectorStore.java @@ -21,7 +21,6 @@ import org.elasticsearch.common.inject.internal.FailableCache; import org.elasticsearch.common.inject.spi.InjectionPoint; -import java.lang.reflect.Field; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -62,12 +61,12 @@ private MembersInjectorImpl createWithListeners(TypeLiteral type, Erro Set injectionPoints; try { - injectionPoints = InjectionPoint.forInstanceMethodsAndFields(type); + injectionPoints = InjectionPoint.forInstanceMethods(type); } catch (ConfigurationException e) { errors.merge(e.getErrorMessages()); injectionPoints = e.getPartialValue(); } - List injectors = getInjectors(injectionPoints, errors); + List injectors = getInjectors(injectionPoints, errors); errors.throwIfNewErrors(numErrorsBefore); return new MembersInjectorImpl<>(injector, type, injectors); @@ -76,14 +75,12 @@ private MembersInjectorImpl createWithListeners(TypeLiteral type, Erro /** * Returns the injectors for the specified injection points. */ - List getInjectors(Set injectionPoints, Errors errors) { - List injectors = new ArrayList<>(); + List getInjectors(Set injectionPoints, Errors errors) { + List injectors = new ArrayList<>(); for (InjectionPoint injectionPoint : injectionPoints) { try { Errors errorsForMember = injectionPoint.isOptional() ? new Errors(injectionPoint) : errors.withSource(injectionPoint); - SingleMemberInjector injector = injectionPoint.getMember() instanceof Field - ? new SingleFieldInjector(this.injector, injectionPoint, errorsForMember) - : new SingleMethodInjector(this.injector, injectionPoint, errorsForMember); + SingleMethodInjector injector = new SingleMethodInjector(this.injector, injectionPoint, errorsForMember); injectors.add(injector); } catch (ErrorsException ignoredForNow) { // ignored for now diff --git a/server/src/main/java/org/elasticsearch/common/inject/Module.java b/server/src/main/java/org/elasticsearch/common/inject/Module.java index f3a43d80f31ec..38eddcdb200b7 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/Module.java +++ b/server/src/main/java/org/elasticsearch/common/inject/Module.java @@ -24,11 +24,6 @@ *
    * Your Module classes can use a more streamlined syntax by extending * {@link AbstractModule} rather than implementing this interface directly. - *
    - * In addition to the bindings configured via {@link #configure}, bindings - * will be created for all methods annotated with {@literal @}{@link Provides}. - * Use scope and binding annotations on these methods to configure the - * bindings. */ public interface Module { @@ -36,8 +31,7 @@ public interface Module { * Contributes bindings and other configurations for this module to {@code binder}. *
    * Do not invoke this method directly to install submodules. Instead use - * {@link Binder#install(Module)}, which ensures that {@link Provides provider methods} are - * discovered. + * {@link Binder#install(Module)}. */ void configure(Binder binder); } diff --git a/server/src/main/java/org/elasticsearch/common/inject/PrivateBinder.java b/server/src/main/java/org/elasticsearch/common/inject/PrivateBinder.java index f1da98316465a..fd80e6271b2cf 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/PrivateBinder.java +++ b/server/src/main/java/org/elasticsearch/common/inject/PrivateBinder.java @@ -24,11 +24,6 @@ */ public interface PrivateBinder extends Binder { - /** - * Makes the binding for {@code key} available to the enclosing environment - */ - void expose(Key key); - @Override PrivateBinder withSource(Object source); diff --git a/server/src/main/java/org/elasticsearch/common/inject/ProvidedBy.java b/server/src/main/java/org/elasticsearch/common/inject/ProvidedBy.java deleted file mode 100644 index 945de83cf9116..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/ProvidedBy.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (C) 2006 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.elasticsearch.common.inject; - -import java.lang.annotation.Retention; -import java.lang.annotation.Target; - -import static java.lang.annotation.ElementType.TYPE; -import static java.lang.annotation.RetentionPolicy.RUNTIME; - -/** - * A pointer to the default provider type for a type. - * - * @author crazybob@google.com (Bob Lee) - */ -@Retention(RUNTIME) -@Target(TYPE) -public @interface ProvidedBy { - - /** - * The implementation type. - */ - Class> value(); -} diff --git a/server/src/main/java/org/elasticsearch/common/inject/Provides.java b/server/src/main/java/org/elasticsearch/common/inject/Provides.java deleted file mode 100644 index 587005f883574..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/Provides.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright (C) 2007 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.elasticsearch.common.inject; - -import java.lang.annotation.Documented; -import java.lang.annotation.Retention; -import java.lang.annotation.Target; - -import static java.lang.annotation.ElementType.METHOD; -import static java.lang.annotation.RetentionPolicy.RUNTIME; - -/** - * Annotates methods of a {@link Module} to create a provider method binding. 
The method's return - * type is bound to its returned value. Guice will pass dependencies to the method as parameters. - * - * @author crazybob@google.com (Bob Lee) - * @since 2.0 - */ -@Documented -@Target(METHOD) -@Retention(RUNTIME) -public @interface Provides { -} diff --git a/server/src/main/java/org/elasticsearch/common/inject/SingleFieldInjector.java b/server/src/main/java/org/elasticsearch/common/inject/SingleFieldInjector.java deleted file mode 100644 index 7e8bfed724d59..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/SingleFieldInjector.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright (C) 2008 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.elasticsearch.common.inject; - -import org.elasticsearch.common.inject.internal.Errors; -import org.elasticsearch.common.inject.internal.ErrorsException; -import org.elasticsearch.common.inject.internal.InternalContext; -import org.elasticsearch.common.inject.internal.InternalFactory; -import org.elasticsearch.common.inject.spi.Dependency; -import org.elasticsearch.common.inject.spi.InjectionPoint; - -import java.lang.reflect.Field; - -/** - * Sets an injectable field. - */ -class SingleFieldInjector implements SingleMemberInjector { - final Field field; - final InjectionPoint injectionPoint; - final Dependency dependency; - final InternalFactory factory; - - SingleFieldInjector(InjectorImpl injector, InjectionPoint injectionPoint, Errors errors) throws ErrorsException { - this.injectionPoint = injectionPoint; - this.field = (Field) injectionPoint.getMember(); - this.dependency = injectionPoint.getDependencies().get(0); - factory = injector.getInternalFactory(dependency.getKey(), errors); - } - - @Override - public void inject(Errors errors, InternalContext context, Object o) { - errors = errors.withSource(dependency); - - context.setDependency(dependency); - try { - Object value = factory.get(errors, context, dependency); - field.set(o, value); - } catch (ErrorsException e) { - errors.withSource(injectionPoint).merge(e.getErrors()); - } catch (IllegalAccessException e) { - throw new AssertionError(e); // a security manager is blocking us, we're hosed - } finally { - context.setDependency(null); - } - } -} diff --git a/server/src/main/java/org/elasticsearch/common/inject/SingleMemberInjector.java b/server/src/main/java/org/elasticsearch/common/inject/SingleMemberInjector.java deleted file mode 100644 index a4e25f9fd000b..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/SingleMemberInjector.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (C) 2008 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.elasticsearch.common.inject; - -import org.elasticsearch.common.inject.internal.Errors; -import org.elasticsearch.common.inject.internal.InternalContext; - -/** - * Injects a field or method of a given object. - */ -interface SingleMemberInjector { - void inject(Errors errors, InternalContext context, Object o); - -} diff --git a/server/src/main/java/org/elasticsearch/common/inject/SingleMethodInjector.java b/server/src/main/java/org/elasticsearch/common/inject/SingleMethodInjector.java index f6d9a2eb2c396..d36bc1e623a99 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/SingleMethodInjector.java +++ b/server/src/main/java/org/elasticsearch/common/inject/SingleMethodInjector.java @@ -16,7 +16,6 @@ package org.elasticsearch.common.inject; -import org.elasticsearch.common.inject.InjectorImpl.MethodInvoker; import org.elasticsearch.common.inject.internal.Errors; import org.elasticsearch.common.inject.internal.ErrorsException; import org.elasticsearch.common.inject.internal.InternalContext; @@ -28,19 +27,17 @@ /** * Invokes an injectable method. */ -class SingleMethodInjector implements SingleMemberInjector { - final MethodInvoker methodInvoker; +class SingleMethodInjector { + final Method method; final SingleParameterInjector[] parameterInjectors; final InjectionPoint injectionPoint; SingleMethodInjector(InjectorImpl injector, InjectionPoint injectionPoint, Errors errors) throws ErrorsException { this.injectionPoint = injectionPoint; - final Method method = (Method) injectionPoint.getMember(); - methodInvoker = method::invoke; + method = (Method) injectionPoint.getMember(); parameterInjectors = injector.getParametersInjectors(injectionPoint.getDependencies(), errors); } - @Override public void inject(Errors errors, InternalContext context, Object o) { Object[] parameters; try { @@ -51,7 +48,7 @@ public void inject(Errors errors, InternalContext context, Object o) { } try { - methodInvoker.invoke(o, parameters); + method.invoke(o, parameters); } catch (IllegalAccessException e) { throw new AssertionError(e); // a security manager is blocking us, we're hosed } catch (InvocationTargetException userException) { diff --git a/server/src/main/java/org/elasticsearch/common/inject/TypeLiteral.java b/server/src/main/java/org/elasticsearch/common/inject/TypeLiteral.java index 72bf444d2dd3b..d39c4e44d2ff9 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/TypeLiteral.java +++ b/server/src/main/java/org/elasticsearch/common/inject/TypeLiteral.java @@ -20,7 +20,6 @@ import org.elasticsearch.common.inject.util.Types; import java.lang.reflect.Constructor; -import java.lang.reflect.Field; import java.lang.reflect.GenericArrayType; import java.lang.reflect.Member; import java.lang.reflect.Method; @@ -249,19 +248,6 @@ public TypeLiteral getSupertype(Class supertype) { return resolve(MoreTypes.getGenericSupertype(type, rawType, supertype)); } - /** - * Returns the resolved generic type of {@code field}. - * - * @param field a field defined by this or any superclass. 
- * @since 2.0 - */ - public TypeLiteral getFieldType(Field field) { - if (field.getDeclaringClass().isAssignableFrom(rawType) == false) { - throw new IllegalArgumentException(field + " is not defined by a supertype of " + type); - } - return resolve(field.getGenericType()); - } - /** * Returns the resolved generic parameter types of {@code methodOrConstructor}. * @@ -291,17 +277,4 @@ public List> getParameterTypes(Member methodOrConstructor) { return resolveAll(genericParameterTypes); } - /** - * Returns the resolved generic return type of {@code method}. - * - * @param method a method defined by this or any supertype. - * @since 2.0 - */ - public TypeLiteral getReturnType(Method method) { - if (method.getDeclaringClass().isAssignableFrom(rawType) == false) { - throw new IllegalArgumentException(method + " is not defined by a supertype of " + type); - } - - return resolve(method.getGenericReturnType()); - } } diff --git a/server/src/main/java/org/elasticsearch/common/inject/binder/ScopedBindingBuilder.java b/server/src/main/java/org/elasticsearch/common/inject/binder/ScopedBindingBuilder.java index 88b7fd86370c6..dad91c3fb8878 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/binder/ScopedBindingBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/inject/binder/ScopedBindingBuilder.java @@ -16,8 +16,6 @@ package org.elasticsearch.common.inject.binder; -import java.lang.annotation.Annotation; - /** * See the EDSL examples at {@link org.elasticsearch.common.inject.Binder}. * @@ -25,11 +23,6 @@ */ public interface ScopedBindingBuilder { - /** - * See the EDSL examples at {@link org.elasticsearch.common.inject.Binder}. - */ - void in(Class scopeAnnotation); - /** * Instructs the {@link org.elasticsearch.common.inject.Injector} to eagerly initialize this * singleton-scoped binding upon creation. Useful for application diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/AbstractBindingBuilder.java b/server/src/main/java/org/elasticsearch/common/inject/internal/AbstractBindingBuilder.java index 083c7296fe883..60b6a74dec997 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/AbstractBindingBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/AbstractBindingBuilder.java @@ -21,9 +21,7 @@ import org.elasticsearch.common.inject.spi.Element; import org.elasticsearch.common.inject.spi.InstanceBinding; -import java.lang.annotation.Annotation; import java.util.List; -import java.util.Objects; /** * Bind a value or constant. 
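Editor's note: together with the deleted SingleFieldInjector/SingleMemberInjector and the removal of TypeLiteral#getFieldType above, narrowing @Inject's @Target to METHOD and CONSTRUCTOR removes field injection from this fork outright. A small sketch of the two injection forms that remain (Service, Clock and Indexer are hypothetical):

    import org.elasticsearch.common.inject.Inject;

    interface Service {}
    interface Clock {}

    class Indexer {
        private final Service service; // assigned by constructor injection
        private Clock clock;           // assigned by method (setter) injection

        @Inject
        Indexer(Service service) {
            this.service = service;
        }

        @Inject
        public void setClock(Clock clock) { // METHOD is still a legal target
            this.clock = clock;
        }

        // An @Inject-annotated field would now be rejected by the compiler,
        // because FIELD is no longer listed in this annotation's @Target.
    }

Any leftover @Inject fields in callers would therefore fail at compile time rather than being silently skipped by the injector.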
@@ -61,12 +59,6 @@ protected BindingImpl setBinding(BindingImpl binding) { return binding; } - public void in(final Class scopeAnnotation) { - Objects.requireNonNull(scopeAnnotation, "scopeAnnotation"); - checkNotScoped(); - setBinding(getBinding().withScoping(Scoping.forAnnotation(scopeAnnotation))); - } - public void asEagerSingleton() { checkNotScoped(); setBinding(getBinding().withScoping(Scoping.EAGER_SINGLETON)); diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/BindingBuilder.java b/server/src/main/java/org/elasticsearch/common/inject/internal/BindingBuilder.java index 3837741bc3119..fd40879025c65 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/BindingBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/BindingBuilder.java @@ -60,7 +60,7 @@ public void toInstance(T instance) { Set injectionPoints; if (instance != null) { try { - injectionPoints = InjectionPoint.forInstanceMethodsAndFields(instance.getClass()); + injectionPoints = InjectionPoint.forInstanceMethods(instance.getClass()); } catch (ConfigurationException e) { for (Message message : e.getErrorMessages()) { binder.addError(message); @@ -84,7 +84,7 @@ public BindingBuilder toProvider(Provider provider) { // lookup the injection points, adding any errors to the binder's errors list Set injectionPoints; try { - injectionPoints = InjectionPoint.forInstanceMethodsAndFields(provider.getClass()); + injectionPoints = InjectionPoint.forInstanceMethods(provider.getClass()); } catch (ConfigurationException e) { for (Message message : e.getErrorMessages()) { binder.addError(message); diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/Errors.java b/server/src/main/java/org/elasticsearch/common/inject/internal/Errors.java index 03a584d5c508b..ea4b530f48b9b 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/Errors.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/Errors.java @@ -21,13 +21,10 @@ import org.elasticsearch.common.inject.CreationException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Key; -import org.elasticsearch.common.inject.MembersInjector; -import org.elasticsearch.common.inject.Provider; import org.elasticsearch.common.inject.ProvisionException; import org.elasticsearch.common.inject.Scope; import org.elasticsearch.common.inject.TypeLiteral; import org.elasticsearch.common.inject.spi.Dependency; -import org.elasticsearch.common.inject.spi.InjectionListener; import org.elasticsearch.common.inject.spi.InjectionPoint; import org.elasticsearch.common.inject.spi.Message; @@ -199,22 +196,6 @@ public Errors bindingToProvider() { return addMessage("Binding to Provider is not allowed."); } - public Errors subtypeNotProvided(Class> providerType, Class type) { - return addMessage("%s doesn't provide instances of %s.", providerType, type); - } - - public Errors notASubtype(Class implementationType, Class type) { - return addMessage("%s doesn't extend %s.", implementationType, type); - } - - public Errors recursiveImplementationType() { - return addMessage("@ImplementedBy points to the same class it annotates."); - } - - public Errors recursiveProviderType() { - return addMessage("@ProvidedBy points to the same class it annotates."); - } - public Errors missingRuntimeRetention(Object source) { return addMessage("Please annotate with @Retention(RUNTIME).%n" + " Bound at %s.", convert(source)); } @@ -268,10 +249,6 @@ public Errors 
duplicateScopes(Scope existing, Class annota return addMessage("Scope %s is already bound to %s. Cannot bind %s.", existing, annotationType, scope); } - public Errors voidProviderMethod() { - return addMessage("Provider methods must return a value. Do not return void."); - } - public Errors missingConstantValues() { return addMessage("Missing constant value. Please call to(...)."); } @@ -315,14 +292,6 @@ public Errors errorInProvider(RuntimeException runtimeException) { return errorInUserCode(runtimeException, "Error in custom provider, %s", runtimeException); } - public Errors errorInUserInjector(MembersInjector listener, TypeLiteral type, RuntimeException cause) { - return errorInUserCode(cause, "Error injecting %s using %s.%n" + " Reason: %s", type, listener, cause); - } - - public Errors errorNotifyingInjectionListener(InjectionListener listener, TypeLiteral type, RuntimeException cause) { - return errorInUserCode(cause, "Error notifying InjectionListener %s of %s.%n" + " Reason: %s", listener, type, cause); - } - public static Collection getMessagesFromThrowable(Throwable throwable) { if (throwable instanceof ProvisionException) { return ((ProvisionException) throwable).getErrorMessages(); @@ -381,14 +350,6 @@ public void throwConfigurationExceptionIfErrorsExist() { throw new ConfigurationException(getMessages()); } - public void throwProvisionExceptionIfErrorsExist() { - if (hasErrors() == false) { - return; - } - - throw new ProvisionException(getMessages()); - } - private Message merge(Message message) { List sources = new ArrayList<>(); sources.addAll(getSources()); diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/InstanceBindingImpl.java b/server/src/main/java/org/elasticsearch/common/inject/internal/InstanceBindingImpl.java index 07c9dd0e4cf25..f5b36cf33b800 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/InstanceBindingImpl.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/InstanceBindingImpl.java @@ -59,8 +59,8 @@ public Provider getProvider() { } @Override - public V acceptTargetVisitor(BindingTargetVisitor visitor) { - return visitor.visit(this); + public void acceptTargetVisitor(BindingTargetVisitor visitor) { + visitor.visit(this); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/LinkedBindingImpl.java b/server/src/main/java/org/elasticsearch/common/inject/internal/LinkedBindingImpl.java index 56e1a92c25018..135726f80e25b 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/LinkedBindingImpl.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/LinkedBindingImpl.java @@ -43,8 +43,8 @@ public LinkedBindingImpl(Object source, Key key, Scoping scoping, Key V acceptTargetVisitor(BindingTargetVisitor visitor) { - return visitor.visit(this); + public void acceptTargetVisitor(BindingTargetVisitor visitor) { + visitor.visit(this); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/LinkedProviderBindingImpl.java b/server/src/main/java/org/elasticsearch/common/inject/internal/LinkedProviderBindingImpl.java index a27692a68882b..0bfd2ef273a74 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/LinkedProviderBindingImpl.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/LinkedProviderBindingImpl.java @@ -44,8 +44,8 @@ public LinkedProviderBindingImpl( } @Override - public V acceptTargetVisitor(BindingTargetVisitor visitor) { - return visitor.visit(this); 
+ public void acceptTargetVisitor(BindingTargetVisitor visitor) { + visitor.visit(this); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/ProviderInstanceBindingImpl.java b/server/src/main/java/org/elasticsearch/common/inject/internal/ProviderInstanceBindingImpl.java index 676c0717896d5..792c18920a6fa 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/ProviderInstanceBindingImpl.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/ProviderInstanceBindingImpl.java @@ -57,8 +57,8 @@ public ProviderInstanceBindingImpl( } @Override - public V acceptTargetVisitor(BindingTargetVisitor visitor) { - return visitor.visit(this); + public void acceptTargetVisitor(BindingTargetVisitor visitor) { + visitor.visit(this); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/ProviderMethod.java b/server/src/main/java/org/elasticsearch/common/inject/internal/ProviderMethod.java deleted file mode 100644 index 861f9ad77128e..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/ProviderMethod.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright (C) 2008 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.elasticsearch.common.inject.internal; - -import org.elasticsearch.common.inject.Binder; -import org.elasticsearch.common.inject.Exposed; -import org.elasticsearch.common.inject.Key; -import org.elasticsearch.common.inject.PrivateBinder; -import org.elasticsearch.common.inject.Provider; -import org.elasticsearch.common.inject.spi.ProviderWithDependencies; - -import java.lang.annotation.Annotation; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.util.List; - -/** - * A provider that invokes a method and returns its result. - * - * @author jessewilson@google.com (Jesse Wilson) - */ -public class ProviderMethod implements ProviderWithDependencies { - private final Key key; - private final Class scopeAnnotation; - private final Object instance; - private final Method method; - private final List> parameterProviders; - private final boolean exposed; - - /** - * @param method the method to invoke. Its return type must be the same type as {@code key}. - */ - ProviderMethod( - Key key, - Method method, - Object instance, - List> parameterProviders, - Class scopeAnnotation - ) { - this.key = key; - this.scopeAnnotation = scopeAnnotation; - this.instance = instance; - this.method = method; - this.parameterProviders = parameterProviders; - this.exposed = method.getAnnotation(Exposed.class) != null; - } - - public void configure(Binder binder) { - binder = binder.withSource(method); - - if (scopeAnnotation != null) { - binder.bind(key).toProvider(this).in(scopeAnnotation); - } else { - binder.bind(key).toProvider(this); - } - - if (exposed) { - // the cast is safe 'cause the only binder we have implements PrivateBinder. 
If there's a - // misplaced @Exposed, calling this will add an error to the binder's error queue - ((PrivateBinder) binder).expose(key); - } - } - - @Override - public T get() { - Object[] parameters = new Object[parameterProviders.size()]; - for (int i = 0; i < parameters.length; i++) { - parameters[i] = parameterProviders.get(i).get(); - } - - try { - // We know this cast is safe because T is the method's return type. - @SuppressWarnings({ "unchecked" }) - T result = (T) method.invoke(instance, parameters); - return result; - } catch (IllegalAccessException e) { - throw new AssertionError(e); - } catch (InvocationTargetException e) { - throw new RuntimeException(e); - } - } - -} diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/ProviderMethodsModule.java b/server/src/main/java/org/elasticsearch/common/inject/internal/ProviderMethodsModule.java deleted file mode 100644 index 6a1d7aabed962..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/ProviderMethodsModule.java +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright (C) 2008 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.elasticsearch.common.inject.internal; - -import org.elasticsearch.common.inject.Binder; -import org.elasticsearch.common.inject.Key; -import org.elasticsearch.common.inject.Module; -import org.elasticsearch.common.inject.Provider; -import org.elasticsearch.common.inject.Provides; -import org.elasticsearch.common.inject.TypeLiteral; -import org.elasticsearch.common.inject.spi.Message; -import org.elasticsearch.common.inject.util.Modules; - -import java.lang.annotation.Annotation; -import java.lang.reflect.Member; -import java.lang.reflect.Method; -import java.util.ArrayList; -import java.util.List; -import java.util.Objects; - -/** - * Creates bindings to methods annotated with {@literal @}{@link Provides}. Use the scope and - * binding annotations on the provider method to configure the binding. - * - * @author crazybob@google.com (Bob Lee) - * @author jessewilson@google.com (Jesse Wilson) - */ -public final class ProviderMethodsModule implements Module { - private final Object delegate; - private final TypeLiteral typeLiteral; - - private ProviderMethodsModule(Object delegate) { - this.delegate = Objects.requireNonNull(delegate, "delegate"); - this.typeLiteral = TypeLiteral.get(this.delegate.getClass()); - } - - /** - * Returns a module which creates bindings for provider methods from the given module. - */ - public static Module forModule(Module module) { - return forObject(module); - } - - /** - * Returns a module which creates bindings for provider methods from the given object. 
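Editor's note: deleting Provides, ProviderMethod and ProviderMethodsModule (together with the install hook dropped from Elements further down) means modules are no longer scanned for provider methods at install time; whatever a @Provides method used to contribute must now be bound inside configure(). A rough before/after sketch, assuming a hypothetical TransportSettings value type and that the retained AbstractModule/toInstance EDSL behaves as in stock Guice:

    import org.elasticsearch.common.inject.AbstractModule;

    class SettingsModule extends AbstractModule {

        // The provider-method style that is being deleted:
        //
        //     @Provides
        //     TransportSettings transportSettings() {
        //         return new TransportSettings("127.0.0.1", 9300);
        //     }

        @Override
        protected void configure() {
            // Explicit replacement: bind the instance (or a Provider) directly.
            bind(TransportSettings.class).toInstance(new TransportSettings("127.0.0.1", 9300));
        }
    }

    class TransportSettings {
        final String host;
        final int port;

        TransportSettings(String host, int port) {
            this.host = host;
            this.port = port;
        }
    }

Since @Exposed only ever rode along with provider methods in private modules, its removal at the top of this patch is the natural companion to this change.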
- * This is useful notably for GIN - */ - public static Module forObject(Object object) { - // avoid infinite recursion, since installing a module always installs itself - if (object instanceof ProviderMethodsModule) { - return Modules.EMPTY_MODULE; - } - - return new ProviderMethodsModule(object); - } - - @Override - public synchronized void configure(Binder binder) { - for (ProviderMethod providerMethod : getProviderMethods(binder)) { - providerMethod.configure(binder); - } - } - - public List> getProviderMethods(Binder binder) { - List> result = new ArrayList<>(); - for (Class c = delegate.getClass(); c != Object.class; c = c.getSuperclass()) { - for (Method method : c.getMethods()) { - if (method.getAnnotation(Provides.class) != null) { - result.add(createProviderMethod(binder, method)); - } - } - } - return result; - } - - ProviderMethod createProviderMethod(Binder binder, final Method method) { - binder = binder.withSource(method); - Errors errors = new Errors(method); - - // prepare the parameter providers - List> parameterProviders = new ArrayList<>(); - List> parameterTypes = typeLiteral.getParameterTypes(method); - Annotation[][] parameterAnnotations = method.getParameterAnnotations(); - for (int i = 0; i < parameterTypes.size(); i++) { - Key key = getKey(errors, parameterTypes.get(i), method, parameterAnnotations[i]); - parameterProviders.add(binder.getProvider(key)); - } - - @SuppressWarnings("unchecked") // Define T as the method's return type. - TypeLiteral returnType = (TypeLiteral) typeLiteral.getReturnType(method); - - Key key = getKey(errors, returnType, method, method.getAnnotations()); - Class scopeAnnotation = Annotations.findScopeAnnotation(errors, method.getAnnotations()); - - for (Message message : errors.getMessages()) { - binder.addError(message); - } - - return new ProviderMethod<>(key, method, delegate, parameterProviders, scopeAnnotation); - } - - static Key getKey(Errors errors, TypeLiteral type, Member member, Annotation[] annotations) { - Annotation bindingAnnotation = Annotations.findBindingAnnotation(errors, member, annotations); - return bindingAnnotation == null ? 
Key.get(type) : Key.get(type, bindingAnnotation); - } - - @Override - public boolean equals(Object o) { - return o instanceof ProviderMethodsModule && ((ProviderMethodsModule) o).delegate == delegate; - } - - @Override - public int hashCode() { - return delegate.hashCode(); - } -} diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/UntargettedBindingImpl.java b/server/src/main/java/org/elasticsearch/common/inject/internal/UntargettedBindingImpl.java index e5a916d4be62e..c5595d570563f 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/UntargettedBindingImpl.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/UntargettedBindingImpl.java @@ -32,8 +32,8 @@ public UntargettedBindingImpl(Object source, Key key, Scoping scoping) { } @Override - public V acceptTargetVisitor(BindingTargetVisitor visitor) { - return visitor.visit(this); + public void acceptTargetVisitor(BindingTargetVisitor visitor) { + visitor.visit(this); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/inject/spi/Elements.java b/server/src/main/java/org/elasticsearch/common/inject/spi/Elements.java index 47e5d7d0753c4..22f86d6991e84 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/spi/Elements.java +++ b/server/src/main/java/org/elasticsearch/common/inject/spi/Elements.java @@ -31,7 +31,6 @@ import org.elasticsearch.common.inject.internal.AbstractBindingBuilder; import org.elasticsearch.common.inject.internal.BindingBuilder; import org.elasticsearch.common.inject.internal.Errors; -import org.elasticsearch.common.inject.internal.ProviderMethodsModule; import org.elasticsearch.common.inject.internal.SourceProvider; import java.lang.annotation.Annotation; @@ -135,7 +134,6 @@ public void install(Module module) { addError(e); } } - binder.install(ProviderMethodsModule.forModule(module)); } } @@ -192,12 +190,6 @@ public RecordingBinder skipSources(Class... classesToSkip) { return new RecordingBinder(this, null, newSourceProvider); } - @Override - public void expose(Key key) { - addError("Cannot expose %s on a standard binder. " + "Exposed bindings are only applicable to private binders.", key); - - } - private static final Logger logger = LogManager.getLogger(Elements.class); protected Object getSource() { diff --git a/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionListener.java b/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionListener.java deleted file mode 100644 index 1f5b969559020..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionListener.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (C) 2009 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.elasticsearch.common.inject.spi; - -/** - * Listens for injections into instances of type {@code I}. Useful for performing further - * injections, post-injection initialization, and more. 
- * - * @author crazybob@google.com (Bob Lee) - * @author jessewilson@google.com (Jesse Wilson) - * @since 2.0 - */ -public interface InjectionListener { - - /** - * Invoked by Guice after it injects the fields and methods of instance. - * - * @param injectee instance that Guice injected dependencies into - */ - void afterInjection(I injectee); -} diff --git a/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java b/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java index 4e20b26d83284..945dfca96072e 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java +++ b/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java @@ -29,7 +29,7 @@ import java.lang.annotation.Annotation; import java.lang.reflect.AnnotatedElement; import java.lang.reflect.Constructor; -import java.lang.reflect.Field; +import java.lang.reflect.Executable; import java.lang.reflect.Member; import java.lang.reflect.Method; import java.lang.reflect.Modifier; @@ -57,7 +57,7 @@ public final class InjectionPoint { private final boolean optional; - private final Member member; + private final Executable member; private final List> dependencies; InjectionPoint(TypeLiteral type, Method method) { @@ -75,26 +75,6 @@ public final class InjectionPoint { this.dependencies = forMember(constructor, type, constructor.getParameterAnnotations()); } - InjectionPoint(TypeLiteral type, Field field) { - this.member = field; - - Inject inject = field.getAnnotation(Inject.class); - this.optional = inject.optional(); - - Annotation[] annotations = field.getAnnotations(); - - Errors errors = new Errors(field); - Key key = null; - try { - key = Annotations.getKey(type.getFieldType(field), field, annotations, errors); - } catch (ErrorsException e) { - errors.merge(e.getErrors()); - } - errors.throwConfigurationExceptionIfErrorsExist(); - - this.dependencies = Collections.singletonList(newDependency(key, Nullability.allowsNull(annotations), -1)); - } - private List> forMember(Member member, TypeLiteral type, Annotation[][] parameterAnnotations) { Errors errors = new Errors(member); Iterator annotationsIterator = Arrays.asList(parameterAnnotations).iterator(); @@ -125,7 +105,7 @@ private Dependency newDependency(Key key, boolean allowsNull, int para /** * Returns the injected constructor, field, or method. */ - public Member getMember() { + public Executable getMember() { return member; } @@ -143,8 +123,7 @@ public List> getDependencies() { /** * Returns true if this injection point shall be skipped if the injector cannot resolve bindings * for all required dependencies. Both explicit bindings (as specified in a module), and implicit - * bindings ({@literal @}{@link org.elasticsearch.common.inject.ImplementedBy ImplementedBy}, default - * constructors etc.) may be used to satisfy optional injection points. + * bindings by default constructors etc.) may be used to satisfy optional injection points. */ public boolean isOptional() { return optional; @@ -230,13 +209,12 @@ public static InjectionPoint forConstructorOf(TypeLiteral type) { * ConfigurationException#getPartialValue() partial value} is a {@code Set} * of the valid injection points. */ - public static Set forInstanceMethodsAndFields(TypeLiteral type) { + public static Set forInstanceMethods(TypeLiteral type) { Set result = new HashSet<>(); Errors errors = new Errors(); // TODO (crazybob): Filter out overridden members. 
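Editor's note: InjectionPoint now deals only in executables here: the field-based constructor is gone, getMember() is typed as Executable, and discovery is renamed from forInstanceMethodsAndFields to forInstanceMethods. A small sketch of consuming the narrowed API (Fetcher is a hypothetical class; only the introspection calls shown come from this patch):

    import org.elasticsearch.common.inject.Inject;
    import org.elasticsearch.common.inject.spi.InjectionPoint;

    import java.lang.reflect.Executable;
    import java.util.Set;

    class InjectionPointDemo {
        public static class Fetcher {
            @Inject
            public void setRetries(Integer retries) {}
        }

        public static void main(String[] args) {
            // Field injection points are gone; only @Inject methods are discovered.
            Set<InjectionPoint> points = InjectionPoint.forInstanceMethods(Fetcher.class);
            for (InjectionPoint point : points) {
                Executable member = point.getMember(); // typed as Executable now, not Member
                System.out.println(member.getName() + ", optional=" + point.isOptional());
            }
        }
    }

Note that the @return javadoc kept just below still says fields are returned before methods, which is now stale; the behaviour is methods only.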
- addInjectionPoints(type, Factory.FIELDS, false, result, errors); - addInjectionPoints(type, Factory.METHODS, false, result, errors); + addInjectionPoints(type, false, result, errors); result = unmodifiableSet(result); if (errors.hasErrors()) { @@ -246,7 +224,7 @@ public static Set forInstanceMethodsAndFields(TypeLiteral typ } /** - * Returns all instance method and field injection points on {@code type}. + * Returns all instance method injection points on {@code type}. * * @return a possibly empty set of injection points. The set has a specified iteration order. All * fields are returned and then all methods. Within the fields, supertype fields are returned @@ -256,8 +234,8 @@ public static Set forInstanceMethodsAndFields(TypeLiteral typ * ConfigurationException#getPartialValue() partial value} is a {@code Set} * of the valid injection points. */ - public static Set forInstanceMethodsAndFields(Class type) { - return forInstanceMethodsAndFields(TypeLiteral.get(type)); + public static Set forInstanceMethods(Class type) { + return forInstanceMethods(TypeLiteral.get(type)); } private static void checkForMisplacedBindingAnnotations(Member member, Errors errors) { @@ -274,18 +252,16 @@ private static void checkForMisplacedBindingAnnotations(Member member, Errors er // name. In Scala, fields always get accessor methods (that we need to ignore). See bug 242. if (member instanceof Method) { try { - if (member.getDeclaringClass().getField(member.getName()) != null) { - return; - } + member.getDeclaringClass().getField(member.getName()); + return; } catch (NoSuchFieldException ignore) {} } errors.misplacedBindingAnnotation(member, misplacedBindingAnnotation); } - private static void addInjectionPoints( + private static void addInjectionPoints( TypeLiteral type, - Factory factory, boolean statics, Collection injectionPoints, Errors errors @@ -296,20 +272,19 @@ private static void addInjectionPoints( // Add injectors for superclass first. 
TypeLiteral superType = type.getSupertype(type.getRawType().getSuperclass()); - addInjectionPoints(superType, factory, statics, injectionPoints, errors); + addInjectionPoints(superType, statics, injectionPoints, errors); // Add injectors for all members next - addInjectorsForMembers(type, factory, statics, injectionPoints, errors); + addInjectorsForMembers(type, statics, injectionPoints, errors); } - private static void addInjectorsForMembers( + private static void addInjectorsForMembers( TypeLiteral typeLiteral, - Factory factory, boolean statics, Collection injectionPoints, Errors errors ) { - for (M member : factory.getMembers(getRawType(typeLiteral.getType()))) { + for (Method member : getRawType(typeLiteral.getType()).getMethods()) { if (isStatic(member) != statics) { continue; } @@ -320,7 +295,8 @@ private static void addInjectorsForMembers } try { - injectionPoints.add(factory.create(typeLiteral, member, errors)); + checkForMisplacedBindingAnnotations(member, errors); + injectionPoints.add(new InjectionPoint(typeLiteral, member)); } catch (ConfigurationException ignorable) { if (inject.optional() == false) { errors.merge(ignorable.getErrorMessages()); @@ -333,34 +309,4 @@ private static boolean isStatic(Member member) { return Modifier.isStatic(member.getModifiers()); } - private interface Factory { - Factory FIELDS = new Factory<>() { - @Override - public Field[] getMembers(Class type) { - return type.getFields(); - } - - @Override - public InjectionPoint create(TypeLiteral typeLiteral, Field member, Errors errors) { - return new InjectionPoint(typeLiteral, member); - } - }; - - Factory METHODS = new Factory<>() { - @Override - public Method[] getMembers(Class type) { - return type.getMethods(); - } - - @Override - public InjectionPoint create(TypeLiteral typeLiteral, Method member, Errors errors) { - checkForMisplacedBindingAnnotations(member, errors); - return new InjectionPoint(typeLiteral, member); - } - }; - - M[] getMembers(Class type); - - InjectionPoint create(TypeLiteral typeLiteral, M member, Errors errors); - } } diff --git a/server/src/main/java/org/elasticsearch/common/inject/util/Modules.java b/server/src/main/java/org/elasticsearch/common/inject/util/Modules.java deleted file mode 100644 index 1162bef25e682..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/util/Modules.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright (C) 2008 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.elasticsearch.common.inject.util; - -import org.elasticsearch.common.inject.Module; - -/** - * Static utility methods for creating and working with instances of {@link Module}. 
- * - * @author jessewilson@google.com (Jesse Wilson) - * @since 2.0 - */ -public final class Modules { - private Modules() {} - - public static final Module EMPTY_MODULE = binder -> {}; - -} diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java index 478ae231e16ff..52eee5af3f6f5 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java @@ -31,6 +31,16 @@ public ByteArrayStreamInput(byte[] bytes) { reset(bytes); } + @Override + public String readString() throws IOException { + final int chars = readArraySize(); + String string = tryReadStringFromBytes(bytes, pos, limit, chars); + if (string != null) { + return string; + } + return doReadString(chars); + } + @Override public int read() throws IOException { if (limit - pos <= 0) { @@ -65,6 +75,20 @@ public void skipBytes(long count) { pos += (int) count; } + @Override + public long skip(long n) throws IOException { + if (n <= 0L) { + return 0L; + } + int available = available(); + if (n < available) { + pos += (int) n; + return n; + } + pos = limit; + return available; + } + @Override public void close() { // No-op diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/ByteBufferStreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/ByteBufferStreamInput.java index f4ae17175fa2d..41d129406551f 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/ByteBufferStreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/ByteBufferStreamInput.java @@ -120,6 +120,24 @@ public static long readVLong(ByteBuffer buffer) throws IOException { return i; } + @Override + public String readString() throws IOException { + final int chars = readArraySize(); + if (buffer.hasArray()) { + // attempt reading bytes directly into a string to minimize copying + final String string = tryReadStringFromBytes( + buffer.array(), + buffer.position() + buffer.arrayOffset(), + buffer.limit() + buffer.arrayOffset(), + chars + ); + if (string != null) { + return string; + } + } + return doReadString(chars); + } + @Override public int read() throws IOException { if (buffer.hasRemaining() == false) { diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/FilterStreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/FilterStreamInput.java index 0e817e16c0b76..c0ef0e0abf39b 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/FilterStreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/FilterStreamInput.java @@ -26,6 +26,11 @@ protected FilterStreamInput(StreamInput delegate) { this.delegate = delegate; } + @Override + public String readString() throws IOException { + return delegate.readString(); + } + @Override public byte readByte() throws IOException { return delegate.readByte(); diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/RecyclerBytesStreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/RecyclerBytesStreamOutput.java index 80917b530202b..7be964fc1be39 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/RecyclerBytesStreamOutput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/RecyclerBytesStreamOutput.java @@ -43,9 +43,7 @@ public class RecyclerBytesStreamOutput extends BytesStream implements Releasable public 
RecyclerBytesStreamOutput(Recycler recycler) { this.recycler = recycler; - try (Recycler.V obtain = recycler.obtain()) { - pageSize = obtain.v().length; - } + this.pageSize = recycler.pageSize(); this.currentPageOffset = pageSize; } diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index 83aa7fb096693..7281616a8d25f 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -58,6 +58,9 @@ import java.util.function.Function; import java.util.function.IntFunction; +import static java.nio.charset.StandardCharsets.ISO_8859_1; +import static java.nio.charset.StandardCharsets.UTF_8; + /** * A stream from this node to another node. Technically, it can also be streamed to a byte array but that is mostly for testing. * @@ -445,7 +448,10 @@ private char[] ensureLargeSpare(int charCount) { public String readString() throws IOException { final int charCount = readArraySize(); + return doReadString(charCount); + } + protected String doReadString(final int charCount) throws IOException { final char[] charBuffer = charCount > SMALL_STRING_LIMIT ? ensureLargeSpare(charCount) : smallSpare.get(); int charsOffset = 0; @@ -531,6 +537,60 @@ public String readString() throws IOException { return new String(charBuffer, 0, charCount); } + protected String tryReadStringFromBytes(final byte[] bytes, final int start, final int limit, final int chars) throws IOException { + final int end = start + chars; + if (limit < end) { + return null; // not enough bytes to read chars + } + for (int pos = start; pos < end; pos++) { + if ((bytes[pos] & 0x80) != 0) { + // not an ASCII char, fall back to reading a UTF-8 string + return tryReadUtf8StringFromBytes(bytes, start, limit, pos, end - pos); + } + } + skip(chars); // skip the number of chars (equals bytes) on the stream input + // We already validated the top bit is never set (so there's no negatives). + // Using ISO_8859_1 over US_ASCII safes another scan to check just that and is equivalent otherwise. 
+ return new String(bytes, start, chars, ISO_8859_1); + } + + private String tryReadUtf8StringFromBytes(final byte[] bytes, final int start, final int limit, int pos, int chars) throws IOException { + while (pos < limit && chars-- > 0) { + int c = bytes[pos] & 0xff; + switch (c >> 4) { + case 0, 1, 2, 3, 4, 5, 6, 7 -> pos++; + case 12, 13 -> pos += 2; + case 14 -> { + // surrogate pairs are incorrectly encoded, these can't be directly read from bytes + if (maybeHighSurrogate(bytes, pos, limit)) return null; + pos += 3; + } + default -> throwOnBrokenChar(c); + } + } + + if (chars == 0 && pos <= limit) { + pos = pos - start; + skip(pos); // skip the number of bytes relative to start on the stream input + return new String(bytes, start, pos, UTF_8); + } + + // not enough bytes to read all chars from array + return null; + } + + private static boolean maybeHighSurrogate(final byte[] bytes, final int pos, final int limit) { + if (pos + 2 >= limit) { + return true; // beyond limit, we can't tell + } + int c1 = bytes[pos] & 0xff; + int c2 = bytes[pos + 1] & 0xff; + int c3 = bytes[pos + 2] & 0xff; + int surrogateCandidate = ((c1 & 0x0F) << 12) | ((c2 & 0x3F) << 6) | (c3 & 0x3F); + // check if in the high surrogate range + return surrogateCandidate >= 0xD800 && surrogateCandidate <= 0xDBFF; + } + private static void throwOnBrokenChar(int c) throws IOException { throw new IOException("Invalid string; unexpected character: " + c + " hex: " + Integer.toHexString(c)); } diff --git a/server/src/main/java/org/elasticsearch/common/network/CIDRUtils.java b/server/src/main/java/org/elasticsearch/common/network/CIDRUtils.java index ea4d6da9b7bec..3b5a9ae1589f8 100644 --- a/server/src/main/java/org/elasticsearch/common/network/CIDRUtils.java +++ b/server/src/main/java/org/elasticsearch/common/network/CIDRUtils.java @@ -48,7 +48,7 @@ public static boolean isInRange(byte[] addr, String cidrAddress) { return isBetween(addr, lower, upper); } - private static Tuple getLowerUpper(Tuple cidr) { + public static Tuple getLowerUpper(Tuple cidr) { final InetAddress value = cidr.v1(); final Integer prefixLength = cidr.v2(); @@ -81,7 +81,7 @@ private static boolean isBetween(byte[] addr, byte[] lower, byte[] upper) { // Borrowed from Lucene to make this consistent IP fields matching for the mix of IPv4 and IPv6 values // Modified signature to avoid extra conversions - private static byte[] encode(byte[] address) { + public static byte[] encode(byte[] address) { if (address.length == 4) { byte[] mapped = new byte[16]; System.arraycopy(IPV4_PREFIX, 0, mapped, 0, IPV4_PREFIX.length); diff --git a/server/src/main/java/org/elasticsearch/common/network/NetworkUtils.java b/server/src/main/java/org/elasticsearch/common/network/NetworkUtils.java index f7cfff8402304..b2602b9c4f9d0 100644 --- a/server/src/main/java/org/elasticsearch/common/network/NetworkUtils.java +++ b/server/src/main/java/org/elasticsearch/common/network/NetworkUtils.java @@ -10,6 +10,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Constants; +import org.elasticsearch.core.Predicates; import java.io.IOException; import java.net.Inet4Address; @@ -188,7 +189,7 @@ static InetAddress[] getGlobalAddresses() throws IOException { /** Returns all addresses (any scope) for interfaces that are up. 
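The surrogate check in tryReadUtf8StringFromBytes above relies on how a three-byte UTF-8 sequence decodes: char = ((b1 & 0x0F) << 12) | ((b2 & 0x3F) << 6) | (b3 & 0x3F), so a result in the 0xD800-0xDBFF range means the bytes encode a UTF-16 surrogate, which the byte-wise fast path cannot safely turn into a String. A small stand-alone sketch of that decode (names invented for illustration):

// Invented illustration of the three-byte decode behind maybeHighSurrogate:
// the sequence 0xED 0xA0 0x80 decodes to 0xD800, the first high surrogate.
final class SurrogateSketch {
    static int decodeThreeByteUtf8(byte b1, byte b2, byte b3) {
        return ((b1 & 0x0F) << 12) | ((b2 & 0x3F) << 6) | (b3 & 0x3F);
    }

    public static void main(String[] args) {
        int candidate = decodeThreeByteUtf8((byte) 0xED, (byte) 0xA0, (byte) 0x80);
        boolean highSurrogate = candidate >= 0xD800 && candidate <= 0xDBFF;
        // prints: d800 is high surrogate: true
        System.out.println(Integer.toHexString(candidate) + " is high surrogate: " + highSurrogate);
    }
}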
* This is only used to pick a publish address, when the user set network.host to a wildcard */ public static InetAddress[] getAllAddresses() throws IOException { - return filterAllAddresses(address -> true, "no up-and-running addresses found"); + return filterAllAddresses(Predicates.always(), "no up-and-running addresses found"); } static Optional maybeGetInterfaceByName(List networkInterfaces, String name) { diff --git a/server/src/main/java/org/elasticsearch/common/recycler/AbstractRecycler.java b/server/src/main/java/org/elasticsearch/common/recycler/AbstractRecycler.java index b38c7195f55a2..ee4b8b9f93160 100644 --- a/server/src/main/java/org/elasticsearch/common/recycler/AbstractRecycler.java +++ b/server/src/main/java/org/elasticsearch/common/recycler/AbstractRecycler.java @@ -16,4 +16,8 @@ protected AbstractRecycler(Recycler.C c) { this.c = c; } + @Override + public int pageSize() { + return c.pageSize(); + } } diff --git a/server/src/main/java/org/elasticsearch/common/recycler/Recycler.java b/server/src/main/java/org/elasticsearch/common/recycler/Recycler.java index 316666e4267ec..999ee2f6beaab 100644 --- a/server/src/main/java/org/elasticsearch/common/recycler/Recycler.java +++ b/server/src/main/java/org/elasticsearch/common/recycler/Recycler.java @@ -30,6 +30,11 @@ interface C { /** Destroy the data. This operation allows the data structure to release any internal resources before GC. */ void destroy(T value); + + /** + * @return see {@link Recycler#pageSize()} + */ + int pageSize(); } interface V extends Releasable { @@ -44,4 +49,9 @@ interface V extends Releasable { V obtain(); + /** + * @return the page size of the recycled object if it is array backed. + */ + int pageSize(); + } diff --git a/server/src/main/java/org/elasticsearch/common/recycler/Recyclers.java b/server/src/main/java/org/elasticsearch/common/recycler/Recyclers.java index d4ddf73d4c389..200f8b055e51d 100644 --- a/server/src/main/java/org/elasticsearch/common/recycler/Recyclers.java +++ b/server/src/main/java/org/elasticsearch/common/recycler/Recyclers.java @@ -92,6 +92,10 @@ public boolean isRecycled() { }; } + @Override + public int pageSize() { + return getDelegate().pageSize(); + } }; } @@ -134,6 +138,10 @@ protected Recycler getDelegate() { return recyclers[slot()]; } + @Override + public int pageSize() { + return recyclers[slot()].pageSize(); + } }; } diff --git a/server/src/main/java/org/elasticsearch/common/regex/Regex.java b/server/src/main/java/org/elasticsearch/common/regex/Regex.java index 532fc2ae9a019..039f484f1ebca 100644 --- a/server/src/main/java/org/elasticsearch/common/regex/Regex.java +++ b/server/src/main/java/org/elasticsearch/common/regex/Regex.java @@ -14,6 +14,7 @@ import org.apache.lucene.util.automaton.CharacterRunAutomaton; import org.apache.lucene.util.automaton.Operations; import org.elasticsearch.common.Strings; +import org.elasticsearch.core.Predicates; import java.util.ArrayList; import java.util.Arrays; @@ -102,12 +103,12 @@ public static Automaton simpleMatchToAutomaton(String... patterns) { */ public static Predicate simpleMatcher(String... 
patterns) { if (patterns == null || patterns.length == 0) { - return str -> false; + return Predicates.never(); } boolean hasWildcard = false; for (String pattern : patterns) { if (isMatchAllPattern(pattern)) { - return str -> true; + return Predicates.always(); } if (isSimpleMatchPattern(pattern)) { hasWildcard = true; diff --git a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index 41dd840b0c0e7..452fc14025e2e 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -85,12 +85,14 @@ public final class IndexScopedSettings extends AbstractScopedSettings { SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING, SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING, SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING, + SearchSlowLog.INDEX_SEARCH_SLOWLOG_INCLUDE_USER_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_MAX_SOURCE_CHARS_TO_LOG_SETTING, + IndexingSlowLog.INDEX_INDEXING_SLOWLOG_INCLUDE_USER_SETTING, MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, MergePolicyConfig.INDEX_MERGE_POLICY_TYPE_SETTING, MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, diff --git a/server/src/main/java/org/elasticsearch/common/util/BigArrays.java b/server/src/main/java/org/elasticsearch/common/util/BigArrays.java index edec336c2a028..d1367f41d9d87 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigArrays.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigArrays.java @@ -10,10 +10,12 @@ import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefIterator; import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.breaker.PreallocatedCircuitBreakerService; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.recycler.Recycler; import org.elasticsearch.core.Nullable; @@ -143,6 +145,27 @@ public void fill(long fromIndex, long toIndex, byte value) { Arrays.fill(array, (int) fromIndex, (int) toIndex, value); } + @Override + public BytesRefIterator iterator() { + return new BytesRefIterator() { + boolean visited = false; + + @Override + public BytesRef next() { + if (visited) { + return null; + } + visited = true; + return new BytesRef(array, 0, Math.toIntExact(size())); + } + }; + } + + @Override + public void fillWith(StreamInput in) throws IOException { + in.readBytes(array, 0, Math.toIntExact(size())); + } + @Override public boolean hasArray() { return true; diff --git a/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java b/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java index 2c623882afe14..c5a04e273e487 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java +++ 
b/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java @@ -10,7 +10,9 @@ import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefIterator; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; @@ -18,6 +20,7 @@ import static org.elasticsearch.common.util.BigLongArray.writePages; import static org.elasticsearch.common.util.PageCacheRecycler.BYTE_PAGE_SIZE; +import static org.elasticsearch.common.util.PageCacheRecycler.PAGE_SIZE_IN_BYTES; /** * Byte array abstraction able to support more than 2B values. This implementation slices data into fixed-sized blocks of @@ -139,6 +142,30 @@ public byte[] array() { throw new UnsupportedOperationException(); } + @Override + public BytesRefIterator iterator() { + return new BytesRefIterator() { + int i = 0; + + @Override + public BytesRef next() { + if (i >= pages.length) { + return null; + } + int len = i == pages.length - 1 ? Math.toIntExact(size - (pages.length - 1L) * PAGE_SIZE_IN_BYTES) : PAGE_SIZE_IN_BYTES; + return new BytesRef(pages[i++], 0, len); + } + }; + } + + @Override + public void fillWith(StreamInput in) throws IOException { + for (int i = 0; i < pages.length - 1; i++) { + in.readBytes(pages[i], 0, PAGE_SIZE_IN_BYTES); + } + in.readBytes(pages[pages.length - 1], 0, Math.toIntExact(size - (pages.length - 1L) * PAGE_SIZE_IN_BYTES)); + } + @Override protected int numBytesPerElement() { return 1; diff --git a/server/src/main/java/org/elasticsearch/common/util/ByteArray.java b/server/src/main/java/org/elasticsearch/common/util/ByteArray.java index e3b51ee7d2e32..861aa4f9c7eea 100644 --- a/server/src/main/java/org/elasticsearch/common/util/ByteArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/ByteArray.java @@ -9,6 +9,7 @@ package org.elasticsearch.common.util; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefIterator; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Writeable; @@ -51,6 +52,17 @@ static ByteArray readFrom(StreamInput in) throws IOException { */ void fill(long fromIndex, long toIndex, byte value); + /** + * Fills this ByteArray with bytes from the given input stream + */ + void fillWith(StreamInput in) throws IOException; + + /** + * Returns a BytesRefIterator for this ByteArray. This method allows + * access to the internal pages of this reference without copying them. + */ + BytesRefIterator iterator(); + /** * Checks if this instance is backed by a single byte array analogous to {@link ByteBuffer#hasArray()}. 
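The iterator() and fillWith(StreamInput) methods declared above let callers move whole pages rather than single bytes. A rough usage sketch, assuming the Lucene and Elasticsearch classes from this diff are on the classpath (the helper class and method are invented for illustration):

import java.io.IOException;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefIterator;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.util.ByteArray;

// Invented helper: stream the first `length` bytes of a ByteArray page by page,
// mirroring how BytesRefArray#writeTo uses the new iterator() later in this diff.
final class ByteArrayStreaming {
    static void copyToStream(ByteArray bytes, long length, StreamOutput out) throws IOException {
        BytesRefIterator it = bytes.iterator();
        BytesRef page;
        long remaining = length;
        while (remaining > 0 && (page = it.next()) != null) {
            int chunk = Math.toIntExact(Math.min(remaining, page.length));
            out.writeBytes(page.bytes, page.offset, chunk);
            remaining -= chunk;
        }
        assert remaining == 0 : remaining;
    }
}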
*/ diff --git a/server/src/main/java/org/elasticsearch/common/util/BytesRefArray.java b/server/src/main/java/org/elasticsearch/common/util/BytesRefArray.java index c78db448380b3..d8675135a8cfe 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BytesRefArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BytesRefArray.java @@ -10,6 +10,7 @@ import org.apache.lucene.util.Accountable; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefIterator; import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -64,10 +65,7 @@ public BytesRefArray(StreamInput in, BigArrays bigArrays) throws IOException { // bytes long sizeOfBytes = in.readVLong(); bytes = bigArrays.newByteArray(sizeOfBytes, false); - - for (long i = 0; i < sizeOfBytes; ++i) { - bytes.set(i, in.readByte()); - } + bytes.fillWith(in); success = true; } finally { @@ -149,11 +147,17 @@ public void writeTo(StreamOutput out) throws IOException { } // bytes might be overallocated, the last bucket of startOffsets contains the real size - long sizeOfBytes = startOffsets.get(size); + final long sizeOfBytes = startOffsets.get(size); out.writeVLong(sizeOfBytes); - for (long i = 0; i < sizeOfBytes; ++i) { - out.writeByte(bytes.get(i)); + final BytesRefIterator bytesIt = bytes.iterator(); + BytesRef bytesRef; + long remained = sizeOfBytes; + while (remained > 0 && (bytesRef = bytesIt.next()) != null) { + int length = Math.toIntExact(Math.min(remained, bytesRef.length)); + remained -= length; + out.writeBytes(bytesRef.bytes, bytesRef.offset, length); } + assert remained == 0 : remained; } @Override diff --git a/server/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java b/server/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java index a1430ae5ce784..49de2c822a99c 100644 --- a/server/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java +++ b/server/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java @@ -99,7 +99,7 @@ public PageCacheRecycler(Settings settings) { final int maxPageCount = (int) Math.min(Integer.MAX_VALUE, limit / PAGE_SIZE_IN_BYTES); final int maxBytePageCount = (int) (bytesWeight * maxPageCount / totalWeight); - bytePage = build(type, maxBytePageCount, allocatedProcessors, new AbstractRecyclerC() { + bytePage = build(type, maxBytePageCount, allocatedProcessors, new AbstractRecyclerC<>() { @Override public byte[] newInstance() { return new byte[BYTE_PAGE_SIZE]; @@ -109,10 +109,15 @@ public byte[] newInstance() { public void recycle(byte[] value) { // nothing to do } + + @Override + public int pageSize() { + return BYTE_PAGE_SIZE; + } }); final int maxObjectPageCount = (int) (objectsWeight * maxPageCount / totalWeight); - objectPage = build(type, maxObjectPageCount, allocatedProcessors, new AbstractRecyclerC() { + objectPage = build(type, maxObjectPageCount, allocatedProcessors, new AbstractRecyclerC<>() { @Override public Object[] newInstance() { return new Object[OBJECT_PAGE_SIZE]; @@ -122,6 +127,11 @@ public Object[] newInstance() { public void recycle(Object[] value) { Arrays.fill(value, null); // we need to remove the strong refs on the objects stored in the array } + + @Override + public int pageSize() { + return OBJECT_PAGE_SIZE; + } }); assert PAGE_SIZE_IN_BYTES * (maxBytePageCount + maxObjectPageCount) <= limit; diff --git a/server/src/main/java/org/elasticsearch/common/util/ReleasableByteArray.java 
b/server/src/main/java/org/elasticsearch/common/util/ReleasableByteArray.java index 0102195f4e809..abb13b5395333 100644 --- a/server/src/main/java/org/elasticsearch/common/util/ReleasableByteArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/ReleasableByteArray.java @@ -9,6 +9,7 @@ package org.elasticsearch.common.util; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefIterator; import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.ReleasableBytesReference; @@ -88,6 +89,17 @@ public byte[] array() { throw new UnsupportedOperationException(); } + @Override + public BytesRefIterator iterator() { + assert ref.hasReferences(); + return ref.iterator(); + } + + @Override + public void fillWith(StreamInput in) { + throw new UnsupportedOperationException("read-only ByteArray"); + } + @Override public long ramBytesUsed() { /* diff --git a/server/src/main/java/org/elasticsearch/env/BuildVersion.java b/server/src/main/java/org/elasticsearch/env/BuildVersion.java new file mode 100644 index 0000000000000..e19ad87932e7f --- /dev/null +++ b/server/src/main/java/org/elasticsearch/env/BuildVersion.java @@ -0,0 +1,122 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.env; + +import org.elasticsearch.Build; +import org.elasticsearch.Version; +import org.elasticsearch.internal.BuildExtension; +import org.elasticsearch.plugins.ExtensionLoader; + +import java.util.ServiceLoader; + +/** + * A version representing the code of Elasticsearch + * + *
<p>This class allows us to check whether an Elasticsearch release + * is "too old" or "too new," using an intentionally minimal API for + * comparisons. The static {@link #current()} method returns the current + * release version, and {@link #fromVersionId(int)} returns a version + * based on some identifier. By default, this identifier matches what the + * {@link Version} class uses, but the implementation is pluggable. + * If a module provides a {@link BuildExtension} service via Java SPI, this + * class's static methods will return a different implementation of {@link BuildVersion}, + * potentially with different behavior. This allows downstream projects to + * provide versions that accommodate different release models or versioning + * schemes.</p>
    + */ +public abstract class BuildVersion { + + /** + * Check whether this version is on or after a minimum threshold. + * + *
<p>In some cases, the only thing we need to know about a version is whether + * it's compatible with the currently-running Elasticsearch. This method checks + * the lower bound, and returns false if the version is "too old."</p>
    + * + *
<p>By default, the minimum compatible version is derived from {@code Version.CURRENT.minimumCompatibilityVersion()}, + * but this behavior is pluggable.</p>
    + * @return True if this version is on or after the minimum compatible version + * for the currently running Elasticsearch, false otherwise. + */ + public abstract boolean onOrAfterMinimumCompatible(); + + /** + * Check whether this version comes from a release later than the + * currently running Elasticsearch. + * + *
<p>This is useful for checking whether a node would be downgraded.</p>
    + * + * @return True if this version represents a release of Elasticsearch later + * than the one that's running. + */ + public abstract boolean isFutureVersion(); + + // temporary + // TODO[wrb]: remove from PersistedClusterStateService + // TODO[wrb]: remove from security bootstrap checks + @Deprecated + public Version toVersion() { + return null; + } + + /** + * Create a {@link BuildVersion} from a version ID number. + * + *
<p>By default, this identifier should match the integer ID of a {@link Version}; + * see that class for details on the default semantic versioning scheme. This behavior + * is, of course, pluggable.</p>
    + * + * @param versionId An integer identifier for a version + * @return a version representing a build or release of Elasticsearch + */ + public static BuildVersion fromVersionId(int versionId) { + return CurrentExtensionHolder.BUILD_EXTENSION.fromVersionId(versionId); + } + + /** + * Get the current build version. + * + *
<p>By default, this value will be different for every public release of Elasticsearch, + * but downstream implementations aren't restricted by this condition.</p>
    + * + * @return The BuildVersion for Elasticsearch + */ + public static BuildVersion current() { + return CurrentExtensionHolder.BUILD_EXTENSION.currentBuildVersion(); + } + + // only exists for NodeMetadata#toXContent + public abstract int id(); + + private static class CurrentExtensionHolder { + private static final BuildExtension BUILD_EXTENSION = findExtension(); + + private static BuildExtension findExtension() { + return ExtensionLoader.loadSingleton(ServiceLoader.load(BuildExtension.class)).orElse(new DefaultBuildExtension()); + } + } + + private static class DefaultBuildExtension implements BuildExtension { + @Override + public Build getCurrentBuild() { + return Build.current(); + } + + @Override + public BuildVersion currentBuildVersion() { + return DefaultBuildVersion.CURRENT; + } + + @Override + public BuildVersion fromVersionId(int versionId) { + return new DefaultBuildVersion(versionId); + } + } + +} diff --git a/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java b/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java new file mode 100644 index 0000000000000..8271b836269a7 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.env; + +import org.elasticsearch.Version; + +import java.util.Objects; + +/** + * A {@link BuildVersion} that uses the same identifiers and compatibility constraints + * as {@link Version}. + * + *
<p>This default implementation of BuildVersion delegates to the {@link Version} class. + * It's intended to let us check whether a version identifier is "too old" or "too new." + * "Too old" is determined by {@code Version.CURRENT.minimumCompatibilityVersion()}, + * and "too new" is anything that comes after {@code Version.CURRENT}. This lets us + * give users simple rules in terms of public-facing release versions for Elasticsearch + * compatibility when upgrading nodes and prevents downgrades in place.</p>
    + */ +final class DefaultBuildVersion extends BuildVersion { + + public static BuildVersion CURRENT = new DefaultBuildVersion(Version.CURRENT.id()); + + private final int versionId; + private final Version version; + + DefaultBuildVersion(int versionId) { + assert versionId >= 0 : "Release version IDs must be non-negative integers"; + this.versionId = versionId; + this.version = Version.fromId(versionId); + } + + @Override + public boolean onOrAfterMinimumCompatible() { + return Version.CURRENT.minimumCompatibilityVersion().onOrBefore(version); + } + + @Override + public boolean isFutureVersion() { + return Version.CURRENT.before(version); + } + + @Override + public int id() { + return versionId; + } + + @Override + public Version toVersion() { + return version; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + DefaultBuildVersion that = (DefaultBuildVersion) o; + return versionId == that.versionId; + } + + @Override + public int hashCode() { + return Objects.hash(versionId); + } + + @Override + public String toString() { + return Version.fromId(versionId).toString(); + } +} diff --git a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 1d8a9ef1ce1c4..291e9697def4a 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -22,7 +22,6 @@ import org.apache.lucene.store.NativeFSLockFactory; import org.elasticsearch.Build; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; @@ -38,6 +37,7 @@ import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.core.CheckedRunnable; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.Predicates; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; @@ -627,7 +627,7 @@ private static NodeMetadata loadNodeMetadata(Settings settings, Logger logger, D assert nodeIds.isEmpty() : nodeIds; // If we couldn't find legacy metadata, we set the latest index version to this version. This happens // when we are starting a new node and there are no indices to worry about. 
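Since BuildVersion is meant to be the only compatibility gate, a persisted version id can be validated with just its two checks. A sketch along the lines of NodeMetadata#verifyUpgradeToCurrentVersion further down in this diff (the helper class, method, and messages are invented for illustration):

import org.elasticsearch.env.BuildVersion;

// Invented illustration: reject node data that is either older than the minimum
// compatible release or newer than the running release.
final class BuildVersionGateSketch {
    static void checkCompatible(int persistedVersionId) {
        BuildVersion persisted = BuildVersion.fromVersionId(persistedVersionId);
        if (persisted.onOrAfterMinimumCompatible() == false) {
            throw new IllegalStateException("node data is too old to upgrade: " + persisted);
        }
        if (persisted.isFutureVersion()) {
            throw new IllegalStateException("node data comes from a newer release: " + persisted);
        }
    }
}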
- metadata = new NodeMetadata(generateNodeId(settings), Version.CURRENT, IndexVersion.current()); + metadata = new NodeMetadata(generateNodeId(settings), BuildVersion.current(), IndexVersion.current()); } else { assert nodeIds.equals(Collections.singleton(legacyMetadata.nodeId())) : nodeIds + " doesn't match " + legacyMetadata; metadata = legacyMetadata; @@ -635,7 +635,7 @@ private static NodeMetadata loadNodeMetadata(Settings settings, Logger logger, D } metadata = metadata.upgradeToCurrentVersion(); - assert metadata.nodeVersion().equals(Version.CURRENT) : metadata.nodeVersion() + " != " + Version.CURRENT; + assert metadata.nodeVersion().equals(BuildVersion.current()) : metadata.nodeVersion() + " != " + Build.current(); return metadata; } @@ -1119,7 +1119,7 @@ public Path[] availableShardPaths(ShardId shardId) { * Returns all folder names in ${data.paths}/indices folder */ public Set availableIndexFolders() throws IOException { - return availableIndexFolders(p -> false); + return availableIndexFolders(Predicates.never()); } /** @@ -1147,7 +1147,7 @@ public Set availableIndexFolders(Predicate excludeIndexPathIdsPr * @throws IOException if an I/O exception occurs traversing the filesystem */ public Set availableIndexFoldersForPath(final DataPath dataPath) throws IOException { - return availableIndexFoldersForPath(dataPath, p -> false); + return availableIndexFoldersForPath(dataPath, Predicates.never()); } /** diff --git a/server/src/main/java/org/elasticsearch/env/NodeMetadata.java b/server/src/main/java/org/elasticsearch/env/NodeMetadata.java index 2122e5fcc8b6c..8d8505f0147bc 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeMetadata.java +++ b/server/src/main/java/org/elasticsearch/env/NodeMetadata.java @@ -9,7 +9,6 @@ package org.elasticsearch.env; import org.elasticsearch.Build; -import org.elasticsearch.Version; import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.gateway.MetadataStateFormat; import org.elasticsearch.index.IndexVersion; @@ -36,26 +35,27 @@ public final class NodeMetadata { private final String nodeId; - private final Version nodeVersion; + private final BuildVersion nodeVersion; - private final Version previousNodeVersion; + private final BuildVersion previousNodeVersion; private final IndexVersion oldestIndexVersion; + @UpdateForV9 // version should be non-null in the node metadata from v9 onwards private NodeMetadata( final String nodeId, - final Version nodeVersion, - final Version previousNodeVersion, + final BuildVersion buildVersion, + final BuildVersion previousBuildVersion, final IndexVersion oldestIndexVersion ) { this.nodeId = Objects.requireNonNull(nodeId); - this.nodeVersion = Objects.requireNonNull(nodeVersion); - this.previousNodeVersion = Objects.requireNonNull(previousNodeVersion); + this.nodeVersion = Objects.requireNonNull(buildVersion); + this.previousNodeVersion = Objects.requireNonNull(previousBuildVersion); this.oldestIndexVersion = Objects.requireNonNull(oldestIndexVersion); } - public NodeMetadata(final String nodeId, final Version nodeVersion, final IndexVersion oldestIndexVersion) { - this(nodeId, nodeVersion, nodeVersion, oldestIndexVersion); + public NodeMetadata(final String nodeId, final BuildVersion buildVersion, final IndexVersion oldestIndexVersion) { + this(nodeId, buildVersion, buildVersion, oldestIndexVersion); } @Override @@ -93,7 +93,7 @@ public String nodeId() { return nodeId; } - public Version nodeVersion() { + public BuildVersion nodeVersion() { return nodeVersion; } @@ -103,7 +103,7 @@ public Version 
nodeVersion() { * the current version of the node ({@link NodeMetadata#upgradeToCurrentVersion()} before storing the node metadata again on disk. * In doing so, {@code previousNodeVersion} refers to the previously last known version that this node was started on. */ - public Version previousNodeVersion() { + public BuildVersion previousNodeVersion() { return previousNodeVersion; } @@ -111,11 +111,12 @@ public IndexVersion oldestIndexVersion() { return oldestIndexVersion; } + @UpdateForV9 public void verifyUpgradeToCurrentVersion() { - assert (nodeVersion.equals(Version.V_EMPTY) == false) || (Version.CURRENT.major <= Version.V_7_0_0.major + 1) - : "version is required in the node metadata from v9 onwards"; + // Enable the following assertion for V9: + // assert (nodeVersion.equals(BuildVersion.empty()) == false) : "version is required in the node metadata from v9 onwards"; - if (nodeVersion.before(Version.CURRENT.minimumCompatibilityVersion())) { + if (nodeVersion.onOrAfterMinimumCompatible() == false) { throw new IllegalStateException( "cannot upgrade a node from version [" + nodeVersion @@ -128,7 +129,7 @@ public void verifyUpgradeToCurrentVersion() { ); } - if (nodeVersion.after(Version.CURRENT)) { + if (nodeVersion.isFutureVersion()) { throw new IllegalStateException( "cannot downgrade a node from version [" + nodeVersion + "] to version [" + Build.current().version() + "]" ); @@ -138,13 +139,15 @@ public void verifyUpgradeToCurrentVersion() { public NodeMetadata upgradeToCurrentVersion() { verifyUpgradeToCurrentVersion(); - return nodeVersion.equals(Version.CURRENT) ? this : new NodeMetadata(nodeId, Version.CURRENT, nodeVersion, oldestIndexVersion); + return nodeVersion.equals(BuildVersion.current()) + ? this + : new NodeMetadata(nodeId, BuildVersion.current(), nodeVersion, oldestIndexVersion); } private static class Builder { String nodeId; - Version nodeVersion; - Version previousNodeVersion; + BuildVersion nodeVersion; + BuildVersion previousNodeVersion; IndexVersion oldestIndexVersion; public void setNodeId(String nodeId) { @@ -152,22 +155,20 @@ public void setNodeId(String nodeId) { } public void setNodeVersionId(int nodeVersionId) { - this.nodeVersion = Version.fromId(nodeVersionId); + this.nodeVersion = BuildVersion.fromVersionId(nodeVersionId); } public void setOldestIndexVersion(int oldestIndexVersion) { this.oldestIndexVersion = IndexVersion.fromId(oldestIndexVersion); } - private Version getVersionOrFallbackToEmpty() { - return Objects.requireNonNullElse(this.nodeVersion, Version.V_EMPTY); - } - + @UpdateForV9 // version is required in the node metadata from v9 onwards public NodeMetadata build() { - @UpdateForV9 // version is required in the node metadata from v9 onwards - final Version nodeVersion = getVersionOrFallbackToEmpty(); final IndexVersion oldestIndexVersion; + if (this.nodeVersion == null) { + nodeVersion = BuildVersion.fromVersionId(0); + } if (this.previousNodeVersion == null) { previousNodeVersion = nodeVersion; } @@ -207,7 +208,7 @@ protected XContentBuilder newXContentBuilder(XContentType type, OutputStream str @Override public void toXContent(XContentBuilder builder, NodeMetadata nodeMetadata) throws IOException { builder.field(NODE_ID_KEY, nodeMetadata.nodeId); - builder.field(NODE_VERSION_KEY, nodeMetadata.nodeVersion.id); + builder.field(NODE_VERSION_KEY, nodeMetadata.nodeVersion.id()); builder.field(OLDEST_INDEX_VERSION_KEY, nodeMetadata.oldestIndexVersion.id()); } diff --git a/server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java 
b/server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java index 14ae6cd5e26a0..c7dd913174782 100644 --- a/server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java +++ b/server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java @@ -82,7 +82,7 @@ protected void processDataPaths(Terminal terminal, Path[] paths, OptionSet optio confirm( terminal, - (nodeMetadata.nodeVersion().before(Version.CURRENT) ? TOO_OLD_MESSAGE : TOO_NEW_MESSAGE).replace( + (nodeMetadata.nodeVersion().onOrAfterMinimumCompatible() == false ? TOO_OLD_MESSAGE : TOO_NEW_MESSAGE).replace( "V_OLD", nodeMetadata.nodeVersion().toString() ).replace("V_NEW", nodeMetadata.nodeVersion().toString()).replace("V_CUR", Version.CURRENT.toString()) diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index e7b8eadb3f771..50fae2a1dda03 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -14,7 +14,6 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.coordination.CoordinationMetadata; @@ -35,6 +34,7 @@ import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Tuple; import org.elasticsearch.core.UpdateForV9; +import org.elasticsearch.env.BuildVersion; import org.elasticsearch.env.NodeMetadata; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.node.Node; @@ -222,7 +222,11 @@ private PersistedState createOnDiskPersistedState( } // write legacy node metadata to prevent accidental downgrades from spawning empty cluster state NodeMetadata.FORMAT.writeAndCleanup( - new NodeMetadata(persistedClusterStateService.getNodeId(), Version.CURRENT, clusterState.metadata().oldestIndexVersion()), + new NodeMetadata( + persistedClusterStateService.getNodeId(), + BuildVersion.current(), + clusterState.metadata().oldestIndexVersion() + ), persistedClusterStateService.getDataPaths() ); success = true; @@ -260,7 +264,11 @@ private PersistedState createInMemoryPersistedState( metaStateService.deleteAll(); // write legacy node metadata to prevent downgrades from spawning empty cluster state NodeMetadata.FORMAT.writeAndCleanup( - new NodeMetadata(persistedClusterStateService.getNodeId(), Version.CURRENT, clusterState.metadata().oldestIndexVersion()), + new NodeMetadata( + persistedClusterStateService.getNodeId(), + BuildVersion.current(), + clusterState.metadata().oldestIndexVersion() + ), persistedClusterStateService.getDataPaths() ); } diff --git a/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java b/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java index b86cfa6fdb7af..49ac38d656278 100644 --- a/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java @@ -69,6 +69,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.env.BuildVersion; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.NodeMetadata; import 
org.elasticsearch.index.IndexVersion; @@ -377,7 +378,8 @@ public static NodeMetadata nodeMetadata(Path... dataPaths) throws IOException { if (nodeId == null) { return null; } - return new NodeMetadata(nodeId, version, oldestIndexVersion); + // TODO: remove use of Version here (ES-7343) + return new NodeMetadata(nodeId, BuildVersion.fromVersionId(version.id()), oldestIndexVersion); } /** diff --git a/server/src/main/java/org/elasticsearch/health/HealthFeatures.java b/server/src/main/java/org/elasticsearch/health/HealthFeatures.java index 3a5d11f862efc..4b3bcf7e7278f 100644 --- a/server/src/main/java/org/elasticsearch/health/HealthFeatures.java +++ b/server/src/main/java/org/elasticsearch/health/HealthFeatures.java @@ -13,13 +13,21 @@ import org.elasticsearch.features.NodeFeature; import java.util.Map; +import java.util.Set; public class HealthFeatures implements FeatureSpecification { public static final NodeFeature SUPPORTS_HEALTH = new NodeFeature("health.supports_health"); + public static final NodeFeature SUPPORTS_SHARDS_CAPACITY_INDICATOR = new NodeFeature("health.shards_capacity_indicator"); + public static final NodeFeature SUPPORTS_EXTENDED_REPOSITORY_INDICATOR = new NodeFeature("health.extended_repository_indicator"); + + @Override + public Set getFeatures() { + return Set.of(SUPPORTS_EXTENDED_REPOSITORY_INDICATOR); + } @Override public Map getHistoricalFeatures() { - return Map.of(SUPPORTS_HEALTH, Version.V_8_5_0); + return Map.of(SUPPORTS_HEALTH, Version.V_8_5_0, SUPPORTS_SHARDS_CAPACITY_INDICATOR, Version.V_8_8_0); } } diff --git a/server/src/main/java/org/elasticsearch/health/node/DiskHealthIndicatorService.java b/server/src/main/java/org/elasticsearch/health/node/DiskHealthIndicatorService.java index 2805aa88a7e54..3304b71b4ca31 100644 --- a/server/src/main/java/org/elasticsearch/health/node/DiskHealthIndicatorService.java +++ b/server/src/main/java/org/elasticsearch/health/node/DiskHealthIndicatorService.java @@ -17,7 +17,9 @@ import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.health.Diagnosis; +import org.elasticsearch.health.HealthFeatures; import org.elasticsearch.health.HealthIndicatorDetails; import org.elasticsearch.health.HealthIndicatorImpact; import org.elasticsearch.health.HealthIndicatorResult; @@ -71,9 +73,11 @@ public class DiskHealthIndicatorService implements HealthIndicatorService { private static final String IMPACT_CLUSTER_FUNCTIONALITY_UNAVAILABLE_ID = "cluster_functionality_unavailable"; private final ClusterService clusterService; + private final FeatureService featureService; - public DiskHealthIndicatorService(ClusterService clusterService) { + public DiskHealthIndicatorService(ClusterService clusterService, FeatureService featureService) { this.clusterService = clusterService; + this.featureService = featureService; } @Override @@ -83,8 +87,18 @@ public String name() { @Override public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResourcesCount, HealthInfo healthInfo) { + ClusterState clusterState = clusterService.state(); Map diskHealthInfoMap = healthInfo.diskInfoByNode(); if (diskHealthInfoMap == null || diskHealthInfoMap.isEmpty()) { + if (featureService.clusterHasFeature(clusterState, HealthFeatures.SUPPORTS_HEALTH) == false) { + return createIndicator( + HealthStatus.GREEN, + "No disk usage data available. 
The cluster currently has mixed versions (an upgrade may be in progress).", + HealthIndicatorDetails.EMPTY, + List.of(), + List.of() + ); + } /* * If there is no disk health info, that either means that a new health node was just elected, or something is seriously * wrong with health data collection on the health node. Either way, we immediately return UNKNOWN. If there are at least @@ -98,7 +112,6 @@ public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResources Collections.emptyList() ); } - ClusterState clusterState = clusterService.state(); logNodesMissingHealthInfo(diskHealthInfoMap, clusterState); DiskHealthAnalyzer diskHealthAnalyzer = new DiskHealthAnalyzer(diskHealthInfoMap, clusterState); diff --git a/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java b/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java index d5d336b88b8ad..5ff147a11a06a 100644 --- a/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java +++ b/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java @@ -37,6 +37,7 @@ import java.util.List; import java.util.Objects; import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.core.Strings.format; @@ -77,6 +78,10 @@ public class LocalHealthMonitor implements ClusterStateListener { // Using a volatile reference to ensure that there is a single instance of monitoring running at all times. // No need for extra synchronization because all the writes are executed on the cluster applier thread. private volatile Monitoring monitoring; + // This variable keeps track of whether there's an in-flight request. We keep this variable here rather than the Monitoring class, + // as we'll create new instances of that class when we're (re)starting this local health monitoring process. + // This variable allows us to ensure that there's always, at most, 1 request in-flight, at any given moment. + private final AtomicBoolean inFlightRequest = new AtomicBoolean(false); private LocalHealthMonitor( Settings settings, @@ -152,7 +157,9 @@ private void stopMonitoring() { private void startMonitoringIfNecessary() { if (prerequisitesFulfilled && enabled) { if (isMonitorRunning() == false) { - monitoring = Monitoring.start(monitorInterval, threadPool, lastSeenHealthNode, healthTrackers, clusterService, client); + // First create the Monitoring instance, so we always have something to cancel. + monitoring = new Monitoring(monitorInterval, threadPool, healthTrackers, clusterService, client, inFlightRequest); + monitoring.start(); logger.debug("Local health monitoring started {}", monitoring); } else { logger.trace("Local health monitoring already started {}, skipping", monitoring); @@ -175,8 +182,6 @@ public void clusterChanged(ClusterChangedEvent event) { // On health node or on master node changes, the health node might be reset so the reported // health info gets reset to null, to ensure it will be resent. lastSeenHealthNode.set(currentHealthNode == null ? null : currentHealthNode.getId()); - // Reset the reference of each HealthTracker. 
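The gating above (repeated for ShardsCapacityHealthIndicatorService later in this diff) reports GREEN instead of UNKNOWN while some nodes in the cluster do not yet support the relevant health feature. A condensed sketch of that decision (the enum and helper are invented for illustration):

// Invented illustration: decide how to report missing health data depending on whether
// every node in the cluster already supports the feature that would produce it.
enum SketchStatus { GREEN, UNKNOWN }

final class MissingHealthDataSketch {
    static SketchStatus statusForMissingData(boolean clusterHasHealthFeature) {
        if (clusterHasHealthFeature == false) {
            // Mixed-version cluster: the data is legitimately absent, so do not raise an alarm.
            return SketchStatus.GREEN;
        }
        // Every node supports the feature but no data arrived: a health node was just elected
        // or collection is genuinely broken, so report UNKNOWN.
        return SketchStatus.UNKNOWN;
    }
}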
- healthTrackers.forEach(HealthTracker::reset); if (logger.isDebugEnabled()) { String reason; if (healthNodeChanged && masterNodeChanged) { @@ -200,6 +205,11 @@ public void clusterChanged(ClusterChangedEvent event) { && currentMasterNode != null; if (prerequisitesFulfilled == false || healthNodeChanged || masterNodeChanged) { stopMonitoring(); + // Reset the reference of each HealthTracker. + // By doing this after `stopMonitoring()`, we're sure the `Monitoring` instance has been cancelled and therefore won't + // touch the `lastReportedValue` of the health trackers after we've reset them (only the new `Monitoring` instance will + // be able to update them). + healthTrackers.forEach(HealthTracker::reset); } if (prerequisitesFulfilled) { startMonitoringIfNecessary(); @@ -227,61 +237,49 @@ private boolean hasHealthNodeChanged(DiscoveryNode currentHealthNode, ClusterCha * This class is responsible for running the health monitoring. It evaluates and checks the health info of this node * in the configured intervals. The first run happens upon initialization. If there is an exception, it will log it * and continue to schedule the next run. + * Usually, there will only be one instance of this class alive. However, when we're restarting + * the monitoring process (e.g. due to a health node change, see {@link LocalHealthMonitor#clusterChanged}), there will likely (shortly) + * be two instances alive at the same time. To avoid any concurrency issues, we're ensuring that there's always only one in-flight + * request and if a {@link Monitoring} instance is cancelled while a request is in-flight, we'll prevent it from updating the state + * of the {@link HealthTracker}s (and it'll be up to the next/new {@link Monitoring} instance to send a new request and update the + * {@link HealthTracker}s' state). */ static class Monitoring implements Runnable, Scheduler.Cancellable { private final TimeValue interval; private final Executor executor; - private final Scheduler scheduler; + private final ThreadPool threadPool; private final ClusterService clusterService; private final Client client; - private final AtomicReference lastSeenHealthNode; private final List> healthTrackers; + private final AtomicBoolean inFlightRequest; private volatile boolean cancelled = false; private volatile Scheduler.ScheduledCancellable scheduledRun; private Monitoring( TimeValue interval, - Scheduler scheduler, - Executor executor, - AtomicReference lastSeenHealthNode, + ThreadPool threadPool, List> healthTrackers, ClusterService clusterService, - Client client + Client client, + AtomicBoolean inFlightRequest ) { this.interval = interval; - this.executor = executor; - this.scheduler = scheduler; - this.lastSeenHealthNode = lastSeenHealthNode; + this.threadPool = threadPool; + this.executor = threadPool.executor(ThreadPool.Names.MANAGEMENT); this.clusterService = clusterService; this.healthTrackers = healthTrackers; this.client = client; + this.inFlightRequest = inFlightRequest; } /** - * Creates a monitoring instance and starts the schedules the first run. + * Schedule the first run of the monitor. 
*/ - static Monitoring start( - TimeValue interval, - ThreadPool threadPool, - AtomicReference lastSeenHealthNode, - List> healthTrackers, - ClusterService clusterService, - Client client - ) { - Monitoring monitoring = new Monitoring( - interval, - threadPool, - threadPool.executor(ThreadPool.Names.MANAGEMENT), - lastSeenHealthNode, - healthTrackers, - clusterService, - client - ); - monitoring.scheduledRun = threadPool.schedule(monitoring, TimeValue.ZERO, monitoring.executor); - return monitoring; + public void start() { + scheduledRun = threadPool.schedule(this, TimeValue.ZERO, executor); } /** @@ -301,7 +299,13 @@ public boolean cancel() { return false; } cancelled = true; - scheduledRun.cancel(); + var scheduledRun = this.scheduledRun; + // There is a chance this Monitoring instance gets cancelled before the `scheduledRun` field is assigned. + // However, this is not a problem as the most important thing is the `cancelled` field being set to false in this class, + // as that field actually prevents any updates to the HealthTrackers' states. + if (scheduledRun != null) { + scheduledRun.cancel(); + } return true; } @@ -318,8 +322,18 @@ public void run() { if (cancelled) { return; } + // Before we do anything, we're first going to make sure there is no in-flight request at this moment. + // If that's the case, we'll acquire the "lock", which prevents any other thread/instance from sending any requests. + if (inFlightRequest.compareAndSet(false, true) == false) { + logger.debug("Not allowed to send health info update request due to in-flight request, will try again."); + scheduleNextRunIfNecessary(); + return; + } boolean nextRunScheduled = false; - Runnable scheduleNextRun = new RunOnce(this::scheduleNextRunIfNecessary); + Runnable releaseAndScheduleNextRun = new RunOnce(() -> { + inFlightRequest.set(false); + scheduleNextRunIfNecessary(); + }); try { List> healthProgresses = getHealthProgresses(); if (healthProgresses.isEmpty()) { @@ -330,13 +344,13 @@ public void run() { var builder = new UpdateHealthInfoCacheAction.Request.Builder().nodeId(clusterService.localNode().getId()); healthProgresses.forEach(changedHealthInfo -> changedHealthInfo.updateRequestBuilder(builder)); - var healthNodeId = lastSeenHealthNode.get(); var listener = ActionListener.wrap(response -> { - // Don't update the latest health info if the health node has changed while this request was being processed. - if (Objects.equals(healthNodeId, lastSeenHealthNode.get()) == false) { - return; + // Only record health progress if this monitoring instance hasn't been cancelled in the meantime. + // This avoids any unwanted writes to the HealthTrackers' states after a new monitoring instance has possibly + // already started. 
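The compareAndSet acquire and the RunOnce release above carry the whole at-most-one-in-flight-request guarantee. A stripped-down, synchronous sketch of the same idea (class and method names invented; the real code releases the flag via ActionListener.runAfter once the async request completes):

import java.util.concurrent.atomic.AtomicBoolean;

// Invented illustration: only one runner may send at a time, and the flag is released
// exactly once before the next run is scheduled.
final class InFlightGuardSketch {
    private final AtomicBoolean inFlight = new AtomicBoolean(false);

    void runOnce(Runnable sendRequest, Runnable scheduleNext) {
        if (inFlight.compareAndSet(false, true) == false) {
            scheduleNext.run(); // another request is still in flight; try again next interval
            return;
        }
        try {
            sendRequest.run();
        } finally {
            inFlight.set(false);
            scheduleNext.run();
        }
    }
}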
+ if (cancelled == false) { + healthProgresses.forEach(HealthTracker.HealthProgress::recordProgressIfRelevant); } - healthProgresses.forEach(HealthTracker.HealthProgress::recordProgressIfRelevant); }, e -> { if (e.getCause() instanceof NodeNotConnectedException || e.getCause() instanceof HealthNodeNotDiscoveredException) { logger.debug("Failed to connect to the health node [{}], will try again.", e.getCause().getMessage()); @@ -344,14 +358,18 @@ public void run() { logger.debug(() -> format("Failed to send health info to health node, will try again."), e); } }); - client.execute(UpdateHealthInfoCacheAction.INSTANCE, builder.build(), ActionListener.runAfter(listener, scheduleNextRun)); + client.execute( + UpdateHealthInfoCacheAction.INSTANCE, + builder.build(), + ActionListener.runAfter(listener, releaseAndScheduleNextRun) + ); nextRunScheduled = true; } catch (Exception e) { logger.warn(() -> format("Failed to run scheduled health monitoring on thread pool [%s]", executor), e); } finally { // If the next run isn't scheduled because for example the health info hasn't changed, we schedule it here. if (nextRunScheduled == false) { - scheduleNextRun.run(); + releaseAndScheduleNextRun.run(); } } } @@ -379,7 +397,7 @@ private void scheduleNextRunIfNecessary() { return; } try { - scheduledRun = scheduler.schedule(this, interval, executor); + scheduledRun = threadPool.schedule(this, interval, executor); } catch (final EsRejectedExecutionException e) { logger.debug(() -> format("Scheduled health monitoring was rejected on thread pool [%s]", executor), e); } diff --git a/server/src/main/java/org/elasticsearch/health/node/ShardsCapacityHealthIndicatorService.java b/server/src/main/java/org/elasticsearch/health/node/ShardsCapacityHealthIndicatorService.java index 1852e504b61db..16e18b69d5c1d 100644 --- a/server/src/main/java/org/elasticsearch/health/node/ShardsCapacityHealthIndicatorService.java +++ b/server/src/main/java/org/elasticsearch/health/node/ShardsCapacityHealthIndicatorService.java @@ -12,7 +12,9 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.health.Diagnosis; +import org.elasticsearch.health.HealthFeatures; import org.elasticsearch.health.HealthIndicatorDetails; import org.elasticsearch.health.HealthIndicatorImpact; import org.elasticsearch.health.HealthIndicatorResult; @@ -90,9 +92,11 @@ public class ShardsCapacityHealthIndicatorService implements HealthIndicatorServ ); private final ClusterService clusterService; + private final FeatureService featureService; - public ShardsCapacityHealthIndicatorService(ClusterService clusterService) { + public ShardsCapacityHealthIndicatorService(ClusterService clusterService, FeatureService featureService) { this.clusterService = clusterService; + this.featureService = featureService; } @Override @@ -105,6 +109,15 @@ public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResources var state = clusterService.state(); var healthMetadata = HealthMetadata.getFromClusterState(state); if (healthMetadata == null || healthMetadata.getShardLimitsMetadata() == null) { + if (featureService.clusterHasFeature(state, HealthFeatures.SUPPORTS_SHARDS_CAPACITY_INDICATOR) == false) { + return createIndicator( + HealthStatus.GREEN, + "No shard limits configured yet. 
The cluster currently has mixed versions (an upgrade may be in progress).", + HealthIndicatorDetails.EMPTY, + List.of(), + List.of() + ); + } return unknownIndicator(); } diff --git a/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutor.java b/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutor.java index f183d8c7f1a82..cc908cd7cad2c 100644 --- a/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutor.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.features.FeatureService; import org.elasticsearch.health.HealthFeatures; +import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.persistent.PersistentTaskParams; import org.elasticsearch.persistent.PersistentTaskState; @@ -164,7 +165,12 @@ void startTask(ClusterChangedEvent event) { TASK_NAME, TASK_NAME, new HealthNodeTaskParams(), + null, ActionListener.wrap(r -> logger.debug("Created the health node task"), e -> { + if (e instanceof NodeClosedException) { + logger.debug("Failed to create health node task because node is shutting down", e); + return; + } Throwable t = e instanceof RemoteTransportException ? e.getCause() : e; if (t instanceof ResourceAlreadyExistsException == false) { logger.error("Failed to create the health node task", e); diff --git a/server/src/main/java/org/elasticsearch/index/IndexModule.java b/server/src/main/java/org/elasticsearch/index/IndexModule.java index b768c2f5a7d28..06a5e13a208be 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexModule.java +++ b/server/src/main/java/org/elasticsearch/index/IndexModule.java @@ -194,13 +194,14 @@ public IndexModule( final Map directoryFactories, final BooleanSupplier allowExpensiveQueries, final IndexNameExpressionResolver expressionResolver, - final Map recoveryStateFactories + final Map recoveryStateFactories, + final SlowLogFieldProvider slowLogFieldProvider ) { this.indexSettings = indexSettings; this.analysisRegistry = analysisRegistry; this.engineFactory = Objects.requireNonNull(engineFactory); - this.searchOperationListeners.add(new SearchSlowLog(indexSettings)); - this.indexOperationListeners.add(new IndexingSlowLog(indexSettings)); + this.searchOperationListeners.add(new SearchSlowLog(indexSettings, slowLogFieldProvider)); + this.indexOperationListeners.add(new IndexingSlowLog(indexSettings, slowLogFieldProvider)); this.directoryFactories = Collections.unmodifiableMap(directoryFactories); this.allowExpensiveQueries = allowExpensiveQueries; this.expressionResolver = expressionResolver; diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersions.java b/server/src/main/java/org/elasticsearch/index/IndexVersions.java index ae6185cdcc6b6..0ddcef2ac3a08 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexVersions.java +++ b/server/src/main/java/org/elasticsearch/index/IndexVersions.java @@ -101,6 +101,7 @@ private static IndexVersion def(int id, Version luceneVersion) { public static final IndexVersion NEW_INDEXVERSION_FORMAT = def(8_501_00_0, Version.LUCENE_9_9_1); public static final IndexVersion UPGRADE_LUCENE_9_9_2 = def(8_502_00_0, Version.LUCENE_9_9_2); public static final IndexVersion TIME_SERIES_ID_HASHING = def(8_502_00_1, Version.LUCENE_9_9_2); + public static final IndexVersion UPGRADE_TO_LUCENE_9_10 = def(8_503_00_0, 
Version.LUCENE_9_10_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java b/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java index 7b0a46f022dad..14c2c5440bd24 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java +++ b/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java @@ -69,6 +69,13 @@ public final class IndexingSlowLog implements IndexingOperationListener { Property.IndexScope ); + public static final Setting INDEX_INDEXING_SLOWLOG_INCLUDE_USER_SETTING = Setting.boolSetting( + INDEX_INDEXING_SLOWLOG_PREFIX + ".include.user", + false, + Property.Dynamic, + Property.IndexScope + ); + /** * Legacy index setting, kept for 7.x BWC compatibility. This setting has no effect in 8.x. Do not use. * TODO: Remove in 9.0 @@ -96,6 +103,7 @@ public final class IndexingSlowLog implements IndexingOperationListener { * characters of the source. */ private int maxSourceCharsToLog; + private final SlowLogFieldProvider slowLogFieldProvider; /** * Reads how much of the source to log. The user can specify any value they @@ -117,7 +125,8 @@ public final class IndexingSlowLog implements IndexingOperationListener { Property.IndexScope ); - IndexingSlowLog(IndexSettings indexSettings) { + IndexingSlowLog(IndexSettings indexSettings, SlowLogFieldProvider slowLogFieldProvider) { + this.slowLogFieldProvider = slowLogFieldProvider; this.indexLogger = LogManager.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index"); Loggers.setLevel(this.indexLogger, Level.TRACE); this.index = indexSettings.getIndex(); @@ -171,22 +180,66 @@ public void postIndex(ShardId shardId, Engine.Index indexOperation, Engine.Index final ParsedDocument doc = indexOperation.parsedDoc(); final long tookInNanos = result.getTook(); if (indexWarnThreshold >= 0 && tookInNanos > indexWarnThreshold) { - indexLogger.warn(IndexingSlowLogMessage.of(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); + indexLogger.warn( + IndexingSlowLogMessage.of( + this.slowLogFieldProvider.indexSlowLogFields(), + index, + doc, + tookInNanos, + reformat, + maxSourceCharsToLog + ) + ); } else if (indexInfoThreshold >= 0 && tookInNanos > indexInfoThreshold) { - indexLogger.info(IndexingSlowLogMessage.of(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); + indexLogger.info( + IndexingSlowLogMessage.of( + this.slowLogFieldProvider.indexSlowLogFields(), + index, + doc, + tookInNanos, + reformat, + maxSourceCharsToLog + ) + ); } else if (indexDebugThreshold >= 0 && tookInNanos > indexDebugThreshold) { - indexLogger.debug(IndexingSlowLogMessage.of(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); + indexLogger.debug( + IndexingSlowLogMessage.of( + this.slowLogFieldProvider.indexSlowLogFields(), + index, + doc, + tookInNanos, + reformat, + maxSourceCharsToLog + ) + ); } else if (indexTraceThreshold >= 0 && tookInNanos > indexTraceThreshold) { - indexLogger.trace(IndexingSlowLogMessage.of(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); + indexLogger.trace( + IndexingSlowLogMessage.of( + this.slowLogFieldProvider.indexSlowLogFields(), + index, + doc, + tookInNanos, + reformat, + maxSourceCharsToLog + ) + ); } } } static final class IndexingSlowLogMessage { - public static ESLogMessage of(Index index, ParsedDocument doc, long tookInNanos, boolean reformat, int maxSourceCharsToLog) { + public static ESLogMessage of( + Map additionalFields, + Index index, + ParsedDocument doc, + long tookInNanos, + boolean reformat, + int 
maxSourceCharsToLog + ) { Map jsonFields = prepareMap(index, doc, tookInNanos, reformat, maxSourceCharsToLog); + jsonFields.putAll(additionalFields); return new ESLogMessage().withFields(jsonFields); } diff --git a/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java b/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java index c477f5e4978d5..eb227e6e1136d 100644 --- a/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java +++ b/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java @@ -44,7 +44,16 @@ public final class SearchSlowLog implements SearchOperationListener { private final Logger queryLogger; private final Logger fetchLogger; + private final SlowLogFieldProvider slowLogFieldProvider; + static final String INDEX_SEARCH_SLOWLOG_PREFIX = "index.search.slowlog"; + + public static final Setting INDEX_SEARCH_SLOWLOG_INCLUDE_USER_SETTING = Setting.boolSetting( + INDEX_SEARCH_SLOWLOG_PREFIX + ".include.user", + false, + Property.Dynamic, + Property.IndexScope + ); public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING = Setting.timeSetting( INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.warn", TimeValue.timeValueNanos(-1), @@ -118,7 +127,10 @@ public final class SearchSlowLog implements SearchOperationListener { private static final ToXContent.Params FORMAT_PARAMS = new ToXContent.MapParams(Collections.singletonMap("pretty", "false")); - public SearchSlowLog(IndexSettings indexSettings) { + public SearchSlowLog(IndexSettings indexSettings, SlowLogFieldProvider slowLogFieldProvider) { + slowLogFieldProvider.init(indexSettings); + this.slowLogFieldProvider = slowLogFieldProvider; + this.queryLogger = LogManager.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".query"); this.fetchLogger = LogManager.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".fetch"); Loggers.setLevel(this.fetchLogger, Level.TRACE); @@ -154,33 +166,34 @@ public SearchSlowLog(IndexSettings indexSettings) { @Override public void onQueryPhase(SearchContext context, long tookInNanos) { if (queryWarnThreshold >= 0 && tookInNanos > queryWarnThreshold) { - queryLogger.warn(SearchSlowLogMessage.of(context, tookInNanos)); + queryLogger.warn(SearchSlowLogMessage.of(this.slowLogFieldProvider.searchSlowLogFields(), context, tookInNanos)); } else if (queryInfoThreshold >= 0 && tookInNanos > queryInfoThreshold) { - queryLogger.info(SearchSlowLogMessage.of(context, tookInNanos)); + queryLogger.info(SearchSlowLogMessage.of(this.slowLogFieldProvider.searchSlowLogFields(), context, tookInNanos)); } else if (queryDebugThreshold >= 0 && tookInNanos > queryDebugThreshold) { - queryLogger.debug(SearchSlowLogMessage.of(context, tookInNanos)); + queryLogger.debug(SearchSlowLogMessage.of(this.slowLogFieldProvider.searchSlowLogFields(), context, tookInNanos)); } else if (queryTraceThreshold >= 0 && tookInNanos > queryTraceThreshold) { - queryLogger.trace(SearchSlowLogMessage.of(context, tookInNanos)); + queryLogger.trace(SearchSlowLogMessage.of(this.slowLogFieldProvider.searchSlowLogFields(), context, tookInNanos)); } } @Override public void onFetchPhase(SearchContext context, long tookInNanos) { if (fetchWarnThreshold >= 0 && tookInNanos > fetchWarnThreshold) { - fetchLogger.warn(SearchSlowLogMessage.of(context, tookInNanos)); + fetchLogger.warn(SearchSlowLogMessage.of(this.slowLogFieldProvider.searchSlowLogFields(), context, tookInNanos)); } else if (fetchInfoThreshold >= 0 && tookInNanos > fetchInfoThreshold) { - fetchLogger.info(SearchSlowLogMessage.of(context, tookInNanos)); + 
fetchLogger.info(SearchSlowLogMessage.of(this.slowLogFieldProvider.searchSlowLogFields(), context, tookInNanos)); } else if (fetchDebugThreshold >= 0 && tookInNanos > fetchDebugThreshold) { - fetchLogger.debug(SearchSlowLogMessage.of(context, tookInNanos)); + fetchLogger.debug(SearchSlowLogMessage.of(this.slowLogFieldProvider.searchSlowLogFields(), context, tookInNanos)); } else if (fetchTraceThreshold >= 0 && tookInNanos > fetchTraceThreshold) { - fetchLogger.trace(SearchSlowLogMessage.of(context, tookInNanos)); + fetchLogger.trace(SearchSlowLogMessage.of(this.slowLogFieldProvider.searchSlowLogFields(), context, tookInNanos)); } } static final class SearchSlowLogMessage { - public static ESLogMessage of(SearchContext context, long tookInNanos) { + public static ESLogMessage of(Map additionalFields, SearchContext context, long tookInNanos) { Map jsonFields = prepareMap(context, tookInNanos); + jsonFields.putAll(additionalFields); return new ESLogMessage().withFields(jsonFields); } diff --git a/server/src/main/java/org/elasticsearch/index/SlowLogFieldProvider.java b/server/src/main/java/org/elasticsearch/index/SlowLogFieldProvider.java new file mode 100644 index 0000000000000..c272ec23ef7e5 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/SlowLogFieldProvider.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index; + +import java.util.Map; + +/** + * Interface for providing additional fields to the slow log from a plugin. + * Intended to be loaded through SPI. + */ +public interface SlowLogFieldProvider { + /** + * Initialize field provider with index level settings to be able to listen for updates and set initial values + * @param indexSettings settings for the index + */ + void init(IndexSettings indexSettings); + + /** + * Slow log fields for indexing events + * @return map of field name to value + */ + Map indexSlowLogFields(); + + /** + * Slow log fields for search events + * @return map of field name to value + */ + Map searchSlowLogFields(); +} diff --git a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java index f1f03eff88d08..f8bc40a395472 100644 --- a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java +++ b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java @@ -24,6 +24,8 @@ import org.apache.lucene.util.BitDocIdSet; import org.apache.lucene.util.BitSet; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.common.cache.Cache; import org.elasticsearch.common.cache.CacheBuilder; import org.elasticsearch.common.cache.RemovalListener; @@ -55,6 +57,8 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; +import static org.elasticsearch.index.IndexSettings.INDEX_FAST_REFRESH_SETTING; + /** * This is a cache for {@link BitDocIdSet} based filters and is unbounded by size or time. *

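To make the new SlowLogFieldProvider SPI introduced above more concrete, here is a minimal plugin-side sketch. The package, class name, the "user.name" field, and the Map<String, String> generics (which this diff's flattened formatting elides) are illustrative assumptions, not part of this change:

package org.example.slowlog; // hypothetical plugin package, for illustration only

import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.SearchSlowLog;
import org.elasticsearch.index.SlowLogFieldProvider;

import java.util.Map;

public class ExampleSlowLogFieldProvider implements SlowLogFieldProvider {

    private volatile boolean includeUser;

    @Override
    public void init(IndexSettings indexSettings) {
        // Read the new dynamic "index.search.slowlog.include.user" setting; a real provider
        // would also register a settings-update consumer so later changes are picked up.
        includeUser = indexSettings.getValue(SearchSlowLog.INDEX_SEARCH_SLOWLOG_INCLUDE_USER_SETTING);
    }

    @Override
    public Map<String, String> indexSlowLogFields() {
        // Fields returned here are merged into the indexing slow log JSON via putAll(...).
        return includeUser ? Map.of("user.name", resolveUserName()) : Map.of();
    }

    @Override
    public Map<String, String> searchSlowLogFields() {
        // Same idea for the search slow log; the two phases may expose different fields if desired.
        return includeUser ? Map.of("user.name", resolveUserName()) : Map.of();
    }

    private String resolveUserName() {
        return "example-user"; // placeholder; a real provider would resolve the authenticated user
    }
}

A provider like this would typically be registered as a Java service provider (META-INF/services/org.elasticsearch.index.SlowLogFieldProvider); the IndicesService.loadSlowLogFieldProvider() change later in this diff loads all such providers via pluginsService.loadServiceProviders and merges their field maps into a single composite provider.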
    @@ -92,10 +96,22 @@ public BitsetFilterCache(IndexSettings indexSettings, Listener listener) { throw new IllegalArgumentException("listener must not be null"); } this.index = indexSettings.getIndex(); - this.loadRandomAccessFiltersEagerly = indexSettings.getValue(INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING); + this.loadRandomAccessFiltersEagerly = shouldLoadRandomAccessFiltersEagerly(indexSettings); this.listener = listener; } + static boolean shouldLoadRandomAccessFiltersEagerly(IndexSettings settings) { + boolean loadFiltersEagerlySetting = settings.getValue(INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING); + boolean isStateless = DiscoveryNode.isStateless(settings.getNodeSettings()); + if (isStateless) { + return DiscoveryNode.hasRole(settings.getNodeSettings(), DiscoveryNodeRole.INDEX_ROLE) + && loadFiltersEagerlySetting + && INDEX_FAST_REFRESH_SETTING.get(settings.getSettings()); + } else { + return loadFiltersEagerlySetting; + } + } + public static BitSet bitsetFromQuery(Query query, LeafReaderContext context) throws IOException { final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context); final IndexSearcher searcher = new IndexSearcher(topLevelContext); diff --git a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java index 1a1e470519213..270bcd2297a67 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java +++ b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java @@ -97,21 +97,13 @@ public void onCommit(List commits) throws IOException { assert Thread.holdsLock(this) == false : "should not block concurrent acquire or release"; final int keptPosition = indexOfKeptCommits(commits, globalCheckpointSupplier.getAsLong()); final IndexCommit safeCommit = commits.get(keptPosition); - int totalDocsOfSafeCommit; - try { - totalDocsOfSafeCommit = getDocCountOfCommit(safeCommit); - } catch (IOException ex) { - logger.info("failed to get the total docs from the safe commit; use the total docs from the previous safe commit", ex); - totalDocsOfSafeCommit = safeCommitInfo.docCount; - } - IndexCommit newCommit = null; - IndexCommit previousLastCommit = null; + final var newSafeCommitInfo = getNewSafeCommitInfo(safeCommit); + final IndexCommit newCommit; + final IndexCommit previousLastCommit; List deletedCommits = null; synchronized (this) { - this.safeCommitInfo = new SafeCommitInfo( - Long.parseLong(safeCommit.getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)), - totalDocsOfSafeCommit - ); + // we are synchronized on the IndexWriter in this method so nothing concurrently changed safeCommitInfo since the previous read + this.safeCommitInfo = newSafeCommitInfo; previousLastCommit = this.lastCommit; this.lastCommit = commits.get(commits.size() - 1); this.safeCommit = safeCommit; @@ -123,6 +115,8 @@ public void onCommit(List commits) throws IOException { } if (commitsListener != null && previousLastCommit != this.lastCommit) { newCommit = acquireIndexCommit(false); + } else { + newCommit = null; } for (int i = 0; i < keptPosition; i++) { final IndexCommit commit = commits.get(i); @@ -149,6 +143,31 @@ public void onCommit(List commits) throws IOException { } } + private SafeCommitInfo getNewSafeCommitInfo(IndexCommit newSafeCommit) { + final var currentSafeCommitInfo = this.safeCommitInfo; + final long newSafeCommitLocalCheckpoint; + try { + newSafeCommitLocalCheckpoint = 
Long.parseLong(newSafeCommit.getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)); + } catch (Exception ex) { + logger.info("failed to get the local checkpoint from the safe commit; use the info from the previous safe commit", ex); + return currentSafeCommitInfo; + } + + if (currentSafeCommitInfo.localCheckpoint == newSafeCommitLocalCheckpoint) { + // the new commit could in principle have the same LCP but a different doc count due to extra operations between its LCP and + // MSN, but that is a transient state since we'll eventually advance the LCP. The doc count is only used for heuristics around + // expiring excessively-lagging retention leases, so a little inaccuracy is tolerable here. + return currentSafeCommitInfo; + } + + try { + return new SafeCommitInfo(newSafeCommitLocalCheckpoint, getDocCountOfCommit(newSafeCommit)); + } catch (IOException ex) { + logger.info("failed to get the total docs from the safe commit; use the total docs from the previous safe commit", ex); + return new SafeCommitInfo(newSafeCommitLocalCheckpoint, currentSafeCommitInfo.docCount); + } + } + private boolean assertSafeCommitUnchanged(IndexCommit safeCommit) { // This is protected from concurrent calls by a lock on the IndexWriter, but this assertion makes sure that we notice if that ceases // to be true in future. It is not disastrous if safeCommitInfo refers to an older safeCommit, it just means that we might retain a diff --git a/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java b/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java index 1005f8f486beb..e63d5ef87973b 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java +++ b/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java @@ -22,7 +22,7 @@ import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TopDocs; -import org.apache.lucene.search.TopFieldCollector; +import org.apache.lucene.search.TopFieldCollectorManager; import org.apache.lucene.util.ArrayUtil; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.Lucene; @@ -296,14 +296,13 @@ private TopDocs searchOperations(FieldDoc after, boolean accurateTotalHits) thro final Query rangeQuery = rangeQuery(Math.max(fromSeqNo, lastSeenSeqNo), toSeqNo, indexVersionCreated); assert accurateTotalHits == false || after == null : "accurate total hits is required by the first batch only"; final SortField sortBySeqNo = new SortField(SeqNoFieldMapper.NAME, SortField.Type.LONG); - final TopFieldCollector collector = TopFieldCollector.create( + TopFieldCollectorManager topFieldCollectorManager = new TopFieldCollectorManager( new Sort(sortBySeqNo), searchBatchSize, after, accurateTotalHits ? 
Integer.MAX_VALUE : 0 ); - indexSearcher.search(rangeQuery, collector); - return collector.topDocs(); + return indexSearcher.search(rangeQuery, topFieldCollectorManager); } private Translog.Operation readDocAsOp(int docIndex) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java b/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java index 708d042c91bf9..e5eeac72927c0 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java +++ b/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java @@ -161,6 +161,7 @@ private static class TranslogLeafReader extends LeafReader { 0, VectorEncoding.FLOAT32, VectorSimilarityFunction.EUCLIDEAN, + false, false ); private static final FieldInfo FAKE_ROUTING_FIELD = new FieldInfo( @@ -179,6 +180,7 @@ private static class TranslogLeafReader extends LeafReader { 0, VectorEncoding.FLOAT32, VectorSimilarityFunction.EUCLIDEAN, + false, false ); private static final FieldInfo FAKE_ID_FIELD = new FieldInfo( @@ -197,6 +199,7 @@ private static class TranslogLeafReader extends LeafReader { 0, VectorEncoding.FLOAT32, VectorSimilarityFunction.EUCLIDEAN, + false, false ); private static final Set TRANSLOG_FIELD_NAMES = Set.of(SourceFieldMapper.NAME, RoutingFieldMapper.NAME, IdFieldMapper.NAME); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentLeafReader.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentLeafReader.java index 49934776bc4a3..db90c8f052a5e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentLeafReader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentLeafReader.java @@ -291,6 +291,7 @@ private static FieldInfo fieldInfo(String name) { 0, VectorEncoding.FLOAT32, VectorSimilarityFunction.EUCLIDEAN, + false, false ); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java index 66c5de61bcd92..01e67377adafd 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java @@ -613,7 +613,7 @@ public final MapperBuilderContext createDynamicMapperBuilderContext() { if (objectMapper instanceof PassThroughObjectMapper passThroughObjectMapper) { containsDimensions = passThroughObjectMapper.containsDimensions(); } - return new MapperBuilderContext(p, mappingLookup().isSourceSynthetic(), false, containsDimensions); + return new MapperBuilderContext(p, mappingLookup().isSourceSynthetic(), false, containsDimensions, dynamic); } public abstract XContentParser parser(); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java index 75d9fed2a4d4b..71fd9edd49903 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java @@ -479,7 +479,7 @@ public MultiFields build(Mapper.Builder mainFieldBuilder, MapperBuilderContext c return empty(); } else { FieldMapper[] mappers = new FieldMapper[mapperBuilders.size()]; - context = context.createChildContext(mainFieldBuilder.name()); + context = context.createChildContext(mainFieldBuilder.name(), null); int i = 0; for (Map.Entry> entry : this.mapperBuilders.entrySet()) { mappers[i++] = 
entry.getValue().apply(context); @@ -1230,7 +1230,7 @@ protected void merge(FieldMapper in, Conflicts conflicts, MapperMergeContext map for (Parameter param : getParameters()) { param.merge(in, conflicts); } - MapperMergeContext childContext = mapperMergeContext.createChildContext(in.simpleName()); + MapperMergeContext childContext = mapperMergeContext.createChildContext(in.simpleName(), null); for (FieldMapper newSubField : in.multiFields.mappers) { multiFieldsBuilder.update(newSubField, childContext); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java index 8ee9665f60362..265374a687312 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java @@ -9,7 +9,6 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.FieldInfos; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.PrefixCodedTerms; @@ -644,12 +643,7 @@ public void validateMatchedRoutingPath(String routingPath) { * @return {@code true} if field is present in fieldInfos {@code false} otherwise */ public boolean fieldHasValue(FieldInfos fieldInfos) { - for (FieldInfo fieldInfo : fieldInfos) { - if (fieldInfo.getName().equals(name())) { - return true; - } - } - return false; + return fieldInfos.fieldInfo(name()) != null; } /** diff --git a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java index 14a71531c6abb..7c047125a80d3 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java @@ -24,10 +24,10 @@ public abstract class Mapper implements ToXContentFragment, Iterable { public abstract static class Builder { - private final String name; + private String name; protected Builder(String name) { - this.name = internFieldName(name); + setName(name); } // TODO rename this to leafName? @@ -37,6 +37,10 @@ public final String name() { /** Returns a newly built mapper. 
*/ public abstract Mapper build(MapperBuilderContext context); + + void setName(String name) { + this.name = internFieldName(name); + } } public interface TypeParser { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperBuilderContext.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperBuilderContext.java index 4154c936bab52..bbfb9298c23ca 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperBuilderContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperBuilderContext.java @@ -9,6 +9,9 @@ package org.elasticsearch.index.mapper; import org.elasticsearch.common.Strings; +import org.elasticsearch.core.Nullable; + +import java.util.Objects; /** * Holds context for building Mapper objects from their Builders @@ -19,32 +22,69 @@ public class MapperBuilderContext { * The root context, to be used when building a tree of mappers */ public static MapperBuilderContext root(boolean isSourceSynthetic, boolean isDataStream) { - return new MapperBuilderContext(null, isSourceSynthetic, isDataStream, false); + return new MapperBuilderContext(null, isSourceSynthetic, isDataStream, false, ObjectMapper.Defaults.DYNAMIC); } private final String path; private final boolean isSourceSynthetic; private final boolean isDataStream; private final boolean parentObjectContainsDimensions; + private final ObjectMapper.Dynamic dynamic; MapperBuilderContext(String path) { - this(path, false, false, false); + this(path, false, false, false, ObjectMapper.Defaults.DYNAMIC); } - MapperBuilderContext(String path, boolean isSourceSynthetic, boolean isDataStream, boolean parentObjectContainsDimensions) { + MapperBuilderContext( + String path, + boolean isSourceSynthetic, + boolean isDataStream, + boolean parentObjectContainsDimensions, + ObjectMapper.Dynamic dynamic + ) { + Objects.requireNonNull(dynamic, "dynamic must not be null"); this.path = path; this.isSourceSynthetic = isSourceSynthetic; this.isDataStream = isDataStream; this.parentObjectContainsDimensions = parentObjectContainsDimensions; + this.dynamic = dynamic; + } + + /** + * Creates a new MapperBuilderContext that is a child of this context + * + * @param name the name of the child context + * @param dynamic strategy for handling dynamic mappings in this context + * @return a new MapperBuilderContext with this context as its parent + */ + public MapperBuilderContext createChildContext(String name, @Nullable ObjectMapper.Dynamic dynamic) { + return createChildContext(name, this.parentObjectContainsDimensions, dynamic); } /** * Creates a new MapperBuilderContext that is a child of this context - * @param name the name of the child context + * + * @param name the name of the child context + * @param dynamic strategy for handling dynamic mappings in this context + * @param parentObjectContainsDimensions whether the parent object contains dimensions * @return a new MapperBuilderContext with this context as its parent */ - public MapperBuilderContext createChildContext(String name) { - return new MapperBuilderContext(buildFullName(name), isSourceSynthetic, isDataStream, parentObjectContainsDimensions); + public MapperBuilderContext createChildContext( + String name, + boolean parentObjectContainsDimensions, + @Nullable ObjectMapper.Dynamic dynamic + ) { + return new MapperBuilderContext( + buildFullName(name), + this.isSourceSynthetic, + this.isDataStream, + parentObjectContainsDimensions, + getDynamic(dynamic) + ); + } + + protected ObjectMapper.Dynamic getDynamic(@Nullable ObjectMapper.Dynamic 
dynamic) { + return dynamic == null ? this.dynamic : dynamic; } /** @@ -78,4 +118,7 @@ public boolean parentObjectContainsDimensions() { return parentObjectContainsDimensions; } + public ObjectMapper.Dynamic getDynamic() { + return dynamic; + } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperMergeContext.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperMergeContext.java index 0af182f315559..8f8854ad47c7d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperMergeContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperMergeContext.java @@ -46,8 +46,8 @@ public static MapperMergeContext from(MapperBuilderContext mapperBuilderContext, * @param name the name of the child context * @return a new {@link MapperMergeContext} with this context as its parent */ - MapperMergeContext createChildContext(String name) { - return createChildContext(mapperBuilderContext.createChildContext(name)); + MapperMergeContext createChildContext(String name, ObjectMapper.Dynamic dynamic) { + return createChildContext(mapperBuilderContext.createChildContext(name, dynamic)); } /** diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java index 1216618b1e986..f07d69d86f36c 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java @@ -62,7 +62,11 @@ public NestedObjectMapper build(MapperBuilderContext context) { this.includeInRoot = Explicit.IMPLICIT_FALSE; } } - NestedMapperBuilderContext nestedContext = new NestedMapperBuilderContext(context.buildFullName(name()), parentIncludedInRoot); + NestedMapperBuilderContext nestedContext = new NestedMapperBuilderContext( + context.buildFullName(name()), + parentIncludedInRoot, + context.getDynamic(dynamic) + ); final String fullPath = context.buildFullName(name()); final String nestedTypePath; if (indexCreatedVersion.before(IndexVersions.V_8_0_0)) { @@ -117,14 +121,14 @@ private static class NestedMapperBuilderContext extends MapperBuilderContext { final boolean parentIncludedInRoot; - NestedMapperBuilderContext(String path, boolean parentIncludedInRoot) { - super(path); + NestedMapperBuilderContext(String path, boolean parentIncludedInRoot, Dynamic dynamic) { + super(path, false, false, false, dynamic); this.parentIncludedInRoot = parentIncludedInRoot; } @Override - public MapperBuilderContext createChildContext(String name) { - return new NestedMapperBuilderContext(buildFullName(name), parentIncludedInRoot); + public MapperBuilderContext createChildContext(String name, Dynamic dynamic) { + return new NestedMapperBuilderContext(buildFullName(name), parentIncludedInRoot, getDynamic(dynamic)); } } @@ -280,7 +284,11 @@ protected MapperMergeContext createChildContext(MapperMergeContext mapperMergeCo parentIncludedInRoot |= this.includeInParent.value(); } return mapperMergeContext.createChildContext( - new NestedMapperBuilderContext(mapperBuilderContext.buildFullName(name), parentIncludedInRoot) + new NestedMapperBuilderContext( + mapperBuilderContext.buildFullName(name), + parentIncludedInRoot, + mapperBuilderContext.getDynamic(dynamic) + ) ); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java index a9de4bdd1467a..33e736ff122a1 100644 --- 
a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.MapperService.MergeReason; @@ -40,6 +41,7 @@ public class ObjectMapper extends Mapper { public static class Defaults { public static final boolean ENABLED = true; public static final Explicit SUBOBJECTS = Explicit.IMPLICIT_TRUE; + public static final Dynamic DYNAMIC = Dynamic.TRUE; } public enum Dynamic { @@ -69,7 +71,7 @@ DynamicFieldsBuilder getDynamicFieldsBuilder() { */ static Dynamic getRootDynamic(MappingLookup mappingLookup) { ObjectMapper.Dynamic rootDynamic = mappingLookup.getMapping().getRoot().dynamic; - return rootDynamic == null ? ObjectMapper.Dynamic.TRUE : rootDynamic; + return rootDynamic == null ? Defaults.DYNAMIC : rootDynamic; } } @@ -154,7 +156,6 @@ protected final Map buildMappers(MapperBuilderContext mapperBuil Map mappers = new HashMap<>(); for (Mapper.Builder builder : mappersBuilders) { Mapper mapper = builder.build(mapperBuilderContext); - assert mapper instanceof ObjectMapper == false || subobjects.value() : "unexpected object while subobjects are disabled"; Mapper existing = mappers.get(mapper.simpleName()); if (existing != null) { // The same mappings or document may hold the same field twice, either because duplicated JSON keys are allowed or @@ -164,7 +165,12 @@ protected final Map buildMappers(MapperBuilderContext mapperBuil // mix of object notation and dot notation. 
mapper = existing.merge(mapper, MapperMergeContext.from(mapperBuilderContext, Long.MAX_VALUE)); } - mappers.put(mapper.simpleName(), mapper); + if (subobjects.value() == false && mapper instanceof ObjectMapper objectMapper) { + // We're parsing a mapping that has set `subobjects: false` but has defined sub-objects + objectMapper.asFlattenedFieldMappers(mapperBuilderContext).forEach(m -> mappers.put(m.simpleName(), m)); + } else { + mappers.put(mapper.simpleName(), mapper); + } } return mappers; } @@ -177,7 +183,7 @@ public ObjectMapper build(MapperBuilderContext context) { enabled, subobjects, dynamic, - buildMappers(context.createChildContext(name())) + buildMappers(context.createChildContext(name(), dynamic)) ); } } @@ -300,12 +306,9 @@ protected static void parseProperties( } } - if (objBuilder.subobjects.value() == false - && (type.equals(ObjectMapper.CONTENT_TYPE) - || type.equals(NestedObjectMapper.CONTENT_TYPE) - || type.equals(PassThroughObjectMapper.CONTENT_TYPE))) { + if (objBuilder.subobjects.value() == false && type.equals(NestedObjectMapper.CONTENT_TYPE)) { throw new MapperParsingException( - "Tried to add subobject [" + "Tried to add nested object [" + fieldName + "] to object [" + objBuilder.name() @@ -390,6 +393,8 @@ private static void validateFieldName(String fieldName, IndexVersion indexCreate } else { this.mappers = Map.copyOf(mappers); } + assert subobjects.value() || this.mappers.values().stream().noneMatch(m -> m instanceof ObjectMapper) + : "When subobjects is false, mappers must not contain an ObjectMapper"; } /** @@ -462,7 +467,7 @@ public void validate(MappingLookup mappers) { } protected MapperMergeContext createChildContext(MapperMergeContext mapperMergeContext, String name) { - return mapperMergeContext.createChildContext(name); + return mapperMergeContext.createChildContext(name, dynamic); } public ObjectMapper merge(Mapper mergeWith, MergeReason reason, MapperMergeContext parentMergeContext) { @@ -527,7 +532,13 @@ static MergeResult build( subObjects = existing.subobjects; } MapperMergeContext objectMergeContext = existing.createChildContext(parentMergeContext, existing.simpleName()); - Map mergedMappers = buildMergedMappers(existing, mergeWithObject, reason, objectMergeContext); + Map mergedMappers = buildMergedMappers( + existing, + mergeWithObject, + reason, + objectMergeContext, + subObjects.value() + ); return new MergeResult( enabled, subObjects, @@ -540,25 +551,36 @@ private static Map buildMergedMappers( ObjectMapper existing, ObjectMapper mergeWithObject, MergeReason reason, - MapperMergeContext objectMergeContext + MapperMergeContext objectMergeContext, + boolean subobjects ) { - Iterator iterator = mergeWithObject.iterator(); - if (iterator.hasNext() == false) { - return Map.copyOf(existing.mappers); + Map mergedMappers = new HashMap<>(); + for (Mapper childOfExistingMapper : existing.mappers.values()) { + if (subobjects == false && childOfExistingMapper instanceof ObjectMapper objectMapper) { + // An existing mapping with sub-objects is merged with a mapping that has set `subobjects: false` + objectMapper.asFlattenedFieldMappers(objectMergeContext.getMapperBuilderContext()) + .forEach(m -> mergedMappers.put(m.simpleName(), m)); + } else { + putMergedMapper(mergedMappers, childOfExistingMapper); + } } - Map mergedMappers = new HashMap<>(existing.mappers); - while (iterator.hasNext()) { - Mapper mergeWithMapper = iterator.next(); + for (Mapper mergeWithMapper : mergeWithObject) { Mapper mergeIntoMapper = 
mergedMappers.get(mergeWithMapper.simpleName()); - Mapper merged = null; if (mergeIntoMapper == null) { - if (objectMergeContext.decrementFieldBudgetIfPossible(mergeWithMapper.getTotalFieldsCount())) { - merged = mergeWithMapper; + if (subobjects == false && mergeWithMapper instanceof ObjectMapper objectMapper) { + // An existing mapping that has set `subobjects: false` is merged with a mapping with sub-objects + objectMapper.asFlattenedFieldMappers(objectMergeContext.getMapperBuilderContext()) + .stream() + .filter(m -> objectMergeContext.decrementFieldBudgetIfPossible(m.getTotalFieldsCount())) + .forEach(m -> putMergedMapper(mergedMappers, m)); + } else if (objectMergeContext.decrementFieldBudgetIfPossible(mergeWithMapper.getTotalFieldsCount())) { + putMergedMapper(mergedMappers, mergeWithMapper); } else if (mergeWithMapper instanceof ObjectMapper om) { - merged = truncateObjectMapper(reason, objectMergeContext, om); + putMergedMapper(mergedMappers, truncateObjectMapper(reason, objectMergeContext, om)); } } else if (mergeIntoMapper instanceof ObjectMapper objectMapper) { - merged = objectMapper.merge(mergeWithMapper, reason, objectMergeContext); + assert subobjects : "existing object mappers are supposed to be flattened if subobjects is false"; + putMergedMapper(mergedMappers, objectMapper.merge(mergeWithMapper, reason, objectMergeContext)); } else { assert mergeIntoMapper instanceof FieldMapper || mergeIntoMapper instanceof FieldAliasMapper; if (mergeWithMapper instanceof NestedObjectMapper) { @@ -570,18 +592,21 @@ private static Map buildMergedMappers( // If we're merging template mappings when creating an index, then a field definition always // replaces an existing one. if (reason == MergeReason.INDEX_TEMPLATE) { - merged = mergeWithMapper; + putMergedMapper(mergedMappers, mergeWithMapper); } else { - merged = mergeIntoMapper.merge(mergeWithMapper, objectMergeContext); + putMergedMapper(mergedMappers, mergeIntoMapper.merge(mergeWithMapper, objectMergeContext)); } } - if (merged != null) { - mergedMappers.put(merged.simpleName(), merged); - } } return Map.copyOf(mergedMappers); } + private static void putMergedMapper(Map mergedMappers, @Nullable Mapper merged) { + if (merged != null) { + mergedMappers.put(merged.simpleName(), merged); + } + } + private static ObjectMapper truncateObjectMapper(MergeReason reason, MapperMergeContext context, ObjectMapper objectMapper) { // there's not enough capacity for the whole object mapper, // so we're just trying to add the shallow object, without it's sub-fields @@ -594,6 +619,65 @@ private static ObjectMapper truncateObjectMapper(MergeReason reason, MapperMerge } } + /** + * Returns all FieldMappers this ObjectMapper or its children hold. + * The name of the FieldMappers will be updated to reflect the hierarchy. 
+ * + * @throws IllegalArgumentException if the mapper cannot be flattened + */ + List asFlattenedFieldMappers(MapperBuilderContext context) { + List flattenedMappers = new ArrayList<>(); + ContentPath path = new ContentPath(); + asFlattenedFieldMappers(context, flattenedMappers, path); + return flattenedMappers; + } + + private void asFlattenedFieldMappers(MapperBuilderContext context, List flattenedMappers, ContentPath path) { + ensureFlattenable(context, path); + path.add(simpleName()); + for (Mapper mapper : mappers.values()) { + if (mapper instanceof FieldMapper fieldMapper) { + FieldMapper.Builder fieldBuilder = fieldMapper.getMergeBuilder(); + fieldBuilder.setName(path.pathAsText(mapper.simpleName())); + flattenedMappers.add(fieldBuilder.build(context)); + } else if (mapper instanceof ObjectMapper objectMapper) { + objectMapper.asFlattenedFieldMappers(context, flattenedMappers, path); + } + } + path.remove(); + } + + private void ensureFlattenable(MapperBuilderContext context, ContentPath path) { + if (dynamic != null && context.getDynamic() != dynamic) { + throwAutoFlatteningException( + path, + "the value of [dynamic] (" + + dynamic + + ") is not compatible with the value from its parent context (" + + context.getDynamic() + + ")" + ); + } + if (isEnabled() == false) { + throwAutoFlatteningException(path, "the value of [enabled] is [false]"); + } + if (subobjects.explicit() && subobjects()) { + throwAutoFlatteningException(path, "the value of [subobjects] is [true]"); + } + } + + private void throwAutoFlatteningException(ContentPath path, String reason) { + throw new IllegalArgumentException( + "Object mapper [" + + path.pathAsText(simpleName()) + + "] was found in a context where subobjects is set to false. " + + "Auto-flattening [" + + path.pathAsText(simpleName()) + + "] failed because " + + reason + ); + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { toXContent(builder, params, null); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java index 4ce7f51ed7386..05ae7e59f69c3 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java @@ -60,7 +60,7 @@ public PassThroughObjectMapper build(MapperBuilderContext context) { context.buildFullName(name()), enabled, dynamic, - buildMappers(context.createChildContext(name())), + buildMappers(context.createChildContext(name(), timeSeriesDimensionSubFields.value(), dynamic)), timeSeriesDimensionSubFields ); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java index a730d8c2da89e..82ff9ef818579 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java @@ -111,7 +111,7 @@ public RootObjectMapper.Builder addRuntimeFields(Map runti @Override public RootObjectMapper build(MapperBuilderContext context) { - Map mappers = buildMappers(context); + Map mappers = buildMappers(context.createChildContext(null, dynamic)); mappers.putAll(getAliasMappers(mappers, context)); return new RootObjectMapper( name(), @@ -294,7 +294,7 @@ RuntimeField getRuntimeField(String name) { @Override protected MapperMergeContext 
createChildContext(MapperMergeContext mapperMergeContext, String name) { assert Objects.equals(mapperMergeContext.getMapperBuilderContext().buildFullName("foo"), "foo"); - return mapperMergeContext; + return mapperMergeContext.createChildContext(null, dynamic); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java index 598a6383bfdaa..47efa0ca49771 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java @@ -250,8 +250,7 @@ public void readAndWriteValue(ByteBuffer byteBuffer, XContentBuilder b) throws I b.value(byteBuffer.get()); } - @Override - KnnByteVectorField createKnnVectorField(String name, byte[] vector, VectorSimilarityFunction function) { + private KnnByteVectorField createKnnVectorField(String name, byte[] vector, VectorSimilarityFunction function) { if (vector == null) { throw new IllegalArgumentException("vector value must not be null"); } @@ -261,11 +260,6 @@ KnnByteVectorField createKnnVectorField(String name, byte[] vector, VectorSimila return new KnnByteVectorField(name, vector, denseVectorFieldType); } - @Override - KnnFloatVectorField createKnnVectorField(String name, float[] vector, VectorSimilarityFunction function) { - throw new IllegalArgumentException("cannot create a float vector field from byte"); - } - @Override IndexFieldData.Builder fielddataBuilder(DenseVectorFieldType denseVectorFieldType, FieldDataContext fieldDataContext) { return new VectorIndexFieldData.Builder( @@ -452,8 +446,7 @@ public void readAndWriteValue(ByteBuffer byteBuffer, XContentBuilder b) throws I b.value(byteBuffer.getFloat()); } - @Override - KnnFloatVectorField createKnnVectorField(String name, float[] vector, VectorSimilarityFunction function) { + private KnnFloatVectorField createKnnVectorField(String name, float[] vector, VectorSimilarityFunction function) { if (vector == null) { throw new IllegalArgumentException("vector value must not be null"); } @@ -463,11 +456,6 @@ KnnFloatVectorField createKnnVectorField(String name, float[] vector, VectorSimi return new KnnFloatVectorField(name, vector, denseVectorFieldType); } - @Override - KnnByteVectorField createKnnVectorField(String name, byte[] vector, VectorSimilarityFunction function) { - throw new IllegalArgumentException("cannot create a byte vector field from float"); - } - @Override IndexFieldData.Builder fielddataBuilder(DenseVectorFieldType denseVectorFieldType, FieldDataContext fieldDataContext) { return new VectorIndexFieldData.Builder( @@ -615,10 +603,6 @@ ByteBuffer createByteBuffer(IndexVersion indexVersion, int numBytes) { public abstract void readAndWriteValue(ByteBuffer byteBuffer, XContentBuilder b) throws IOException; - abstract KnnFloatVectorField createKnnVectorField(String name, float[] vector, VectorSimilarityFunction function); - - abstract KnnByteVectorField createKnnVectorField(String name, byte[] vector, VectorSimilarityFunction function); - abstract IndexFieldData.Builder fielddataBuilder(DenseVectorFieldType denseVectorFieldType, FieldDataContext fieldDataContext); abstract void parseKnnVectorAndIndex(DocumentParserContext context, DenseVectorFieldMapper fieldMapper) throws IOException; @@ -1175,31 +1159,7 @@ public Query createKnnQuery( } public Query createExactKnnQuery(float[] queryVector) { - if (isIndexed() == false) { - 
throw new IllegalArgumentException( - "to perform knn search on field [" + name() + "], its mapping must have [index] set to [true]" - ); - } - if (queryVector.length != dims) { - throw new IllegalArgumentException( - "the query vector has a different dimension [" + queryVector.length + "] than the index vectors [" + dims + "]" - ); - } - elementType.checkVectorBounds(queryVector); - if (similarity == VectorSimilarity.DOT_PRODUCT || similarity == VectorSimilarity.COSINE) { - float squaredMagnitude = VectorUtil.dotProduct(queryVector, queryVector); - elementType.checkVectorMagnitude(similarity, ElementType.errorFloatElementsAppender(queryVector), squaredMagnitude); - if (similarity == VectorSimilarity.COSINE - && ElementType.FLOAT.equals(elementType) - && indexVersionCreated.onOrAfter(NORMALIZE_COSINE) - && isNotUnitVector(squaredMagnitude)) { - float length = (float) Math.sqrt(squaredMagnitude); - queryVector = Arrays.copyOf(queryVector, queryVector.length); - for (int i = 0; i < queryVector.length; i++) { - queryVector[i] /= length; - } - } - } + queryVector = validateAndNormalize(queryVector); VectorSimilarityFunction vectorSimilarityFunction = similarity.vectorSimilarityFunction(indexVersionCreated, elementType); return switch (elementType) { case BYTE -> { @@ -1242,12 +1202,38 @@ public Query createKnnQuery( Float similarityThreshold, BitSetProducer parentFilter ) { + queryVector = validateAndNormalize(queryVector); + Query knnQuery = switch (elementType) { + case BYTE -> { + byte[] bytes = new byte[queryVector.length]; + for (int i = 0; i < queryVector.length; i++) { + bytes[i] = (byte) queryVector[i]; + } + yield parentFilter != null + ? new ESDiversifyingChildrenByteKnnVectorQuery(name(), bytes, filter, numCands, parentFilter) + : new ESKnnByteVectorQuery(name(), bytes, numCands, filter); + } + case FLOAT -> parentFilter != null + ? new ESDiversifyingChildrenFloatKnnVectorQuery(name(), queryVector, filter, numCands, parentFilter) + : new ESKnnFloatVectorQuery(name(), queryVector, numCands, filter); + }; + + if (similarityThreshold != null) { + knnQuery = new VectorSimilarityQuery( + knnQuery, + similarityThreshold, + similarity.score(similarityThreshold, elementType, dims) + ); + } + return knnQuery; + } + + private float[] validateAndNormalize(float[] queryVector) { if (isIndexed() == false) { throw new IllegalArgumentException( "to perform knn search on field [" + name() + "], its mapping must have [index] set to [true]" ); } - if (queryVector.length != dims) { throw new IllegalArgumentException( "the query vector has a different dimension [" + queryVector.length + "] than the index vectors [" + dims + "]" @@ -1268,29 +1254,7 @@ && isNotUnitVector(squaredMagnitude)) { } } } - Query knnQuery = switch (elementType) { - case BYTE -> { - byte[] bytes = new byte[queryVector.length]; - for (int i = 0; i < queryVector.length; i++) { - bytes[i] = (byte) queryVector[i]; - } - yield parentFilter != null - ? new ESDiversifyingChildrenByteKnnVectorQuery(name(), bytes, filter, numCands, parentFilter) - : new ESKnnByteVectorQuery(name(), bytes, numCands, filter); - } - case FLOAT -> parentFilter != null - ? 
new ESDiversifyingChildrenFloatKnnVectorQuery(name(), queryVector, filter, numCands, parentFilter) - : new ESKnnFloatVectorQuery(name(), queryVector, numCands, filter); - }; - - if (similarityThreshold != null) { - knnQuery = new VectorSimilarityQuery( - knnQuery, - similarityThreshold, - similarity.score(similarityThreshold, elementType, dims) - ); - } - return knnQuery; + return queryVector; } VectorSimilarity getSimilarity() { diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java b/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java index ca9de756ca211..389485ac4eaf2 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java @@ -33,6 +33,7 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.routing.IndexRouting; import org.elasticsearch.common.lucene.search.Queries; +import org.elasticsearch.core.Predicates; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; import org.elasticsearch.index.mapper.IdFieldMapper; @@ -140,7 +141,12 @@ public Scorer scorer(LeafReaderContext context) throws IOException { * of the document that contains them. */ FixedBitSet hasRoutingValue = new FixedBitSet(leafReader.maxDoc()); - findSplitDocs(RoutingFieldMapper.NAME, ref -> false, leafReader, maybeWrapConsumer.apply(hasRoutingValue::set)); + findSplitDocs( + RoutingFieldMapper.NAME, + Predicates.never(), + leafReader, + maybeWrapConsumer.apply(hasRoutingValue::set) + ); IntConsumer bitSetConsumer = maybeWrapConsumer.apply(bitSet::set); findSplitDocs(IdFieldMapper.NAME, includeInShard, leafReader, docId -> { if (hasRoutingValue.get(docId) == false) { diff --git a/server/src/main/java/org/elasticsearch/index/translog/BufferedChecksumStreamInput.java b/server/src/main/java/org/elasticsearch/index/translog/BufferedChecksumStreamInput.java index 6ff91a688c97c..6d1456040c8fa 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/BufferedChecksumStreamInput.java +++ b/server/src/main/java/org/elasticsearch/index/translog/BufferedChecksumStreamInput.java @@ -48,6 +48,11 @@ public long getChecksum() { return this.digest.getValue(); } + @Override + public String readString() throws IOException { + return doReadString(readArraySize()); // always use the unoptimized slow path + } + @Override public byte readByte() throws IOException { final byte b = delegate.readByte(); diff --git a/server/src/main/java/org/elasticsearch/indices/FailureIndexNotSupportedException.java b/server/src/main/java/org/elasticsearch/indices/FailureIndexNotSupportedException.java new file mode 100644 index 0000000000000..90fdd364b7035 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/indices/FailureIndexNotSupportedException.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.indices; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.index.Index; +import org.elasticsearch.rest.RestStatus; + +import java.io.IOException; + +/** + * Exception indicating that one or more requested indices are failure indices. + */ +public final class FailureIndexNotSupportedException extends ElasticsearchException { + + public FailureIndexNotSupportedException(Index index) { + super("failure index not supported"); + setIndex(index); + } + + public FailureIndexNotSupportedException(StreamInput in) throws IOException { + super(in); + } + + @Override + public RestStatus status() { + return RestStatus.BAD_REQUEST; + } + +} diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java b/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java index 7394e5eb89458..a40a5ab2e2fe8 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Predicates; import org.elasticsearch.index.cache.query.QueryCacheStats; import org.elasticsearch.index.shard.ShardId; @@ -78,7 +79,7 @@ public IndicesQueryCache(Settings settings) { logger.debug("using [node] query cache with size [{}] max filter count [{}]", size, count); if (INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.get(settings)) { // Use the default skip_caching_factor (i.e., 10f) in Lucene - cache = new ElasticsearchLRUQueryCache(count, size.getBytes(), context -> true, 10f); + cache = new ElasticsearchLRUQueryCache(count, size.getBytes(), Predicates.always(), 10f); } else { cache = new ElasticsearchLRUQueryCache(count, size.getBytes()); } diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index b47d10882a5c1..3319b29df6dfa 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -85,6 +85,7 @@ import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.SlowLogFieldProvider; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.bulk.stats.BulkStats; import org.elasticsearch.index.cache.request.ShardRequestCache; @@ -736,7 +737,8 @@ private synchronized IndexService createIndexService( directoryFactories, () -> allowExpensiveQueries, indexNameExpressionResolver, - recoveryStateFactories + recoveryStateFactories, + loadSlowLogFieldProvider() ); for (IndexingOperationListener operationListener : indexingOperationListeners) { indexModule.addIndexOperationListener(operationListener); @@ -812,7 +814,8 @@ public synchronized MapperService createIndexMapperServiceForValidation(IndexMet directoryFactories, () -> allowExpensiveQueries, indexNameExpressionResolver, - recoveryStateFactories + recoveryStateFactories, + loadSlowLogFieldProvider() ); pluginsService.forEach(p -> p.onIndexModule(indexModule)); return indexModule.newIndexMapperService(clusterService, parserConfig, mapperRegistry, scriptService); @@ -1390,6 +1393,31 @@ int numPendingDeletes(Index index) { } } + // 
pkg-private for testing + SlowLogFieldProvider loadSlowLogFieldProvider() { + List slowLogFieldProviders = pluginsService.loadServiceProviders(SlowLogFieldProvider.class); + return new SlowLogFieldProvider() { + @Override + public void init(IndexSettings indexSettings) { + slowLogFieldProviders.forEach(provider -> provider.init(indexSettings)); + } + + @Override + public Map indexSlowLogFields() { + return slowLogFieldProviders.stream() + .flatMap(provider -> provider.indexSlowLogFields().entrySet().stream()) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + } + + @Override + public Map searchSlowLogFields() { + return slowLogFieldProviders.stream() + .flatMap(provider -> provider.searchSlowLogFields().entrySet().stream()) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + } + }; + } + /** * Checks if all pending deletes have completed. Used by tests to ensure we don't check directory contents * while deletion still ongoing. * The reason is that, on Windows, browsing the directory contents can interfere diff --git a/server/src/main/java/org/elasticsearch/indices/SystemIndices.java b/server/src/main/java/org/elasticsearch/indices/SystemIndices.java index f23f28e4c1047..3261ac83a7e67 100644 --- a/server/src/main/java/org/elasticsearch/indices/SystemIndices.java +++ b/server/src/main/java/org/elasticsearch/indices/SystemIndices.java @@ -33,6 +33,7 @@ import org.elasticsearch.core.Booleans; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Predicates; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; import org.elasticsearch.plugins.SystemIndexPlugin; @@ -384,11 +385,11 @@ static SystemIndexDescriptor findMatchingDescriptor(SystemIndexDescriptor[] inde public Predicate getProductSystemIndexNamePredicate(ThreadContext threadContext) { final String product = threadContext.getHeader(EXTERNAL_SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY); if (product == null) { - return name -> false; + return Predicates.never(); } final CharacterRunAutomaton automaton = productToSystemIndicesMatcher.get(product); if (automaton == null) { - return name -> false; + return Predicates.never(); } return automaton::run; } diff --git a/server/src/main/java/org/elasticsearch/internal/BuildExtension.java b/server/src/main/java/org/elasticsearch/internal/BuildExtension.java index 921577317604a..b1b9a568e3083 100644 --- a/server/src/main/java/org/elasticsearch/internal/BuildExtension.java +++ b/server/src/main/java/org/elasticsearch/internal/BuildExtension.java @@ -9,6 +9,7 @@ package org.elasticsearch.internal; import org.elasticsearch.Build; +import org.elasticsearch.env.BuildVersion; /** * Allows plugging in current build info. @@ -26,4 +27,14 @@ public interface BuildExtension { default boolean hasReleaseVersioning() { return true; } + + /** + * Returns the {@link BuildVersion} for the running Elasticsearch code. + */ + BuildVersion currentBuildVersion(); + + /** + * Returns the {@link BuildVersion} for a given version identifier. 
+ */ + BuildVersion fromVersionId(int versionId); } diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 165c5f6524104..8ff2ac5e5fca0 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -12,7 +12,6 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchTimeoutException; -import org.elasticsearch.Version; import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.bootstrap.BootstrapContext; @@ -47,6 +46,7 @@ import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.env.BuildVersion; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.NodeMetadata; @@ -340,7 +340,7 @@ public Node start() throws NodeValidationException { nodeEnvironment.nodeDataPaths() ); assert nodeMetadata != null; - assert nodeMetadata.nodeVersion().equals(Version.CURRENT); + assert nodeMetadata.nodeVersion().equals(BuildVersion.current()); assert nodeMetadata.nodeId().equals(localNodeFactory.getNode().getId()); } catch (IOException e) { assert false : e; diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index 9323ec63c0d2d..19a6d200189f2 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -1196,9 +1196,9 @@ private Module loadDiagnosticServices( var serverHealthIndicatorServices = Stream.of( new StableMasterHealthIndicatorService(coordinationDiagnosticsService, clusterService), - new RepositoryIntegrityHealthIndicatorService(clusterService), - new DiskHealthIndicatorService(clusterService), - new ShardsCapacityHealthIndicatorService(clusterService) + new RepositoryIntegrityHealthIndicatorService(clusterService, featureService), + new DiskHealthIndicatorService(clusterService, featureService), + new ShardsCapacityHealthIndicatorService(clusterService, featureService) ); var pluginHealthIndicatorServices = pluginsService.filterPlugins(HealthPlugin.class) .flatMap(plugin -> plugin.getHealthIndicatorServices().stream()); diff --git a/server/src/main/java/org/elasticsearch/node/NodeService.java b/server/src/main/java/org/elasticsearch/node/NodeService.java index 4b9e5dc83c538..87384b50d7ffd 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeService.java +++ b/server/src/main/java/org/elasticsearch/node/NodeService.java @@ -195,7 +195,8 @@ public NodeStats stats( adaptiveSelection ? responseCollectorService.getAdaptiveStats(searchTransportService.getPendingSearchRequests()) : null, scriptCache ? scriptService.cacheStats() : null, indexingPressure ? this.indexingPressure.stats() : null, - repositoriesStats ? this.repositoriesService.getRepositoriesThrottlingStats() : null + repositoriesStats ? 
this.repositoriesService.getRepositoriesThrottlingStats() : null, + null ); } diff --git a/server/src/main/java/org/elasticsearch/persistent/AllocatedPersistentTask.java b/server/src/main/java/org/elasticsearch/persistent/AllocatedPersistentTask.java index 0c003cda697d1..895fe65b92246 100644 --- a/server/src/main/java/org/elasticsearch/persistent/AllocatedPersistentTask.java +++ b/server/src/main/java/org/elasticsearch/persistent/AllocatedPersistentTask.java @@ -64,7 +64,7 @@ public void updatePersistentTaskState( final PersistentTaskState state, final ActionListener> listener ) { - persistentTasksService.sendUpdateStateRequest(persistentTaskId, allocationId, state, listener); + persistentTasksService.sendUpdateStateRequest(persistentTaskId, allocationId, state, null, listener); } public String getPersistentTaskId() { @@ -200,7 +200,8 @@ private void completeAndNotifyIfNeeded(@Nullable Exception failure, @Nullable St getAllocationId(), failure, localAbortReason, - new ActionListener>() { + null, + new ActionListener<>() { @Override public void onResponse(PersistentTasksCustomMetadata.PersistentTask persistentTask) { logger.trace("notification for task [{}] with id [{}] was successful", getAction(), getPersistentTaskId()); diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java index 63c97685c913e..b1deee6b21e0e 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java @@ -265,6 +265,7 @@ private void notifyMasterOfFailedTask( taskInProgress.getAllocationId(), originalException, null, + null, new ActionListener<>() { @Override public void onResponse(PersistentTask persistentTask) { @@ -300,7 +301,7 @@ private void cancelTask(Long allocationId) { if (task.markAsCancelled()) { // Cancel the local task using the task manager String reason = "task has been removed, cancelling locally"; - persistentTasksService.sendCancelRequest(task.getId(), reason, new ActionListener<>() { + persistentTasksService.sendCancelRequest(task.getId(), reason, null, new ActionListener<>() { @Override public void onResponse(ListTasksResponse cancelTasksResponse) { logger.trace( diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java index 227569341919a..5d3624238d0ce 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java @@ -52,15 +52,32 @@ public PersistentTasksService(ClusterService clusterService, ThreadPool threadPo /** * Notifies the master node to create new persistent task and to assign it to a node. */ + @Deprecated(forRemoval = true) public void sendStartRequest( final String taskId, final String taskName, final Params taskParams, final ActionListener> listener + ) { + sendStartRequest(taskId, taskName, taskParams, null, listener); + } + + /** + * Notifies the master node to create new persistent task and to assign it to a node. 
Accepts operation timeout as optional parameter + */ + public void sendStartRequest( + final String taskId, + final String taskName, + final Params taskParams, + final @Nullable TimeValue timeout, + final ActionListener> listener ) { @SuppressWarnings("unchecked") final ActionListener> wrappedListener = listener.map(t -> (PersistentTask) t); StartPersistentTaskAction.Request request = new StartPersistentTaskAction.Request(taskId, taskName, taskParams); + if (timeout != null) { + request.masterNodeTimeout(timeout); + } execute(request, StartPersistentTaskAction.INSTANCE, wrappedListener); } @@ -70,12 +87,14 @@ public void sendStartRequest( * At most one of {@code failure} and {@code localAbortReason} may be * provided. When both {@code failure} and {@code localAbortReason} are * {@code null}, the persistent task is considered as successfully completed. + * Accepts operation timeout as optional parameter */ public void sendCompletionRequest( final String taskId, final long taskAllocationId, final @Nullable Exception taskFailure, final @Nullable String localAbortReason, + final @Nullable TimeValue timeout, final ActionListener> listener ) { CompletionPersistentTaskAction.Request request = new CompletionPersistentTaskAction.Request( @@ -84,16 +103,27 @@ public void sendCompletionRequest( taskFailure, localAbortReason ); + if (timeout != null) { + request.masterNodeTimeout(timeout); + } execute(request, CompletionPersistentTaskAction.INSTANCE, listener); } /** - * Cancels a locally running task using the Task Manager API + * Cancels a locally running task using the Task Manager API. Accepts operation timeout as optional parameter */ - void sendCancelRequest(final long taskId, final String reason, final ActionListener listener) { + void sendCancelRequest( + final long taskId, + final String reason, + final @Nullable TimeValue timeout, + final ActionListener listener + ) { CancelTasksRequest request = new CancelTasksRequest(); request.setTargetTaskId(new TaskId(clusterService.localNode().getId(), taskId)); request.setReason(reason); + if (timeout != null) { + request.setTimeout(timeout); + } try { client.admin().cluster().cancelTasks(request, listener); } catch (Exception e) { @@ -105,12 +135,14 @@ void sendCancelRequest(final long taskId, final String reason, final ActionListe * Notifies the master node that the state of a persistent task has changed. *

    * Persistent task implementers shouldn't call this method directly and use - * {@link AllocatedPersistentTask#updatePersistentTaskState} instead + * {@link AllocatedPersistentTask#updatePersistentTaskState} instead. + * Accepts operation timeout as optional parameter */ void sendUpdateStateRequest( final String taskId, final long taskAllocationID, final PersistentTaskState taskState, + final @Nullable TimeValue timeout, final ActionListener> listener ) { UpdatePersistentTaskStatusAction.Request request = new UpdatePersistentTaskStatusAction.Request( @@ -118,14 +150,29 @@ void sendUpdateStateRequest( taskAllocationID, taskState ); + if (timeout != null) { + request.masterNodeTimeout(timeout); + } execute(request, UpdatePersistentTaskStatusAction.INSTANCE, listener); } + @Deprecated(forRemoval = true) + public void sendRemoveRequest(final String taskId, final ActionListener> listener) { + sendRemoveRequest(taskId, null, listener); + } + /** - * Notifies the master node to remove a persistent task from the cluster state + * Notifies the master node to remove a persistent task from the cluster state. Accepts operation timeout as optional parameter */ - public void sendRemoveRequest(final String taskId, final ActionListener> listener) { + public void sendRemoveRequest( + final String taskId, + final @Nullable TimeValue timeout, + final ActionListener> listener + ) { RemovePersistentTaskAction.Request request = new RemovePersistentTaskAction.Request(taskId); + if (timeout != null) { + request.masterNodeTimeout(timeout); + } execute(request, RemovePersistentTaskAction.INSTANCE, listener); } diff --git a/server/src/main/java/org/elasticsearch/plugins/MapperPlugin.java b/server/src/main/java/org/elasticsearch/plugins/MapperPlugin.java index 5124c94e545c0..401c014488f88 100644 --- a/server/src/main/java/org/elasticsearch/plugins/MapperPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/MapperPlugin.java @@ -8,6 +8,7 @@ package org.elasticsearch.plugins; +import org.elasticsearch.core.Predicates; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.RuntimeField; @@ -69,7 +70,7 @@ default Function> getFieldFilter() { * The default field predicate applied, which doesn't filter anything. That means that by default get mappings, get index * get field mappings and field capabilities API will return every field that's present in the mappings. */ - Predicate NOOP_FIELD_PREDICATE = field -> true; + Predicate NOOP_FIELD_PREDICATE = Predicates.always(); /** * The default field filter applied, which doesn't filter anything. 
That means that by default get mappings, get index diff --git a/server/src/main/java/org/elasticsearch/plugins/internal/RestExtension.java b/server/src/main/java/org/elasticsearch/plugins/internal/RestExtension.java index 4864e6bf31222..175d10a096b55 100644 --- a/server/src/main/java/org/elasticsearch/plugins/internal/RestExtension.java +++ b/server/src/main/java/org/elasticsearch/plugins/internal/RestExtension.java @@ -8,6 +8,7 @@ package org.elasticsearch.plugins.internal; +import org.elasticsearch.core.Predicates; import org.elasticsearch.rest.RestHandler; import org.elasticsearch.rest.action.cat.AbstractCatAction; @@ -38,12 +39,12 @@ static RestExtension allowAll() { return new RestExtension() { @Override public Predicate getCatActionsFilter() { - return action -> true; + return Predicates.always(); } @Override public Predicate getActionsFilter() { - return handler -> true; + return Predicates.always(); } }; } diff --git a/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java b/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java index c88bbcfa91b98..37f1850c1fb2d 100644 --- a/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.component.LifecycleListener; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; @@ -23,11 +24,13 @@ import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotId; +import org.elasticsearch.snapshots.SnapshotInfo; import java.io.IOException; import java.util.Collection; import java.util.Set; import java.util.concurrent.Executor; +import java.util.function.BooleanSupplier; public class FilterRepository implements Repository { @@ -47,8 +50,14 @@ public RepositoryMetadata getMetadata() { } @Override - public void getSnapshotInfo(GetSnapshotInfoContext context) { - in.getSnapshotInfo(context); + public void getSnapshotInfo( + Collection snapshotIds, + boolean abortOnFailure, + BooleanSupplier isCancelled, + CheckedConsumer consumer, + ActionListener listener + ) { + in.getSnapshotInfo(snapshotIds, abortOnFailure, isCancelled, consumer, listener); } @Override diff --git a/server/src/main/java/org/elasticsearch/repositories/InvalidRepository.java b/server/src/main/java/org/elasticsearch/repositories/InvalidRepository.java index 6bd967d84c89b..948ae747e11a9 100644 --- a/server/src/main/java/org/elasticsearch/repositories/InvalidRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/InvalidRepository.java @@ -15,6 +15,7 @@ import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; @@ -22,10 +23,12 @@ import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotId; +import 
org.elasticsearch.snapshots.SnapshotInfo; import java.io.IOException; import java.util.Collection; import java.util.concurrent.Executor; +import java.util.function.BooleanSupplier; /** * Represents a repository that exists in the cluster state but could not be instantiated on a node, typically due to invalid configuration. @@ -54,8 +57,14 @@ public RepositoryMetadata getMetadata() { } @Override - public void getSnapshotInfo(GetSnapshotInfoContext context) { - throw createCreationException(); + public void getSnapshotInfo( + Collection snapshotIds, + boolean abortOnFailure, + BooleanSupplier isCancelled, + CheckedConsumer consumer, + ActionListener listener + ) { + listener.onFailure(createCreationException()); } @Override diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesMetrics.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesMetrics.java index b4d79d89ec4c6..50aa7881cd2b6 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesMetrics.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesMetrics.java @@ -13,6 +13,7 @@ import org.elasticsearch.telemetry.metric.MeterRegistry; public record RepositoriesMetrics( + MeterRegistry meterRegistry, LongCounter requestCounter, LongCounter exceptionCounter, LongCounter throttleCounter, @@ -36,6 +37,7 @@ public record RepositoriesMetrics( public RepositoriesMetrics(MeterRegistry meterRegistry) { this( + meterRegistry, meterRegistry.registerLongCounter(METRIC_REQUESTS_TOTAL, "repository request counter", "unit"), meterRegistry.registerLongCounter(METRIC_EXCEPTIONS_TOTAL, "repository request exception counter", "unit"), meterRegistry.registerLongCounter(METRIC_THROTTLES_TOTAL, "repository request throttle counter", "unit"), diff --git a/server/src/main/java/org/elasticsearch/repositories/Repository.java b/server/src/main/java/org/elasticsearch/repositories/Repository.java index 5782dedf3cfbc..a90b0a217285c 100644 --- a/server/src/main/java/org/elasticsearch/repositories/Repository.java +++ b/server/src/main/java/org/elasticsearch/repositories/Repository.java @@ -15,6 +15,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; @@ -31,6 +32,7 @@ import java.util.List; import java.util.Set; import java.util.concurrent.Executor; +import java.util.function.BooleanSupplier; import java.util.function.Function; /** @@ -70,11 +72,24 @@ default Repository create(RepositoryMetadata metadata, Function snapshotIds, + boolean abortOnFailure, + BooleanSupplier isCancelled, + CheckedConsumer consumer, + ActionListener listener + ); /** * Reads a single snapshot description from the repository @@ -83,7 +98,7 @@ default Repository create(RepositoryMetadata metadata, Function listener) { - getSnapshotInfo(new GetSnapshotInfoContext(List.of(snapshotId), true, () -> false, (context, snapshotInfo) -> { + getSnapshotInfo(List.of(snapshotId), true, () -> false, snapshotInfo -> { assert Repository.assertSnapshotMetaThread(); listener.onResponse(snapshotInfo); }, new ActionListener<>() { @@ -96,7 +111,7 @@ public void onResponse(Void o) { public void onFailure(Exception e) { listener.onFailure(e); } - })); + }); } /** diff --git 
a/server/src/main/java/org/elasticsearch/repositories/ResolvedRepositories.java b/server/src/main/java/org/elasticsearch/repositories/ResolvedRepositories.java new file mode 100644 index 0000000000000..ab4821ad942b0 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/repositories/ResolvedRepositories.java @@ -0,0 +1,81 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.repositories; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.RepositoriesMetadata; +import org.elasticsearch.cluster.metadata.RepositoryMetadata; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.regex.Regex; + +import java.util.ArrayList; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Set; + +/** + * The result of calling {@link #resolve(ClusterState, String[])} to resolve a description of some snapshot repositories (from a path + * component of a request to the get-repositories or get-snapshots APIs) against the known repositories in the cluster state: the + * {@link RepositoryMetadata} for the extant repositories that match the description, together with a list of the parts of the description + * that failed to match any known repository. + * + * @param repositoryMetadata The {@link RepositoryMetadata} for the repositories that matched the description. + * @param missing The parts of the description which matched no repositories. + */ +public record ResolvedRepositories(List repositoryMetadata, List missing) { + + public static final String ALL_PATTERN = "_all"; + + public static boolean isMatchAll(String[] patterns) { + return patterns.length == 0 + || (patterns.length == 1 && (ALL_PATTERN.equalsIgnoreCase(patterns[0]) || Regex.isMatchAllPattern(patterns[0]))); + } + + public static ResolvedRepositories resolve(ClusterState state, String[] patterns) { + final var repositories = RepositoriesMetadata.get(state); + if (isMatchAll(patterns)) { + return new ResolvedRepositories(repositories.repositories(), List.of()); + } + + final List missingRepositories = new ArrayList<>(); + final List includePatterns = new ArrayList<>(); + final List excludePatterns = new ArrayList<>(); + boolean seenWildcard = false; + for (final var pattern : patterns) { + if (seenWildcard && pattern.length() > 1 && pattern.startsWith("-")) { + excludePatterns.add(pattern.substring(1)); + } else { + if (Regex.isSimpleMatchPattern(pattern)) { + seenWildcard = true; + } else { + if (repositories.repository(pattern) == null) { + missingRepositories.add(pattern); + } + } + includePatterns.add(pattern); + } + } + final var excludes = excludePatterns.toArray(Strings.EMPTY_ARRAY); + final Set repositoryListBuilder = new LinkedHashSet<>(); // to keep insertion order + for (String repositoryOrPattern : includePatterns) { + for (RepositoryMetadata repository : repositories.repositories()) { + if (repositoryListBuilder.contains(repository) == false + && Regex.simpleMatch(repositoryOrPattern, repository.name()) + && Regex.simpleMatch(excludes, repository.name()) == false) { + repositoryListBuilder.add(repository); + } + } + } + return new ResolvedRepositories(List.copyOf(repositoryListBuilder), missingRepositories); + } + + 
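// Illustrative usage sketch, not part of this change set: how a caller (for example a
// get-snapshots style transport action) might use the ResolvedRepositories helper introduced
// above. The wrapper class and method below are hypothetical; ResolvedRepositories, its record
// accessors and RepositoryMissingException are existing types, and resolve() keeps matching
// repositories in insertion order via the LinkedHashSet above.

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.RepositoryMetadata;
import org.elasticsearch.repositories.RepositoryMissingException;
import org.elasticsearch.repositories.ResolvedRepositories;

import java.util.List;

class ResolvedRepositoriesUsageSketch {

    // Resolve patterns such as ["backup-*", "-backup-old"] against the cluster state and fail
    // fast when a concrete (non-wildcard) name matched no registered repository.
    static List<RepositoryMetadata> resolveOrThrow(ClusterState state, String[] patterns) {
        final ResolvedRepositories resolved = ResolvedRepositories.resolve(state, patterns);
        if (resolved.hasMissingRepositories()) {
            throw new RepositoryMissingException(resolved.missing().get(0));
        }
        return resolved.repositoryMetadata();
    }
}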
public boolean hasMissingRepositories() { + return missing.isEmpty() == false; + } +} diff --git a/server/src/main/java/org/elasticsearch/repositories/UnknownTypeRepository.java b/server/src/main/java/org/elasticsearch/repositories/UnknownTypeRepository.java index 30f167d8c5cf6..7821c865e166c 100644 --- a/server/src/main/java/org/elasticsearch/repositories/UnknownTypeRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/UnknownTypeRepository.java @@ -15,6 +15,7 @@ import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; @@ -22,10 +23,12 @@ import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotId; +import org.elasticsearch.snapshots.SnapshotInfo; import java.io.IOException; import java.util.Collection; import java.util.concurrent.Executor; +import java.util.function.BooleanSupplier; /** * This class represents a repository that could not be initialized due to unknown type. @@ -52,8 +55,14 @@ public RepositoryMetadata getMetadata() { } @Override - public void getSnapshotInfo(GetSnapshotInfoContext context) { - throw createUnknownTypeException(); + public void getSnapshotInfo( + Collection snapshotIds, + boolean abortOnFailure, + BooleanSupplier isCancelled, + CheckedConsumer consumer, + ActionListener listener + ) { + listener.onFailure(createUnknownTypeException()); } @Override diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index b8b0498d95125..52cfa2fd5275f 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -97,7 +97,6 @@ import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.repositories.FinalizeSnapshotContext; -import org.elasticsearch.repositories.GetSnapshotInfoContext; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.IndexMetaDataGenerations; import org.elasticsearch.repositories.RepositoriesService; @@ -151,6 +150,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BooleanSupplier; import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Supplier; @@ -1778,7 +1778,20 @@ public void onAfter() { } @Override - public void getSnapshotInfo(GetSnapshotInfoContext context) { + public void getSnapshotInfo( + Collection snapshotIds, + boolean abortOnFailure, + BooleanSupplier isCancelled, + CheckedConsumer consumer, + ActionListener listener + ) { + final var context = new GetSnapshotInfoContext(snapshotIds, abortOnFailure, isCancelled, (ctx, sni) -> { + try { + consumer.accept(sni); + } catch (Exception e) { + ctx.onFailure(e); + } + }, listener); // put snapshot info downloads into a task queue instead of pushing them all into the queue to not completely monopolize the // snapshot 
meta pool for a single request final int workers = Math.min(threadPool.info(ThreadPool.Names.SNAPSHOT_META).getMax(), context.snapshotIds().size()); @@ -2617,9 +2630,11 @@ public String toString() { if (snapshotIdsWithMissingDetails.isEmpty() == false) { final Map extraDetailsMap = new ConcurrentHashMap<>(); getSnapshotInfo( - new GetSnapshotInfoContext(snapshotIdsWithMissingDetails, false, () -> false, (context, snapshotInfo) -> { - extraDetailsMap.put(snapshotInfo.snapshotId(), SnapshotDetails.fromSnapshotInfo(snapshotInfo)); - }, ActionListener.runAfter(new ActionListener<>() { + snapshotIdsWithMissingDetails, + false, + () -> false, + snapshotInfo -> extraDetailsMap.put(snapshotInfo.snapshotId(), SnapshotDetails.fromSnapshotInfo(snapshotInfo)), + ActionListener.runAfter(new ActionListener<>() { @Override public void onResponse(Void aVoid) { logger.info( @@ -2636,7 +2651,7 @@ public void onResponse(Void aVoid) { public void onFailure(Exception e) { logger.warn("Failure when trying to load missing details from snapshot metadata", e); } - }, () -> filterRepositoryDataStep.onResponse(repositoryData.withExtraDetails(extraDetailsMap)))) + }, () -> filterRepositoryDataStep.onResponse(repositoryData.withExtraDetails(extraDetailsMap))) ); } else { filterRepositoryDataStep.onResponse(repositoryData); diff --git a/server/src/main/java/org/elasticsearch/repositories/GetSnapshotInfoContext.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/GetSnapshotInfoContext.java similarity index 92% rename from server/src/main/java/org/elasticsearch/repositories/GetSnapshotInfoContext.java rename to server/src/main/java/org/elasticsearch/repositories/blobstore/GetSnapshotInfoContext.java index ec8777e71ba9b..96782bca31a15 100644 --- a/server/src/main/java/org/elasticsearch/repositories/GetSnapshotInfoContext.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/GetSnapshotInfoContext.java @@ -5,12 +5,13 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -package org.elasticsearch.repositories; +package org.elasticsearch.repositories.blobstore; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.util.concurrent.CountDown; +import org.elasticsearch.repositories.Repository; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.threadpool.ThreadPool; @@ -21,9 +22,9 @@ import java.util.function.BooleanSupplier; /** - * Describes the context of fetching one or more {@link SnapshotInfo} via {@link Repository#getSnapshotInfo(GetSnapshotInfoContext)}. + * Describes the context of fetching one or more {@link SnapshotInfo} via {@link Repository#getSnapshotInfo}. 
*/ -public final class GetSnapshotInfoContext implements ActionListener { +final class GetSnapshotInfoContext implements ActionListener { private static final Logger logger = LogManager.getLogger(GetSnapshotInfoContext.class); @@ -59,7 +60,7 @@ public final class GetSnapshotInfoContext implements ActionListener snapshotIds, boolean abortOnFailure, BooleanSupplier isCancelled, @@ -77,28 +78,28 @@ public GetSnapshotInfoContext( this.doneListener = listener; } - public List snapshotIds() { + List snapshotIds() { return snapshotIds; } /** * @return true if fetching {@link SnapshotInfo} should be stopped after encountering any exception */ - public boolean abortOnFailure() { + boolean abortOnFailure() { return abortOnFailure; } /** * @return true if fetching {@link SnapshotInfo} has been cancelled */ - public boolean isCancelled() { + boolean isCancelled() { return isCancelled.getAsBoolean(); } /** * @return true if fetching {@link SnapshotInfo} is either complete or should be stopped because of an error */ - public boolean done() { + boolean done() { return counter.isCountedDown(); } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/MeteredBlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/MeteredBlobStoreRepository.java index 6ecab2f8c77f2..c5ea99b0e5c14 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/MeteredBlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/MeteredBlobStoreRepository.java @@ -14,7 +14,6 @@ import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.indices.recovery.RecoverySettings; -import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.RepositoryInfo; import org.elasticsearch.repositories.RepositoryStatsSnapshot; import org.elasticsearch.threadpool.ThreadPool; @@ -24,7 +23,6 @@ public abstract class MeteredBlobStoreRepository extends BlobStoreRepository { private final RepositoryInfo repositoryInfo; - protected final RepositoriesMetrics repositoriesMetrics; public MeteredBlobStoreRepository( RepositoryMetadata metadata, @@ -33,11 +31,9 @@ public MeteredBlobStoreRepository( BigArrays bigArrays, RecoverySettings recoverySettings, BlobPath basePath, - Map location, - RepositoriesMetrics repositoriesMetrics + Map location ) { super(metadata, namedXContentRegistry, clusterService, bigArrays, recoverySettings, basePath); - this.repositoriesMetrics = repositoriesMetrics; ThreadPool threadPool = clusterService.getClusterApplierService().threadPool(); this.repositoryInfo = new RepositoryInfo( UUIDs.randomBase64UUID(), diff --git a/server/src/main/java/org/elasticsearch/rest/RestRequest.java b/server/src/main/java/org/elasticsearch/rest/RestRequest.java index e3e27ddf5cf5b..66ba0c743813e 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestRequest.java +++ b/server/src/main/java/org/elasticsearch/rest/RestRequest.java @@ -48,11 +48,7 @@ public class RestRequest implements ToXContent.Params, Traceable { - @Deprecated() - // TODO remove once Serverless is updated - public static final String RESPONSE_RESTRICTED = "responseRestricted"; - // TODO rename to `pathRestricted` once Serverless is updated - public static final String PATH_RESTRICTED = "responseRestricted"; + public static final String PATH_RESTRICTED = "pathRestricted"; // tchar pattern as defined by RFC7230 section 3.2.6 private static final Pattern TCHAR_PATTERN = 
Pattern.compile("[a-zA-Z0-9!#$%&'*+\\-.\\^_`|~]+"); @@ -629,12 +625,6 @@ public void markPathRestricted(String restriction) { consumedParams.add(PATH_RESTRICTED); } - @Deprecated() - // TODO remove once Serverless is updated - public void markResponseRestricted(String restriction) { - markPathRestricted(restriction); - } - @Override public String getSpanId() { return "rest-" + getRequestId(); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteDesiredBalanceAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteDesiredBalanceAction.java index 66382c20cae82..f0b516a876622 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteDesiredBalanceAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteDesiredBalanceAction.java @@ -13,11 +13,14 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import java.util.List; +@ServerlessScope(Scope.INTERNAL) public class RestDeleteDesiredBalanceAction extends BaseRestHandler { @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetDesiredBalanceAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetDesiredBalanceAction.java index a93c1e3d04fd6..0bb7cc5ff7473 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetDesiredBalanceAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetDesiredBalanceAction.java @@ -13,11 +13,14 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.io.IOException; import java.util.List; +@ServerlessScope(Scope.INTERNAL) public class RestGetDesiredBalanceAction extends BaseRestHandler { @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java index 5cc77d3d50a01..7482ae7683b4a 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java @@ -9,6 +9,7 @@ package org.elasticsearch.rest.action.admin.cluster; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; +import org.elasticsearch.action.admin.cluster.snapshots.get.SnapshotSortKey; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.rest.BaseRestHandler; @@ -59,7 +60,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC GetSnapshotsRequest getSnapshotsRequest = new GetSnapshotsRequest(repositories).snapshots(snapshots); getSnapshotsRequest.ignoreUnavailable(request.paramAsBoolean("ignore_unavailable", getSnapshotsRequest.ignoreUnavailable())); getSnapshotsRequest.verbose(request.paramAsBoolean("verbose", getSnapshotsRequest.verbose())); - final 
GetSnapshotsRequest.SortBy sort = GetSnapshotsRequest.SortBy.of(request.param("sort", getSnapshotsRequest.sort().toString())); + final SnapshotSortKey sort = SnapshotSortKey.of(request.param("sort", getSnapshotsRequest.sort().toString())); getSnapshotsRequest.sort(sort); final int size = request.paramAsInt("size", getSnapshotsRequest.size()); getSnapshotsRequest.size(size); @@ -67,7 +68,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC getSnapshotsRequest.offset(offset); final String afterString = request.param("after"); if (afterString != null) { - getSnapshotsRequest.after(GetSnapshotsRequest.After.fromQueryParam(afterString)); + getSnapshotsRequest.after(SnapshotSortKey.decodeAfterQueryParam(afterString)); } final String fromSortValue = request.param("from_sort_value"); if (fromSortValue != null) { diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java index 068c809554631..570fb0ebc7c77 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java @@ -18,6 +18,7 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStats; import org.elasticsearch.common.Strings; import org.elasticsearch.common.Table; import org.elasticsearch.common.unit.ByteSizeValue; @@ -37,6 +38,8 @@ @ServerlessScope(Scope.INTERNAL) public class RestAllocationAction extends AbstractCatAction { + private static final String UNASSIGNED = "UNASSIGNED"; + @Override public List routes() { return List.of(new Route(GET, "/_cat/allocation"), new Route(GET, "/_cat/allocation/{nodes}")); @@ -67,9 +70,10 @@ public void processResponse(final ClusterStateResponse state) { statsRequest.setIncludeShardsStats(false); statsRequest.clear() .addMetric(NodesStatsRequestParameters.Metric.FS.metricName()) + .addMetric(NodesStatsRequestParameters.Metric.ALLOCATIONS.metricName()) .indices(new CommonStatsFlags(CommonStatsFlags.Flag.Store)); - client.admin().cluster().nodesStats(statsRequest, new RestResponseListener(channel) { + client.admin().cluster().nodesStats(statsRequest, new RestResponseListener<>(channel) { @Override public RestResponse buildResponse(NodesStatsResponse stats) throws Exception { Table tab = buildTable(request, state, stats); @@ -86,6 +90,9 @@ protected Table getTableWithHeader(final RestRequest request) { final Table table = new Table(); table.startHeaders(); table.addCell("shards", "alias:s;text-align:right;desc:number of shards on node"); + table.addCell("shards.undesired", "text-align:right;desc:number of shards that are scheduled to be moved elsewhere in the cluster"); + table.addCell("write_load.forecast", "alias:wlf,writeLoadForecast;text-align:right;desc:sum of index write load forecasts"); + table.addCell("disk.indices.forecast", "alias:dif,diskIndicesForecast;text-align:right;desc:sum of shard size forecasts"); table.addCell("disk.indices", "alias:di,diskIndices;text-align:right;desc:disk used by ES indices"); table.addCell("disk.used", "alias:du,diskUsed;text-align:right;desc:disk used (total, not just ES)"); table.addCell("disk.avail", "alias:da,diskAvail;text-align:right;desc:disk available"); @@ -100,22 +107,17 @@ protected Table getTableWithHeader(final 
RestRequest request) { } private Table buildTable(RestRequest request, final ClusterStateResponse state, final NodesStatsResponse stats) { - final Map allocs = new HashMap<>(); + final Map shardCounts = new HashMap<>(); for (ShardRouting shard : state.getState().routingTable().allShardsIterator()) { - String nodeId = "UNASSIGNED"; - if (shard.assignedToNode()) { - nodeId = shard.currentNodeId(); - } - allocs.merge(nodeId, 1, Integer::sum); + String nodeId = shard.assignedToNode() ? shard.currentNodeId() : UNASSIGNED; + shardCounts.merge(nodeId, 1, Integer::sum); } Table table = getTableWithHeader(request); for (NodeStats nodeStats : stats.getNodes()) { DiscoveryNode node = nodeStats.getNode(); - int shardCount = allocs.getOrDefault(node.getId(), 0); - ByteSizeValue total = nodeStats.getFs().getTotal().getTotal(); ByteSizeValue avail = nodeStats.getFs().getTotal().getAvailable(); // if we don't know how much we use (non data nodes), it means 0 @@ -127,9 +129,13 @@ private Table buildTable(RestRequest request, final ClusterStateResponse state, diskPercent = (short) (used * 100 / (used + avail.getBytes())); } } + NodeAllocationStats nodeAllocationStats = nodeStats.getNodeAllocationStats(); table.startRow(); - table.addCell(shardCount); + table.addCell(shardCounts.getOrDefault(node.getId(), 0)); + table.addCell(nodeAllocationStats != null ? nodeAllocationStats.undesiredShards() : null); + table.addCell(nodeAllocationStats != null ? nodeAllocationStats.forecastedIngestLoad() : null); + table.addCell(nodeAllocationStats != null ? ByteSizeValue.ofBytes(nodeAllocationStats.forecastedDiskUsage()) : null); table.addCell(nodeStats.getIndices().getStore().size()); table.addCell(used < 0 ? null : ByteSizeValue.ofBytes(used)); table.addCell(avail.getBytes() < 0 ? 
null : avail); @@ -142,10 +148,12 @@ private Table buildTable(RestRequest request, final ClusterStateResponse state, table.endRow(); } - final String UNASSIGNED = "UNASSIGNED"; - if (allocs.containsKey(UNASSIGNED)) { + if (shardCounts.containsKey(UNASSIGNED)) { table.startRow(); - table.addCell(allocs.get(UNASSIGNED)); + table.addCell(shardCounts.get(UNASSIGNED)); + table.addCell(null); + table.addCell(null); + table.addCell(null); table.addCell(null); table.addCell(null); table.addCell(null); @@ -160,5 +168,4 @@ private Table buildTable(RestRequest request, final ClusterStateResponse state, return table; } - } diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java index 39045a99aa4a2..e5e0f9ee926f3 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java @@ -57,7 +57,6 @@ import org.elasticsearch.search.suggest.completion.CompletionStats; import java.util.List; -import java.util.Locale; import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.rest.RestRequest.Method.GET; @@ -316,7 +315,7 @@ protected Table getTableWithHeader(final RestRequest request) { table.addCell( "shard_stats.total_count", - "alias:sstc,shardStatsTotalCount;default:false;text-align:right;desc:number of shards assigned" + "alias:sstc,shards,shardStatsTotalCount;default:false;text-align:right;desc:number of shards assigned" ); table.addCell("mappings.total_count", "alias:mtc,mappingsTotalCount;default:false;text-align:right;desc:number of mappings"); @@ -375,14 +374,14 @@ Table buildTable( ByteSizeValue diskTotal = null; ByteSizeValue diskUsed = null; ByteSizeValue diskAvailable = null; - String diskUsedPercent = null; + RestTable.FormattedDouble diskUsedPercent = null; if (fsInfo != null) { diskTotal = fsInfo.getTotal().getTotal(); diskAvailable = fsInfo.getTotal().getAvailable(); diskUsed = ByteSizeValue.ofBytes(diskTotal.getBytes() - diskAvailable.getBytes()); double diskUsedRatio = diskTotal.getBytes() == 0 ? 1.0 : (double) diskUsed.getBytes() / diskTotal.getBytes(); - diskUsedPercent = String.format(Locale.ROOT, "%.2f", 100.0 * diskUsedRatio); + diskUsedPercent = RestTable.FormattedDouble.format2DecimalPlaces(100.0 * diskUsedRatio); } table.addCell(diskTotal); table.addCell(diskUsed); @@ -408,17 +407,17 @@ Table buildTable( table.addCell( hasLoadAverage == false || osStats.getCpu().getLoadAverage()[0] == -1 ? null - : String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[0]) + : RestTable.FormattedDouble.format2DecimalPlaces(osStats.getCpu().getLoadAverage()[0]) ); table.addCell( hasLoadAverage == false || osStats.getCpu().getLoadAverage()[1] == -1 ? null - : String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[1]) + : RestTable.FormattedDouble.format2DecimalPlaces(osStats.getCpu().getLoadAverage()[1]) ); table.addCell( hasLoadAverage == false || osStats.getCpu().getLoadAverage()[2] == -1 ? null - : String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[2]) + : RestTable.FormattedDouble.format2DecimalPlaces(osStats.getCpu().getLoadAverage()[2]) ); table.addCell(jvmStats == null ? 
null : jvmStats.getUptime()); diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java index 3f13205aad6b4..9b4c6534a452f 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java @@ -9,7 +9,6 @@ package org.elasticsearch.rest.action.cat; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.admin.cluster.repositories.get.TransportGetRepositoriesAction; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; import org.elasticsearch.client.internal.node.NodeClient; @@ -17,6 +16,7 @@ import org.elasticsearch.common.Table; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.repositories.ResolvedRepositories; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.Scope; @@ -50,7 +50,7 @@ public String getName() { @Override protected RestChannelConsumer doCatRequest(final RestRequest request, NodeClient client) { - final String[] matchAll = { TransportGetRepositoriesAction.ALL_PATTERN }; + final String[] matchAll = { ResolvedRepositories.ALL_PATTERN }; GetSnapshotsRequest getSnapshotsRequest = new GetSnapshotsRequest().repositories(request.paramAsStringArray("repository", matchAll)) .snapshots(matchAll); diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestTable.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestTable.java index 6845fec4db6fe..cfe5d6d2aef39 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestTable.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestTable.java @@ -496,4 +496,24 @@ public boolean isReversed() { return reverse; } } + + /** + * A formatted number, such that it sorts according to its numeric value but captures a specific string representation too + */ + record FormattedDouble(String displayValue, double numericValue) implements Comparable { + + static FormattedDouble format2DecimalPlaces(double numericValue) { + return new FormattedDouble(Strings.format("%.2f", numericValue), numericValue); + } + + @Override + public int compareTo(FormattedDouble other) { + return Double.compare(numericValue, other.numericValue); + } + + @Override + public String toString() { + return displayValue; + } + } } diff --git a/server/src/main/java/org/elasticsearch/search/SearchHit.java b/server/src/main/java/org/elasticsearch/search/SearchHit.java index fe11aa8af39f4..60ced289929a0 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchHit.java +++ b/server/src/main/java/org/elasticsearch/search/SearchHit.java @@ -204,7 +204,7 @@ public SearchHit( this.innerHits = innerHits; this.documentFields = documentFields; this.metaFields = metaFields; - this.refCounted = refCounted == null ? LeakTracker.wrap(new SimpleRefCounted()) : ALWAYS_REFERENCED; + this.refCounted = refCounted == null ? 
LeakTracker.wrap(new SimpleRefCounted()) : refCounted; } public static SearchHit readFrom(StreamInput in, boolean pooled) throws IOException { @@ -233,8 +233,10 @@ public static SearchHit readFrom(StreamInput in, boolean pooled) throws IOExcept } final Map documentFields = in.readMap(DocumentField::new); final Map metaFields = in.readMap(DocumentField::new); - final Map highlightFields = in.readMapValues(HighlightField::new, HighlightField::name); - final SearchSortValues sortValues = new SearchSortValues(in); + Map highlightFields = in.readMapValues(HighlightField::new, HighlightField::name); + highlightFields = highlightFields.isEmpty() ? null : unmodifiableMap(highlightFields); + + final SearchSortValues sortValues = SearchSortValues.readFrom(in); final Map matchedQueries; if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { @@ -257,12 +259,17 @@ public static SearchHit readFrom(StreamInput in, boolean pooled) throws IOExcept index = shardTarget.getIndex(); clusterAlias = shardTarget.getClusterAlias(); } + + boolean isPooled = pooled && source != null; final Map innerHits; int size = in.readVInt(); if (size > 0) { innerHits = Maps.newMapWithExpectedSize(size); for (int i = 0; i < size; i++) { - innerHits.put(in.readString(), SearchHits.readFrom(in, pooled)); + var key = in.readString(); + var nestedHits = SearchHits.readFrom(in, pooled); + innerHits.put(key, nestedHits); + isPooled = isPooled || nestedHits.isPooled(); } } else { innerHits = null; @@ -277,7 +284,7 @@ public static SearchHit readFrom(StreamInput in, boolean pooled) throws IOExcept seqNo, primaryTerm, source, - unmodifiableMap(highlightFields), + highlightFields, sortValues, matchedQueries, explanation, @@ -288,7 +295,7 @@ public static SearchHit readFrom(StreamInput in, boolean pooled) throws IOExcept innerHits, documentFields, metaFields, - pooled ? null : ALWAYS_REFERENCED + isPooled ? 
null : ALWAYS_REFERENCED ); } diff --git a/server/src/main/java/org/elasticsearch/search/SearchHits.java b/server/src/main/java/org/elasticsearch/search/SearchHits.java index ce8ccf4b7f0e6..d559fc60fa72d 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchHits.java +++ b/server/src/main/java/org/elasticsearch/search/SearchHits.java @@ -132,24 +132,31 @@ public static SearchHits readFrom(StreamInput in, boolean pooled) throws IOExcep final float maxScore = in.readFloat(); int size = in.readVInt(); final SearchHit[] hits; + boolean isPooled = false; if (size == 0) { hits = EMPTY; } else { hits = new SearchHit[size]; for (int i = 0; i < hits.length; i++) { - hits[i] = SearchHit.readFrom(in, pooled); + var hit = SearchHit.readFrom(in, pooled); + hits[i] = hit; + isPooled = isPooled || hit.isPooled(); } } var sortFields = in.readOptionalArray(Lucene::readSortField, SortField[]::new); var collapseField = in.readOptionalString(); var collapseValues = in.readOptionalArray(Lucene::readSortValue, Object[]::new); - if (pooled) { + if (isPooled) { return new SearchHits(hits, totalHits, maxScore, sortFields, collapseField, collapseValues); } else { return unpooled(hits, totalHits, maxScore, sortFields, collapseField, collapseValues); } } + public boolean isPooled() { + return refCounted != ALWAYS_REFERENCED; + } + @Override public void writeTo(StreamOutput out) throws IOException { assert hasReferences(); diff --git a/server/src/main/java/org/elasticsearch/search/SearchSortValues.java b/server/src/main/java/org/elasticsearch/search/SearchSortValues.java index b82e6632ca9ec..38bc705bdf5ae 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchSortValues.java +++ b/server/src/main/java/org/elasticsearch/search/SearchSortValues.java @@ -32,8 +32,7 @@ public class SearchSortValues implements ToXContentFragment, Writeable { private final Object[] rawSortValues; SearchSortValues(Object[] sortValues) { - this.formattedSortValues = Objects.requireNonNull(sortValues, "sort values must not be empty"); - this.rawSortValues = EMPTY_ARRAY; + this(Objects.requireNonNull(sortValues, "sort values must not be empty"), EMPTY_ARRAY); } public SearchSortValues(Object[] rawSortValues, DocValueFormat[] sortValueFormats) { @@ -52,9 +51,18 @@ public SearchSortValues(Object[] rawSortValues, DocValueFormat[] sortValueFormat } } - SearchSortValues(StreamInput in) throws IOException { - this.formattedSortValues = in.readArray(Lucene::readSortValue, Object[]::new); - this.rawSortValues = in.readArray(Lucene::readSortValue, Object[]::new); + public static SearchSortValues readFrom(StreamInput in) throws IOException { + Object[] formattedSortValues = in.readArray(Lucene::readSortValue, Object[]::new); + Object[] rawSortValues = in.readArray(Lucene::readSortValue, Object[]::new); + if (formattedSortValues.length == 0 && rawSortValues.length == 0) { + return EMPTY; + } + return new SearchSortValues(formattedSortValues, rawSortValues); + } + + private SearchSortValues(Object[] formattedSortValues, Object[] rawSortValues) { + this.formattedSortValues = formattedSortValues; + this.rawSortValues = rawSortValues; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationInitializationException.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationInitializationException.java index f477cd884f79a..efe3af7ca27bf 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationInitializationException.java +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationInitializationException.java @@ -17,10 +17,6 @@ */ public class AggregationInitializationException extends ElasticsearchException { - public AggregationInitializationException(String msg) { - super(msg); - } - public AggregationInitializationException(String msg, Throwable cause) { super(msg, cause); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterByFilterAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterByFilterAggregator.java index 3a2cce587f34f..59b591094ae3a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterByFilterAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterByFilterAggregator.java @@ -296,13 +296,15 @@ class MatchCollector implements LeafCollector { @Override public void collect(int docId) throws IOException { - collectBucket(subCollector, docId, filterOrd); + collectExistingBucket(subCollector, docId, filterOrd); } @Override - public void setScorer(Scorable scorer) throws IOException {} + public void setScorer(Scorable scorer) {} } MatchCollector collector = new MatchCollector(); + // create the buckets so we can call collectExistingBucket + grow(filters().size() + 1); filters().get(0).collect(aggCtx.getLeafReaderContext(), collector, live); for (int filterOrd = 1; filterOrd < filters().size(); filterOrd++) { collector.subCollector = collectableSubAggregators.getLeafCollector(aggCtx); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java index 3beec89853b76..ce3031d4cddf8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java @@ -43,10 +43,11 @@ public LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx, if (scorer == null) { return LeafBucketCollector.NO_OP_COLLECTOR; } + grow(1); scorer.score(new LeafCollector() { @Override public void collect(int doc) throws IOException { - collectBucket(sub, doc, 0); + collectExistingBucket(sub, doc, 0); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java index e8201ffb86317..3478773464feb 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java @@ -8,9 +8,11 @@ package org.elasticsearch.search.aggregations.bucket.histogram; -import org.apache.lucene.util.PriorityQueue; +import org.apache.lucene.util.NumericUtils; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.util.LongObjectPagedHashMap; +import org.elasticsearch.core.Releasables; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.AggregationReduceContext; import org.elasticsearch.search.aggregations.AggregatorReducer; @@ -18,7 +20,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations; 
import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.KeyComparable; -import org.elasticsearch.search.aggregations.bucket.IteratorAndCurrent; +import org.elasticsearch.search.aggregations.bucket.MultiBucketAggregatorsReducer; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; import org.elasticsearch.search.aggregations.support.SamplingContext; import org.elasticsearch.xcontent.XContentBuilder; @@ -324,59 +326,6 @@ private Bucket reduceBucket(List buckets, AggregationReduceContext conte return new Bucket(centroid, bounds, docCount, format, aggs); } - public List reduceBuckets(List aggregations, AggregationReduceContext reduceContext) { - PriorityQueue> pq = new PriorityQueue<>(aggregations.size()) { - @Override - protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurrent b) { - return Double.compare(a.current().centroid, b.current().centroid) < 0; - } - }; - for (InternalVariableWidthHistogram histogram : aggregations) { - if (histogram.buckets.isEmpty() == false) { - pq.add(new IteratorAndCurrent<>(histogram.buckets.iterator())); - } - } - - List reducedBuckets = new ArrayList<>(); - if (pq.size() > 0) { - double key = pq.top().current().centroid(); - // list of buckets coming from different shards that have the same key - List currentBuckets = new ArrayList<>(); - do { - IteratorAndCurrent top = pq.top(); - - if (Double.compare(top.current().centroid(), key) != 0) { - // The key changes, reduce what we already buffered and reset the buffer for current buckets. - final Bucket reduced = reduceBucket(currentBuckets, reduceContext); - reduceContext.consumeBucketsAndMaybeBreak(1); - reducedBuckets.add(reduced); - currentBuckets.clear(); - key = top.current().centroid(); - } - - currentBuckets.add(top.current()); - - if (top.hasNext()) { - Bucket prev = top.current(); - top.next(); - assert top.current().compareKey(prev) >= 0 : "shards must return data sorted by centroid"; - pq.updateTop(); - } else { - pq.pop(); - } - } while (pq.size() > 0); - - if (currentBuckets.isEmpty() == false) { - final Bucket reduced = reduceBucket(currentBuckets, reduceContext); - reduceContext.consumeBucketsAndMaybeBreak(1); - reducedBuckets.add(reduced); - } - } - - mergeBucketsIfNeeded(reducedBuckets, targetNumBuckets, reduceContext); - return reducedBuckets; - } - static class BucketRange { int startIdx; int endIdx; @@ -530,16 +479,40 @@ private static void adjustBoundsForOverlappingBuckets(List buckets) { @Override protected AggregatorReducer getLeaderReducer(AggregationReduceContext reduceContext, int size) { return new AggregatorReducer() { - final List aggregations = new ArrayList<>(size); + + final LongObjectPagedHashMap bucketsReducer = new LongObjectPagedHashMap<>( + getBuckets().size(), + reduceContext.bigArrays() + ); @Override public void accept(InternalAggregation aggregation) { - aggregations.add((InternalVariableWidthHistogram) aggregation); + InternalVariableWidthHistogram histogram = (InternalVariableWidthHistogram) aggregation; + for (Bucket bucket : histogram.getBuckets()) { + long key = NumericUtils.doubleToSortableLong(bucket.centroid()); + ReducerAndExtraInfo reducer = bucketsReducer.get(key); + if (reducer == null) { + reducer = new ReducerAndExtraInfo(new MultiBucketAggregatorsReducer(reduceContext, size)); + bucketsReducer.put(key, reducer); + reduceContext.consumeBucketsAndMaybeBreak(1); + } + reducer.min[0] = Math.min(reducer.min[0], bucket.bounds.min); + reducer.max[0] = 
Math.max(reducer.max[0], bucket.bounds.max); + reducer.sum[0] += bucket.docCount * bucket.centroid; + reducer.reducer.accept(bucket); + } } @Override public InternalAggregation get() { - final List reducedBuckets = reduceBuckets(aggregations, reduceContext); + final List reducedBuckets = new ArrayList<>((int) bucketsReducer.size()); + bucketsReducer.iterator().forEachRemaining(entry -> { + final double centroid = entry.value.sum[0] / entry.value.reducer.getDocCount(); + final Bucket.BucketBounds bounds = new Bucket.BucketBounds(entry.value.min[0], entry.value.max[0]); + reducedBuckets.add(new Bucket(centroid, bounds, entry.value.reducer.getDocCount(), format, entry.value.reducer.get())); + }); + reducedBuckets.sort(Comparator.comparing(Bucket::centroid)); + mergeBucketsIfNeeded(reducedBuckets, targetNumBuckets, reduceContext); if (reduceContext.isFinalReduce()) { buckets.sort(Comparator.comparing(Bucket::min)); mergeBucketsWithSameMin(reducedBuckets, reduceContext); @@ -547,9 +520,21 @@ public InternalAggregation get() { } return new InternalVariableWidthHistogram(getName(), reducedBuckets, emptyBucketInfo, targetNumBuckets, format, metadata); } + + @Override + public void close() { + bucketsReducer.iterator().forEachRemaining(entry -> Releasables.close(entry.value.reducer)); + Releasables.close(bucketsReducer); + } }; } + private record ReducerAndExtraInfo(MultiBucketAggregatorsReducer reducer, double[] min, double[] max, double[] sum) { + private ReducerAndExtraInfo(MultiBucketAggregatorsReducer reducer) { + this(reducer, new double[] { Double.POSITIVE_INFINITY }, new double[] { Double.NEGATIVE_INFINITY }, new double[] { 0 }); + } + } + @Override public InternalAggregation finalizeSampling(SamplingContext samplingContext) { return new InternalVariableWidthHistogram( diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregator.java index be9fca9acdbb5..51901b422c861 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregator.java @@ -92,17 +92,29 @@ public ScoreMode scoreMode() { return super.scoreMode(); } + @FunctionalInterface + private interface BucketCollector { + void accept(LeafBucketCollector sub, int doc, long subBucketOrdinal) throws IOException; + } + @Override protected LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx, LeafBucketCollector sub) throws IOException { if (valuesSource == null) { return LeafBucketCollector.NO_OP_COLLECTOR; } + BucketCollector collector; + if (parent() == null) { + grow(ranges.length); + collector = this::collectExistingBucket; + } else { + collector = this::collectBucket; + } if (valuesSource instanceof ValuesSource.Bytes.WithOrdinals) { SortedSetDocValues values = ((ValuesSource.Bytes.WithOrdinals) valuesSource).ordinalsValues(aggCtx.getLeafReaderContext()); return new SortedSetRangeLeafCollector(values, ranges, sub) { @Override protected void doCollect(LeafBucketCollector sub, int doc, long bucket) throws IOException { - collectBucket(sub, doc, bucket); + collector.accept(sub, doc, bucket); } }; } else { @@ -110,7 +122,7 @@ protected void doCollect(LeafBucketCollector sub, int doc, long bucket) throws I return new SortedBinaryRangeLeafCollector(values, ranges, sub) { @Override protected void doCollect(LeafBucketCollector sub, int 
doc, long bucket) throws IOException { - collectBucket(sub, doc, bucket); + collector.accept(sub, doc, bucket); } }; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java index 7d7e1a1a03bc4..9640395712b23 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java @@ -715,8 +715,16 @@ static class NoOverlap extends NumericRangeAggregator { cardinality, metadata ); + if (parent == null) { + grow(ranges.length); + this.collector = this::collectExistingBucket; + } else { + this.collector = this::collectBucket; + } } + private final BucketCollector collector; + @Override protected int collect(LeafBucketCollector sub, int doc, double value, long owningBucketOrdinal, int lowBound) throws IOException { int lo = lowBound, hi = ranges.length - 1; @@ -727,7 +735,7 @@ protected int collect(LeafBucketCollector sub, int doc, double value, long ownin } else if (value >= ranges[mid].to) { lo = mid + 1; } else { - collectBucket(sub, doc, subBucketOrdinal(owningBucketOrdinal, mid)); + collector.accept(sub, doc, subBucketOrdinal(owningBucketOrdinal, mid)); // The next value must fall in the next bucket to be collected. return mid + 1; } @@ -736,6 +744,11 @@ protected int collect(LeafBucketCollector sub, int doc, double value, long ownin } } + @FunctionalInterface + private interface BucketCollector { + void accept(LeafBucketCollector sub, int doc, long subBucketOrdinal) throws IOException; + } + private static class Overlap extends NumericRangeAggregator { Overlap( String name, @@ -770,9 +783,16 @@ private static class Overlap extends NumericRangeAggregator { for (int i = 1; i < ranges.length; ++i) { maxTo[i] = Math.max(ranges[i].to, maxTo[i - 1]); } + if (parent == null) { + grow(ranges.length); + this.collector = this::collectExistingBucket; + } else { + this.collector = this::collectBucket; + } } private final double[] maxTo; + private final BucketCollector collector; @Override protected int collect(LeafBucketCollector sub, int doc, double value, long owningBucketOrdinal, int lowBound) throws IOException { @@ -817,7 +837,7 @@ protected int collect(LeafBucketCollector sub, int doc, double value, long ownin for (int i = startLo; i <= endHi; ++i) { if (ranges[i].matches(value)) { - collectBucket(sub, doc, subBucketOrdinal(owningBucketOrdinal, i)); + collector.accept(sub, doc, subBucketOrdinal(owningBucketOrdinal, i)); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/InternalRandomSampler.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/InternalRandomSampler.java index 4dde9cc67b975..68a1a22369d2a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/InternalRandomSampler.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/InternalRandomSampler.java @@ -8,6 +8,7 @@ package org.elasticsearch.search.aggregations.bucket.sampler.random; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Releasables; @@ -29,18 +30,21 @@ public class InternalRandomSampler extends InternalSingleBucketAggregation imple public static final String 
PARSER_NAME = "random_sampler"; private final int seed; + private final Integer shardSeed; private final double probability; InternalRandomSampler( String name, long docCount, int seed, + Integer shardSeed, double probability, InternalAggregations subAggregations, Map metadata ) { super(name, docCount, subAggregations, metadata); this.seed = seed; + this.shardSeed = shardSeed; this.probability = probability; } @@ -51,6 +55,11 @@ public InternalRandomSampler(StreamInput in) throws IOException { super(in); this.seed = in.readInt(); this.probability = in.readDouble(); + if (in.getTransportVersion().onOrAfter(TransportVersions.RANDOM_AGG_SHARD_SEED)) { + this.shardSeed = in.readOptionalInt(); + } else { + this.shardSeed = null; + } } @Override @@ -58,6 +67,9 @@ protected void doWriteTo(StreamOutput out) throws IOException { super.doWriteTo(out); out.writeInt(seed); out.writeDouble(probability); + if (out.getTransportVersion().onOrAfter(TransportVersions.RANDOM_AGG_SHARD_SEED)) { + out.writeOptionalInt(shardSeed); + } } @Override @@ -72,7 +84,7 @@ public String getType() { @Override protected InternalSingleBucketAggregation newAggregation(String name, long docCount, InternalAggregations subAggregations) { - return new InternalRandomSampler(name, docCount, seed, probability, subAggregations, metadata); + return new InternalRandomSampler(name, docCount, seed, shardSeed, probability, subAggregations, metadata); } @Override @@ -105,12 +117,15 @@ public void close() { } public SamplingContext buildContext() { - return new SamplingContext(probability, seed); + return new SamplingContext(probability, seed, shardSeed); } @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { builder.field(RandomSamplerAggregationBuilder.SEED.getPreferredName(), seed); + if (shardSeed != null) { + builder.field(RandomSamplerAggregationBuilder.SHARD_SEED.getPreferredName(), shardSeed); + } builder.field(RandomSamplerAggregationBuilder.PROBABILITY.getPreferredName(), probability); builder.field(CommonFields.DOC_COUNT.getPreferredName(), getDocCount()); getAggregations().toXContentInternal(builder, params); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregationBuilder.java index 240f016c66954..9bd9ab45b633a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregationBuilder.java @@ -34,6 +34,7 @@ public class RandomSamplerAggregationBuilder extends AbstractAggregationBuilder< static final ParseField PROBABILITY = new ParseField("probability"); static final ParseField SEED = new ParseField("seed"); + static final ParseField SHARD_SEED = new ParseField("shard_seed"); public static final ObjectParser PARSER = ObjectParser.fromBuilder( RandomSamplerAggregationBuilder.NAME, @@ -41,10 +42,12 @@ public class RandomSamplerAggregationBuilder extends AbstractAggregationBuilder< ); static { PARSER.declareInt(RandomSamplerAggregationBuilder::setSeed, SEED); + PARSER.declareInt(RandomSamplerAggregationBuilder::setShardSeed, SHARD_SEED); PARSER.declareDouble(RandomSamplerAggregationBuilder::setProbability, PROBABILITY); } private int seed = Randomness.get().nextInt(); + private Integer shardSeed; private double p; public 
RandomSamplerAggregationBuilder(String name) { @@ -67,10 +70,18 @@ public RandomSamplerAggregationBuilder setSeed(int seed) { return this; } + public RandomSamplerAggregationBuilder setShardSeed(int shardSeed) { + this.shardSeed = shardSeed; + return this; + } + public RandomSamplerAggregationBuilder(StreamInput in) throws IOException { super(in); this.p = in.readDouble(); this.seed = in.readInt(); + if (in.getTransportVersion().onOrAfter(TransportVersions.RANDOM_AGG_SHARD_SEED)) { + this.shardSeed = in.readOptionalInt(); + } } protected RandomSamplerAggregationBuilder( @@ -81,12 +92,16 @@ protected RandomSamplerAggregationBuilder( super(clone, factoriesBuilder, metadata); this.p = clone.p; this.seed = clone.seed; + this.shardSeed = clone.shardSeed; } @Override protected void doWriteTo(StreamOutput out) throws IOException { out.writeDouble(p); out.writeInt(seed); + if (out.getTransportVersion().onOrAfter(TransportVersions.RANDOM_AGG_SHARD_SEED)) { + out.writeOptionalInt(shardSeed); + } } static void recursivelyCheckSubAggs(Collection builders, Consumer aggregationCheck) { @@ -128,7 +143,7 @@ protected AggregatorFactory doBuild( ); } }); - return new RandomSamplerAggregatorFactory(name, seed, p, context, parent, subfactoriesBuilder, metadata); + return new RandomSamplerAggregatorFactory(name, seed, shardSeed, p, context, parent, subfactoriesBuilder, metadata); } @Override @@ -136,6 +151,9 @@ protected XContentBuilder internalXContent(XContentBuilder builder, Params param builder.startObject(); builder.field(PROBABILITY.getPreferredName(), p); builder.field(SEED.getPreferredName(), seed); + if (shardSeed != null) { + builder.field(SHARD_SEED.getPreferredName(), shardSeed); + } builder.endObject(); return null; } @@ -162,7 +180,7 @@ public TransportVersion getMinimalSupportedVersion() { @Override public int hashCode() { - return Objects.hash(super.hashCode(), p, seed); + return Objects.hash(super.hashCode(), p, seed, shardSeed); } @Override @@ -171,6 +189,6 @@ public boolean equals(Object obj) { if (obj == null || getClass() != obj.getClass()) return false; if (super.equals(obj) == false) return false; RandomSamplerAggregationBuilder other = (RandomSamplerAggregationBuilder) obj; - return Objects.equals(p, other.p) && Objects.equals(seed, other.seed); + return Objects.equals(p, other.p) && Objects.equals(seed, other.seed) && Objects.equals(shardSeed, other.shardSeed); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregator.java index 8853733b9a158..276e0bbf300d2 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregator.java @@ -30,12 +30,14 @@ public class RandomSamplerAggregator extends BucketsAggregator implements SingleBucketAggregator { private final int seed; + private final Integer shardSeed; private final double probability; private final CheckedSupplier weightSupplier; RandomSamplerAggregator( String name, int seed, + Integer shardSeed, double probability, CheckedSupplier weightSupplier, AggregatorFactories factories, @@ -53,6 +55,7 @@ public class RandomSamplerAggregator extends BucketsAggregator implements Single ); } this.weightSupplier = weightSupplier; + this.shardSeed = shardSeed; } @Override @@ -63,6 +66,7 @@ public 
InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I name, bucketDocCount(owningBucketOrd), seed, + shardSeed, probability, subAggregationResults, metadata() @@ -72,7 +76,7 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I @Override public InternalAggregation buildEmptyAggregation() { - return new InternalRandomSampler(name, 0, seed, probability, buildEmptySubAggregations(), metadata()); + return new InternalRandomSampler(name, 0, seed, shardSeed, probability, buildEmptySubAggregations(), metadata()); } /** @@ -97,10 +101,11 @@ protected LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCt } // No sampling is being done, collect all docs if (probability >= 1.0) { + grow(1); return new LeafBucketCollector() { @Override public void collect(int doc, long owningBucketOrd) throws IOException { - collectBucket(sub, doc, 0); + collectExistingBucket(sub, doc, 0); } }; } @@ -113,11 +118,12 @@ public void collect(int doc, long owningBucketOrd) throws IOException { final DocIdSetIterator docIt = scorer.iterator(); final Bits liveDocs = aggCtx.getLeafReaderContext().reader().getLiveDocs(); try { + grow(1); // Iterate every document provided by the scorer iterator for (int docId = docIt.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = docIt.nextDoc()) { // If liveDocs is null, that means that every doc is a live doc, no need to check if it has been deleted or not if (liveDocs == null || liveDocs.get(docIt.docID())) { - collectBucket(sub, docIt.docID(), 0); + collectExistingBucket(sub, docIt.docID(), 0); } } // This collector could throw `CollectionTerminatedException` if the last leaf collector has stopped collecting diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregatorFactory.java index d63f574b4d8bd..4be2e932179fe 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregatorFactory.java @@ -26,6 +26,7 @@ public class RandomSamplerAggregatorFactory extends AggregatorFactory { private final int seed; + private final Integer shardSeed; private final double probability; private final SamplingContext samplingContext; private Weight weight; @@ -33,6 +34,7 @@ public class RandomSamplerAggregatorFactory extends AggregatorFactory { RandomSamplerAggregatorFactory( String name, int seed, + Integer shardSeed, double probability, AggregationContext context, AggregatorFactory parent, @@ -42,7 +44,8 @@ public class RandomSamplerAggregatorFactory extends AggregatorFactory { super(name, context, parent, subFactories, metadata); this.probability = probability; this.seed = seed; - this.samplingContext = new SamplingContext(probability, seed); + this.samplingContext = new SamplingContext(probability, seed, shardSeed); + this.shardSeed = shardSeed; } @Override @@ -53,7 +56,18 @@ public Optional getSamplingContext() { @Override public Aggregator createInternal(Aggregator parent, CardinalityUpperBound cardinality, Map metadata) throws IOException { - return new RandomSamplerAggregator(name, seed, probability, this::getWeight, factories, context, parent, cardinality, metadata); + return new RandomSamplerAggregator( + name, + seed, + shardSeed, + probability, + this::getWeight, + 
factories, + context, + parent, + cardinality, + metadata + ); } /** @@ -66,7 +80,11 @@ public Aggregator createInternal(Aggregator parent, CardinalityUpperBound cardin */ private Weight getWeight() throws IOException { if (weight == null) { - RandomSamplingQuery query = new RandomSamplingQuery(probability, seed, context.shardRandomSeed()); + RandomSamplingQuery query = new RandomSamplingQuery( + probability, + seed, + shardSeed == null ? context.shardRandomSeed() : shardSeed + ); BooleanQuery booleanQuery = new BooleanQuery.Builder().add(query, BooleanClause.Occur.FILTER) .add(context.query(), BooleanClause.Occur.FILTER) .build(); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java index 45067208cbdd2..b0af2c3d4e618 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java @@ -9,6 +9,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Predicates; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.AggregationReduceContext; import org.elasticsearch.search.aggregations.AggregatorReducer; @@ -208,7 +209,7 @@ protected AggregatorReducer getLeaderReducer(AggregationReduceContext reduceCont } else if (format == DocValueFormat.UNSIGNED_LONG_SHIFTED) { needsPromoting = docFormat -> docFormat == DocValueFormat.RAW; } else { - needsPromoting = docFormat -> false; + needsPromoting = Predicates.never(); } return new AggregatorReducer() { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTextAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTextAggregationBuilder.java index 99dc93a175f7b..9b042ab4a6966 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTextAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTextAggregationBuilder.java @@ -16,7 +16,6 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregationBuilder; -import org.elasticsearch.search.aggregations.AggregationInitializationException; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator.BucketCountThresholds; @@ -129,16 +128,12 @@ public TermsAggregator.BucketCountThresholds bucketCountThresholds() { @Override public SignificantTextAggregationBuilder subAggregations(Builder subFactories) { - throw new AggregationInitializationException( - "Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations" - ); + throw new IllegalArgumentException("Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations"); } @Override public SignificantTextAggregationBuilder subAggregation(AggregationBuilder aggregation) { - throw new AggregationInitializationException( - "Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations" - ); + throw new IllegalArgumentException("Aggregator [" + name + "] 
of type [" + getType() + "] cannot accept sub-aggregations"); } /** diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java index 68263e2d72b9c..13e5fe3dbd11f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java @@ -139,6 +139,11 @@ public boolean supportsSampling() { @Override public boolean supportsParallelCollection(ToLongFunction fieldCardinalityResolver) { + if (minDocCount() == 0) { + // if minDocCount os zero, we collect the zero buckets looking into all segments in the index. to avoid + // looking into the same segment for each thread we disable concurrency + return false; + } /* * we parallelize only if the cardinality of the field is lower than shard size, this is to minimize precision issues. * When ordered by term, we still take cardinality into account to avoid overhead that concurrency may cause against diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java index c5dca1271c891..c9ccc1c6936d5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java @@ -21,7 +21,6 @@ import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregationBuilder; -import org.elasticsearch.search.aggregations.AggregationInitializationException; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.support.AggregationContext; @@ -490,9 +489,7 @@ public TopHitsAggregationBuilder trackScores(boolean trackScores) { @Override public TopHitsAggregationBuilder subAggregations(Builder subFactories) { - throw new AggregationInitializationException( - "Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations" - ); + throw new IllegalArgumentException("Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations"); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java index 7e0c235ee4fb3..ecacf8cca8d01 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java @@ -14,7 +14,6 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; -import org.elasticsearch.search.aggregations.AggregationInitializationException; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.xcontent.XContentBuilder; @@ -44,9 +43,7 @@ protected 
LeafOnly(String name) { protected LeafOnly(LeafOnly clone, Builder factoriesBuilder, Map metadata) { super(clone, factoriesBuilder, metadata); if (factoriesBuilder.count() > 0) { - throw new AggregationInitializationException( - "Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations" - ); + throw new IllegalArgumentException("Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations"); } } @@ -59,9 +56,7 @@ protected LeafOnly(StreamInput in) throws IOException { @Override public AB subAggregations(Builder subFactories) { - throw new AggregationInitializationException( - "Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations" - ); + throw new IllegalArgumentException("Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations"); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/SamplingContext.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/SamplingContext.java index 57ea138f63268..d8f34bfcf9973 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/SamplingContext.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/SamplingContext.java @@ -20,8 +20,9 @@ /** * This provides information around the current sampling context for aggregations */ -public record SamplingContext(double probability, int seed) { - public static final SamplingContext NONE = new SamplingContext(1.0, 0); +public record SamplingContext(double probability, int seed, Integer shardSeed) { + + public static final SamplingContext NONE = new SamplingContext(1.0, 0, null); public boolean isSampled() { return probability < 1.0; @@ -97,20 +98,22 @@ public Query buildQueryWithSampler(QueryBuilder builder, AggregationContext cont } BooleanQuery.Builder queryBuilder = new BooleanQuery.Builder(); queryBuilder.add(rewritten, BooleanClause.Occur.FILTER); - queryBuilder.add(new RandomSamplingQuery(probability(), seed(), context.shardRandomSeed()), BooleanClause.Occur.FILTER); + queryBuilder.add( + new RandomSamplingQuery(probability(), seed(), shardSeed == null ? context.shardRandomSeed() : shardSeed), + BooleanClause.Occur.FILTER + ); return queryBuilder.build(); } /** * @param context The current aggregation context * @return the sampling query if the sampling context indicates that sampling is required - * @throws IOException thrown on query build failure */ - public Optional buildSamplingQueryIfNecessary(AggregationContext context) throws IOException { + public Optional buildSamplingQueryIfNecessary(AggregationContext context) { if (isSampled() == false) { return Optional.empty(); } - return Optional.of(new RandomSamplingQuery(probability(), seed(), context.shardRandomSeed())); + return Optional.of(new RandomSamplingQuery(probability(), seed(), shardSeed == null ? 
context.shardRandomSeed() : shardSeed)); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java index 48afb79b95e90..0c1de6006ffa5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java @@ -12,7 +12,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; -import org.elasticsearch.search.aggregations.AggregationInitializationException; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.xcontent.AbstractObjectParser; @@ -119,9 +118,7 @@ protected LeafOnly(String name) { protected LeafOnly(LeafOnly clone, Builder factoriesBuilder, Map metadata) { super(clone, factoriesBuilder, metadata); if (factoriesBuilder.count() > 0) { - throw new AggregationInitializationException( - "Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations" - ); + throw new IllegalArgumentException("Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations"); } } @@ -134,9 +131,7 @@ protected LeafOnly(StreamInput in) throws IOException { @Override public final AB subAggregations(Builder subFactories) { - throw new AggregationInitializationException( - "Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations" - ); + throw new IllegalArgumentException("Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations"); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java b/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java index 44dddc119925f..8c40a283844b4 100644 --- a/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java +++ b/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java @@ -17,7 +17,7 @@ import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.TermStatistics; import org.apache.lucene.search.TopDocs; -import org.apache.lucene.search.TopScoreDocCollector; +import org.apache.lucene.search.TopScoreDocCollectorManager; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -203,7 +203,7 @@ private static void executeKnnVectorQuery(SearchContext context) throws IOExcept static DfsKnnResults singleKnnSearch(Query knnQuery, int k, Profilers profilers, ContextIndexSearcher searcher, String nestedPath) throws IOException { - CollectorManager topDocsCollectorManager = TopScoreDocCollector.createSharedManager( + CollectorManager topDocsCollectorManager = new TopScoreDocCollectorManager( k, null, Integer.MAX_VALUE diff --git a/server/src/main/java/org/elasticsearch/search/query/QueryPhaseCollectorManager.java b/server/src/main/java/org/elasticsearch/search/query/QueryPhaseCollectorManager.java index 86a01756d247e..7fd09d3ddfdf1 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QueryPhaseCollectorManager.java +++ b/server/src/main/java/org/elasticsearch/search/query/QueryPhaseCollectorManager.java @@ -35,9 +35,9 @@ import 
org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopDocsCollector; -import org.apache.lucene.search.TopFieldCollector; +import org.apache.lucene.search.TopFieldCollectorManager; import org.apache.lucene.search.TopFieldDocs; -import org.apache.lucene.search.TopScoreDocCollector; +import org.apache.lucene.search.TopScoreDocCollectorManager; import org.apache.lucene.search.TotalHits; import org.apache.lucene.search.Weight; import org.elasticsearch.action.search.MaxScoreCollector; @@ -413,14 +413,9 @@ private static class WithHits extends QueryPhaseCollectorManager { } } if (sortAndFormats == null) { - this.topDocsManager = TopScoreDocCollector.createSharedManager(numHits, searchAfter, hitCountThreshold); + this.topDocsManager = new TopScoreDocCollectorManager(numHits, searchAfter, hitCountThreshold); } else { - this.topDocsManager = TopFieldCollector.createSharedManager( - sortAndFormats.sort, - numHits, - (FieldDoc) searchAfter, - hitCountThreshold - ); + this.topDocsManager = new TopFieldCollectorManager(sortAndFormats.sort, numHits, (FieldDoc) searchAfter, hitCountThreshold); } } diff --git a/server/src/main/java/org/elasticsearch/search/vectors/ESKnnByteVectorQuery.java b/server/src/main/java/org/elasticsearch/search/vectors/ESKnnByteVectorQuery.java index 091ce6f8a0f6d..05cf52fd23f24 100644 --- a/server/src/main/java/org/elasticsearch/search/vectors/ESKnnByteVectorQuery.java +++ b/server/src/main/java/org/elasticsearch/search/vectors/ESKnnByteVectorQuery.java @@ -8,35 +8,17 @@ package org.elasticsearch.search.vectors; -import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.KnnByteVectorQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TopDocs; -import org.apache.lucene.search.TopDocsCollector; -import org.apache.lucene.util.Bits; import org.elasticsearch.search.profile.query.QueryProfiler; -import java.io.IOException; - public class ESKnnByteVectorQuery extends KnnByteVectorQuery implements ProfilingQuery { - private static final TopDocs NO_RESULTS = TopDocsCollector.EMPTY_TOPDOCS; private long vectorOpsCount; - private final byte[] target; public ESKnnByteVectorQuery(String field, byte[] target, int k, Query filter) { super(field, target, k, filter); - this.target = target; - } - - @Override - protected TopDocs approximateSearch(LeafReaderContext context, Bits acceptDocs, int visitedLimit) throws IOException { - // We increment visit limit by one to bypass a fencepost error in the collector - if (visitedLimit < Integer.MAX_VALUE) { - visitedLimit += 1; - } - TopDocs results = context.reader().searchNearestVectors(field, target, k, acceptDocs, visitedLimit); - return results != null ? 
results : NO_RESULTS; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/vectors/ESKnnFloatVectorQuery.java b/server/src/main/java/org/elasticsearch/search/vectors/ESKnnFloatVectorQuery.java index 4fa4db1f4ea95..e83a90a3c4df8 100644 --- a/server/src/main/java/org/elasticsearch/search/vectors/ESKnnFloatVectorQuery.java +++ b/server/src/main/java/org/elasticsearch/search/vectors/ESKnnFloatVectorQuery.java @@ -8,24 +8,16 @@ package org.elasticsearch.search.vectors; -import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.KnnFloatVectorQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TopDocs; -import org.apache.lucene.search.TopDocsCollector; -import org.apache.lucene.util.Bits; import org.elasticsearch.search.profile.query.QueryProfiler; -import java.io.IOException; - public class ESKnnFloatVectorQuery extends KnnFloatVectorQuery implements ProfilingQuery { - private static final TopDocs NO_RESULTS = TopDocsCollector.EMPTY_TOPDOCS; private long vectorOpsCount; - private final float[] target; public ESKnnFloatVectorQuery(String field, float[] target, int k, Query filter) { super(field, target, k, filter); - this.target = target; } @Override @@ -35,16 +27,6 @@ protected TopDocs mergeLeafResults(TopDocs[] perLeafResults) { return topK; } - @Override - protected TopDocs approximateSearch(LeafReaderContext context, Bits acceptDocs, int visitedLimit) throws IOException { - // We increment visit limit by one to bypass a fencepost error in the collector - if (visitedLimit < Integer.MAX_VALUE) { - visitedLimit += 1; - } - TopDocs results = context.reader().searchNearestVectors(field, target, k, acceptDocs, visitedLimit); - return results != null ? results : NO_RESULTS; - } - @Override public void profile(QueryProfiler queryProfiler) { queryProfiler.setVectorOpsCount(vectorOpsCount); diff --git a/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java b/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java index 96a16013ab874..7e65cd19638ce 100644 --- a/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java @@ -28,6 +28,7 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.index.search.NestedHelper; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; @@ -274,7 +275,6 @@ protected Query doToQuery(SearchExecutionContext context) throws IOException { ); } - final BitSetProducer parentFilter; BooleanQuery.Builder builder = new BooleanQuery.Builder(); for (QueryBuilder query : this.filterQueries) { builder.add(query.toQuery(context), BooleanClause.Occur.FILTER); @@ -289,6 +289,8 @@ protected Query doToQuery(SearchExecutionContext context) throws IOException { String parentPath = context.nestedLookup().getNestedParent(fieldName); if (parentPath != null) { + final BitSetProducer parentBitSet; + final Query parentFilter; NestedObjectMapper originalObjectMapper = context.nestedScope().getObjectMapper(); if (originalObjectMapper != null) { try { @@ -296,19 +298,28 @@ protected Query doToQuery(SearchExecutionContext context) throws IOException { context.nestedScope().previousLevel(); NestedObjectMapper objectMapper = 
context.nestedScope().getObjectMapper(); parentFilter = objectMapper == null - ? context.bitsetFilter(Queries.newNonNestedFilter(context.indexVersionCreated())) - : context.bitsetFilter(objectMapper.nestedTypeFilter()); + ? Queries.newNonNestedFilter(context.indexVersionCreated()) + : objectMapper.nestedTypeFilter(); } finally { context.nestedScope().nextLevel(originalObjectMapper); } } else { // we are NOT in a nested context, coming from the top level knn search - parentFilter = context.bitsetFilter(Queries.newNonNestedFilter(context.indexVersionCreated())); + parentFilter = Queries.newNonNestedFilter(context.indexVersionCreated()); } + parentBitSet = context.bitsetFilter(parentFilter); if (filterQuery != null) { - filterQuery = new ToChildBlockJoinQuery(filterQuery, parentFilter); + NestedHelper nestedHelper = new NestedHelper(context.nestedLookup(), context::isFieldMapped); + // We treat the provided filter as a filter over PARENT documents, so if it might match nested documents + // we need to adjust it. + if (nestedHelper.mightMatchNestedDocs(filterQuery)) { + // Ensure that the query only returns parent documents matching `filterQuery` + filterQuery = Queries.filtered(filterQuery, parentFilter); + } + // Now join the filterQuery & parentFilter to provide the matching blocks of children + filterQuery = new ToChildBlockJoinQuery(filterQuery, parentBitSet); } - return vectorFieldType.createKnnQuery(queryVector, adjustedNumCands, filterQuery, vectorSimilarity, parentFilter); + return vectorFieldType.createKnnQuery(queryVector, adjustedNumCands, filterQuery, vectorSimilarity, parentBitSet); } return vectorFieldType.createKnnQuery(queryVector, adjustedNumCands, filterQuery, vectorSimilarity, null); } diff --git a/server/src/main/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorService.java b/server/src/main/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorService.java index 0b460b5cb2fb7..67afddcb70664 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorService.java @@ -12,7 +12,9 @@ import org.elasticsearch.cluster.metadata.RepositoriesMetadata; import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.health.Diagnosis; +import org.elasticsearch.health.HealthFeatures; import org.elasticsearch.health.HealthIndicatorDetails; import org.elasticsearch.health.HealthIndicatorImpact; import org.elasticsearch.health.HealthIndicatorResult; @@ -59,6 +61,8 @@ public class RepositoryIntegrityHealthIndicatorService implements HealthIndicato public static final String NO_REPOS_CONFIGURED = "No snapshot repositories configured."; public static final String ALL_REPOS_HEALTHY = "All repositories are healthy."; public static final String NO_REPO_HEALTH_INFO = "No repository health info."; + public static final String MIXED_VERSIONS = + "No repository health info. 
The cluster currently has mixed versions (an upgrade may be in progress)."; public static final List IMPACTS = List.of( new HealthIndicatorImpact( @@ -95,9 +99,11 @@ public class RepositoryIntegrityHealthIndicatorService implements HealthIndicato ); private final ClusterService clusterService; + private final FeatureService featureService; - public RepositoryIntegrityHealthIndicatorService(ClusterService clusterService) { + public RepositoryIntegrityHealthIndicatorService(ClusterService clusterService, FeatureService featureService) { this.clusterService = clusterService; + this.featureService = featureService; } @Override @@ -128,7 +134,7 @@ public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResources /** * Analyzer for the cluster's repositories health; aids in constructing a {@link HealthIndicatorResult}. */ - static class RepositoryHealthAnalyzer { + class RepositoryHealthAnalyzer { private final ClusterState clusterState; private final int totalRepositories; private final List corruptedRepositories; @@ -137,6 +143,7 @@ static class RepositoryHealthAnalyzer { private final Set invalidRepositories = new HashSet<>(); private final Set nodesWithInvalidRepos = new HashSet<>(); private final HealthStatus healthStatus; + private boolean clusterHasFeature = true; private RepositoryHealthAnalyzer( ClusterState clusterState, @@ -167,7 +174,15 @@ private RepositoryHealthAnalyzer( || invalidRepositories.isEmpty() == false) { healthStatus = YELLOW; } else if (repositoriesHealthByNode.isEmpty()) { - healthStatus = UNKNOWN; + clusterHasFeature = featureService.clusterHasFeature( + clusterState, + HealthFeatures.SUPPORTS_EXTENDED_REPOSITORY_INDICATOR + ); + if (clusterHasFeature) { + healthStatus = UNKNOWN; + } else { + healthStatus = GREEN; + } } else { healthStatus = GREEN; } @@ -179,7 +194,7 @@ public HealthStatus getHealthStatus() { public String getSymptom() { if (healthStatus == GREEN) { - return ALL_REPOS_HEALTHY; + return clusterHasFeature ? 
ALL_REPOS_HEALTHY : MIXED_VERSIONS; } else if (healthStatus == UNKNOWN) { return NO_REPO_HEALTH_INFO; } diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index 9ac76e653b640..4b6e3f30fe6fa 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -716,7 +716,8 @@ static DataStream updateDataStream(DataStream dataStream, Metadata.Builder metad dataStream.getIndexMode(), dataStream.getLifecycle(), dataStream.isFailureStore(), - dataStream.getFailureIndices() + dataStream.getFailureIndices(), + dataStream.getAutoShardingEvent() ); } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardSizeInfo.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardSizeInfo.java index 29ae2d1c5da4b..3bd5431c7be63 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardSizeInfo.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardSizeInfo.java @@ -46,4 +46,9 @@ public long getShardSize(ShardRouting shardRouting, long fallback) { } return shardSize; } + + @Override + public String toString() { + return "SnapshotShardSizeInfo{snapshotShardSizes=" + snapshotShardSizes + '}'; + } } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 3b2868298cf65..d505a6ded4809 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -73,6 +73,7 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ListenableFuture; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Predicates; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; @@ -1189,7 +1190,7 @@ private static ImmutableOpenMap processWaitingShar IndexRoutingTable indexShardRoutingTable = routingTable.index(shardId.getIndex()); if (indexShardRoutingTable != null) { IndexShardRoutingTable shardRouting = indexShardRoutingTable.shard(shardId.id()); - if (shardRouting != null && shardRouting.primaryShard() != null) { + if (shardRouting != null) { final var primaryNodeId = shardRouting.primaryShard().currentNodeId(); if (nodeIdRemovalPredicate.test(primaryNodeId)) { if (shardStatus.state() == ShardState.PAUSED_FOR_NODE_REMOVAL) { @@ -1274,9 +1275,8 @@ private static boolean waitingShardsStartedOrUnassigned(SnapshotsInProgress snap return true; } ShardRouting shardRouting = indexShardRoutingTable.shard(shardId.shardId()).primaryShard(); - if (shardRouting != null - && (shardRouting.started() && snapshotsInProgress.isNodeIdForRemoval(shardRouting.currentNodeId()) == false - || shardRouting.unassigned())) { + if (shardRouting.started() && snapshotsInProgress.isNodeIdForRemoval(shardRouting.currentNodeId()) == false + || shardRouting.unassigned()) { return true; } } @@ -2267,7 +2267,7 @@ public static IndexVersion minCompatibleVersion( IndexVersion minCompatVersion = minNodeVersion; final Collection snapshotIds = repositoryData.getSnapshotIds(); for (SnapshotId snapshotId : snapshotIds.stream() - .filter(excluded == null ? sn -> true : Predicate.not(excluded::contains)) + .filter(excluded == null ? 
Predicates.always() : Predicate.not(excluded::contains)) .toList()) { final IndexVersion known = repositoryData.getVersion(snapshotId); // If we don't have the version cached in the repository data yet we load it from the snapshot info blobs diff --git a/server/src/main/java/org/elasticsearch/transport/BytesRefRecycler.java b/server/src/main/java/org/elasticsearch/transport/BytesRefRecycler.java index ca1fd984f2bf0..7b083ea77a991 100644 --- a/server/src/main/java/org/elasticsearch/transport/BytesRefRecycler.java +++ b/server/src/main/java/org/elasticsearch/transport/BytesRefRecycler.java @@ -43,4 +43,9 @@ public void close() { } }; } + + @Override + public int pageSize() { + return PageCacheRecycler.BYTE_PAGE_SIZE; + } } diff --git a/server/src/main/java/org/elasticsearch/transport/LeakTracker.java b/server/src/main/java/org/elasticsearch/transport/LeakTracker.java index 3be22f6fae53a..75bab40a3d9a0 100644 --- a/server/src/main/java/org/elasticsearch/transport/LeakTracker.java +++ b/server/src/main/java/org/elasticsearch/transport/LeakTracker.java @@ -17,10 +17,10 @@ import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.Releasable; -import java.lang.ref.ReferenceQueue; -import java.lang.ref.WeakReference; +import java.lang.ref.Cleaner; import java.util.Set; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; @@ -32,44 +32,35 @@ public final class LeakTracker { private static final Logger logger = LogManager.getLogger(LeakTracker.class); - private static final int TARGET_RECORDS = 25; + private static final Cleaner cleaner = Cleaner.create(); - private final Set> allLeaks = ConcurrentCollections.newConcurrentSet(); + private static final int TARGET_RECORDS = 25; - private final ReferenceQueue refQueue = new ReferenceQueue<>(); private final ConcurrentMap reportedLeaks = ConcurrentCollections.newConcurrentMap(); public static final LeakTracker INSTANCE = new LeakTracker(); + private static volatile String contextHint = ""; + private LeakTracker() {} /** * Track the given object. * * @param obj object to track - * @return leak object that must be released by a call to {@link Leak#close(Object)} before {@code obj} goes out of scope + * @return leak object that must be released by a call to {@link LeakTracker.Leak#close()} before {@code obj} goes out of scope */ - public Leak track(T obj) { - reportLeak(); - return new Leak<>(obj, refQueue, allLeaks); + public Leak track(Object obj) { + return new Leak(obj); } - public void reportLeak() { - while (true) { - Leak ref = (Leak) refQueue.poll(); - if (ref == null) { - break; - } - - if (ref.dispose() == false || logger.isErrorEnabled() == false) { - continue; - } - - String records = ref.toString(); - if (reportedLeaks.putIfAbsent(records, Boolean.TRUE) == null) { - logger.error("LEAK: resource was not cleaned up before it was garbage-collected.{}", records); - } - } + /** + * Set a hint string that will be recorded with every leak that is recorded. Used by unit tests to allow identifying the exact test + * that caused a leak by setting the test name here. 
+ * @param hint hint value + */ + public static void setContextHint(String hint) { + contextHint = hint; } public static Releasable wrap(Releasable releasable) { @@ -83,7 +74,7 @@ public void close() { try { releasable.close(); } finally { - leak.close(releasable); + leak.close(); } } @@ -124,7 +115,7 @@ public boolean tryIncRef() { @Override public boolean decRef() { if (refCounted.decRef()) { - leak.close(refCounted); + leak.close(); return true; } leak.record(); @@ -152,33 +143,43 @@ public boolean equals(Object obj) { }; } - public static final class Leak extends WeakReference { + public final class Leak implements Runnable { - @SuppressWarnings({ "unchecked", "rawtypes" }) - private static final AtomicReferenceFieldUpdater, Record> headUpdater = - (AtomicReferenceFieldUpdater) AtomicReferenceFieldUpdater.newUpdater(Leak.class, Record.class, "head"); + private static final AtomicReferenceFieldUpdater headUpdater = AtomicReferenceFieldUpdater.newUpdater( + Leak.class, + Record.class, + "head" + ); - @SuppressWarnings({ "unchecked", "rawtypes" }) - private static final AtomicIntegerFieldUpdater> droppedRecordsUpdater = - (AtomicIntegerFieldUpdater) AtomicIntegerFieldUpdater.newUpdater(Leak.class, "droppedRecords"); + private static final AtomicIntegerFieldUpdater droppedRecordsUpdater = AtomicIntegerFieldUpdater.newUpdater( + Leak.class, + "droppedRecords" + ); @SuppressWarnings("unused") private volatile Record head; @SuppressWarnings("unused") private volatile int droppedRecords; - private final Set> allLeaks; - private final int trackedHash; - - private Leak(Object referent, ReferenceQueue refQueue, Set> allLeaks) { - super(referent, refQueue); + private final AtomicBoolean closed = new AtomicBoolean(false); - assert referent != null; + private final Cleaner.Cleanable cleanable; - trackedHash = System.identityHashCode(referent); - allLeaks.add(this); + @SuppressWarnings("this-escape") + private Leak(Object referent) { + this.cleanable = cleaner.register(referent, this); headUpdater.set(this, new Record(Record.BOTTOM)); - this.allLeaks = allLeaks; + } + + @Override + public void run() { + if (closed.compareAndSet(false, true) == false || logger.isErrorEnabled() == false) { + return; + } + String records = toString(); + if (reportedLeaks.putIfAbsent(records, Boolean.TRUE) == null) { + logger.error("LEAK: resource was not cleaned up before it was garbage-collected.{}", records); + } } /** @@ -210,38 +211,18 @@ public void record() { } } - private boolean dispose() { - clear(); - return allLeaks.remove(this); - } - /** * Stop tracking the object that this leak was created for. * - * @param trackedObject the object that this leak was originally created for * @return true if the leak was released by this call, false if the leak had already been released */ - public boolean close(T trackedObject) { - assert trackedHash == System.identityHashCode(trackedObject); - try { - if (allLeaks.remove(this)) { - // Call clear so the reference is not even enqueued. 
- clear(); - headUpdater.set(this, null); - return true; - } - return false; - } finally { - reachabilityFence0(trackedObject); - } - } - - private static void reachabilityFence0(Object ref) { - if (ref != null) { - synchronized (ref) { - // empty on purpose - } + public boolean close() { + if (closed.compareAndSet(false, true)) { + cleanable.clean(); + headUpdater.set(this, null); + return true; } + return false; } @Override @@ -299,19 +280,25 @@ private static final class Record extends Throwable { private final Record next; private final int pos; + private final String threadName; + + private final String contextHint = LeakTracker.contextHint; + Record(Record next) { this.next = next; this.pos = next.pos + 1; + threadName = Thread.currentThread().getName(); } private Record() { next = null; pos = -1; + threadName = Thread.currentThread().getName(); } @Override public String toString() { - StringBuilder buf = new StringBuilder(); + StringBuilder buf = new StringBuilder("\tin [").append(threadName).append("][").append(contextHint).append("]\n"); StackTraceElement[] array = getStackTrace(); // Skip the first three elements since those are just related to the leak tracker. for (int i = 3; i < array.length; i++) { diff --git a/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java b/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java index cfb6f872ce748..b0c4a6cd95156 100644 --- a/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java +++ b/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.concurrent.CountDown; +import org.elasticsearch.core.Predicates; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; @@ -182,7 +183,7 @@ public class ProxyConnectionStrategy extends RemoteConnectionStrategy { connectionManager.getCredentialsManager() ), actualProfile.getHandshakeTimeout(), - cn -> true, + Predicates.always(), listener.map(resp -> { ClusterName remote = resp.getClusterName(); if (remoteClusterName.compareAndSet(null, remote)) { diff --git a/server/src/main/java/org/elasticsearch/transport/TransportService.java b/server/src/main/java/org/elasticsearch/transport/TransportService.java index 7f1d63b092cdb..2ade579f216e4 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java @@ -37,6 +37,7 @@ import org.elasticsearch.core.Booleans; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Predicates; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.UpdateForV9; @@ -356,7 +357,7 @@ protected void doStop() { // but there may still be pending handlers for node-local requests since this connection is not closed, and we may also // (briefly) track handlers for requests which are sent concurrently with stopping even though the underlying connection is // now closed. 
We complete all these outstanding handlers here: - for (final Transport.ResponseContext holderToNotify : responseHandlers.prune(h -> true)) { + for (final Transport.ResponseContext holderToNotify : responseHandlers.prune(Predicates.always())) { try { final TransportResponseHandler handler = holderToNotify.handler(); final var targetNode = holderToNotify.connection().getNode(); @@ -499,7 +500,7 @@ public void connectToNode( public ConnectionManager.ConnectionValidator connectionValidator(DiscoveryNode node) { return (newConnection, actualProfile, listener) -> { // We don't validate cluster names to allow for CCS connections. - handshake(newConnection, actualProfile.getHandshakeTimeout(), cn -> true, listener.map(resp -> { + handshake(newConnection, actualProfile.getHandshakeTimeout(), Predicates.always(), listener.map(resp -> { final DiscoveryNode remote = resp.discoveryNode; if (node.equals(remote) == false) { throw new ConnectTransportException(node, "handshake failed. unexpected remote node " + remote); diff --git a/server/src/main/resources/org/elasticsearch/TransportVersions.csv b/server/src/main/resources/org/elasticsearch/TransportVersions.csv index 8efe3b01eefd4..b392111557615 100644 --- a/server/src/main/resources/org/elasticsearch/TransportVersions.csv +++ b/server/src/main/resources/org/elasticsearch/TransportVersions.csv @@ -111,3 +111,4 @@ 8.11.4,8512001 8.12.0,8560000 8.12.1,8560001 +8.12.2,8560001 diff --git a/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json b/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json index fef69aec2f543..ead7387b0e1ac 100644 --- a/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json +++ b/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json @@ -31,5 +31,6 @@ "BOOTSTRAP_CHECK_TOKEN_SSL": "bootstrap-checks-xpack.html#_token_ssl_check", "BOOTSTRAP_CHECK_SECURITY_MINIMAL_SETUP": "security-minimal-setup.html", "CONTACT_SUPPORT": "troubleshooting.html#troubleshooting-contact-support", - "UNASSIGNED_SHARDS": "red-yellow-cluster-status.html" + "UNASSIGNED_SHARDS": "red-yellow-cluster-status.html", + "EXECUTABLE_JNA_TMPDIR": "executable-jna-tmpdir.html" } diff --git a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv index 43220565ab871..f2da9fcaf60ce 100644 --- a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv +++ b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv @@ -111,3 +111,4 @@ 8.11.4,8500003 8.12.0,8500008 8.12.1,8500010 +8.12.2,8500010 diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index 1265a4e7f96db..b8091b50b5dd8 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -60,6 +60,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardNotInPrimaryModeException; import org.elasticsearch.indices.AutoscalingMissedIndicesUpdateException; +import org.elasticsearch.indices.FailureIndexNotSupportedException; import org.elasticsearch.indices.IndexTemplateMissingException; import org.elasticsearch.indices.InvalidIndexTemplateException; import org.elasticsearch.indices.recovery.PeerRecoveryNotFound; @@ -827,6 +828,7 @@ public void testIds() { ids.put(175, 
AutoscalingMissedIndicesUpdateException.class); ids.put(176, SearchTimeoutException.class); ids.put(177, GraphStructureException.class); + ids.put(178, FailureIndexNotSupportedException.class); Map, Integer> reverse = new HashMap<>(); for (Map.Entry> entry : ids.entrySet()) { diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java index 0290bfb9c236f..e4b821fba7634 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStats; import org.elasticsearch.cluster.service.ClusterApplierRecordingService; import org.elasticsearch.cluster.service.ClusterApplierRecordingService.Stats.Recording; import org.elasticsearch.cluster.service.ClusterStateUpdateStats; @@ -1043,6 +1044,13 @@ public static NodeStats createNodeStats() { RepositoriesStats repositoriesStats = new RepositoriesStats( Map.of("test-repository", new RepositoriesStats.ThrottlingStats(100, 200)) ); + NodeAllocationStats nodeAllocationStats = new NodeAllocationStats( + randomIntBetween(0, 10000), + randomIntBetween(0, 1000), + randomDoubleBetween(0, 8, true), + randomNonNegativeLong(), + randomNonNegativeLong() + ); return new NodeStats( node, @@ -1062,7 +1070,8 @@ public static NodeStats createNodeStats() { adaptiveSelectionStats, scriptCacheStats, indexingPressureStats, - repositoriesStats + repositoriesStats, + nodeAllocationStats ); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestTests.java index 965654266e1cc..810d297602e8a 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestTests.java @@ -39,8 +39,7 @@ public void testValidateParameters() { assertThat(e.getMessage(), containsString("can't use offset with verbose=false")); } { - final GetSnapshotsRequest request = new GetSnapshotsRequest("repo", "snapshot").verbose(false) - .sort(GetSnapshotsRequest.SortBy.INDICES); + final GetSnapshotsRequest request = new GetSnapshotsRequest("repo", "snapshot").verbose(false).sort(SnapshotSortKey.INDICES); final ActionRequestValidationException e = request.validate(); assertThat(e.getMessage(), containsString("can't use non-default sort with verbose=false")); } @@ -51,7 +50,7 @@ public void testValidateParameters() { } { final GetSnapshotsRequest request = new GetSnapshotsRequest("repo", "snapshot").verbose(false) - .after(new GetSnapshotsRequest.After("foo", "repo", "bar")); + .after(new SnapshotSortKey.After("foo", "repo", "bar")); final ActionRequestValidationException e = request.validate(); assertThat(e.getMessage(), containsString("can't use after with verbose=false")); } @@ -62,14 +61,14 @@ public void testValidateParameters() { } { final GetSnapshotsRequest request = new GetSnapshotsRequest("repo", "snapshot").after( - new GetSnapshotsRequest.After("foo", "repo", "bar") + new 
SnapshotSortKey.After("foo", "repo", "bar") ).offset(randomIntBetween(1, 500)); final ActionRequestValidationException e = request.validate(); assertThat(e.getMessage(), containsString("can't use after and offset simultaneously")); } { final GetSnapshotsRequest request = new GetSnapshotsRequest("repo", "snapshot").fromSortValue("foo") - .after(new GetSnapshotsRequest.After("foo", "repo", "bar")); + .after(new SnapshotSortKey.After("foo", "repo", "bar")); final ActionRequestValidationException e = request.validate(); assertThat(e.getMessage(), containsString("can't use after and from_sort_value simultaneously")); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestTests.java index 21c5d0bee47e9..d77be7c45e416 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.admin.indices.get; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.core.Strings; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestRequestTests; @@ -71,4 +72,18 @@ public void testInvalidFeatures() { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> GetIndexRequest.Feature.fromRequest(request)); assertThat(e.getMessage(), containsString(Strings.format("Invalid features specified [%s]", String.join(",", invalidFeatures)))); } + + public void testIndicesOptions() { + GetIndexRequest getIndexRequest = new GetIndexRequest(); + assertThat( + getIndexRequest.indicesOptions().concreteTargetOptions(), + equalTo(IndicesOptions.strictExpandOpen().concreteTargetOptions()) + ); + assertThat(getIndexRequest.indicesOptions().wildcardOptions(), equalTo(IndicesOptions.strictExpandOpen().wildcardOptions())); + assertThat(getIndexRequest.indicesOptions().gatekeeperOptions(), equalTo(IndicesOptions.strictExpandOpen().gatekeeperOptions())); + assertThat( + getIndexRequest.indicesOptions().failureStoreOptions(), + equalTo(IndicesOptions.FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(true).build()) + ); + } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/ResolveClusterActionResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/ResolveClusterActionResponseTests.java index 322600fdeedff..33d4f0edf3450 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/ResolveClusterActionResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/ResolveClusterActionResponseTests.java @@ -20,22 +20,39 @@ public class ResolveClusterActionResponseTests extends AbstractWireSerializingTe @Override protected ResolveClusterActionResponse createTestInstance() { - return new ResolveClusterActionResponse(randomResolveClusterInfoMap()); + return new ResolveClusterActionResponse(randomResolveClusterInfoMap(null)); } - private Map randomResolveClusterInfoMap() { + private ResolveClusterInfo randomResolveClusterInfo(ResolveClusterInfo existing) { + if (existing == null) { + return randomResolveClusterInfo(); + } else { + return randomValueOtherThan(existing, () -> randomResolveClusterInfo()); + } + } + + private ResolveClusterInfo getResolveClusterInfoFromResponse(String key, ResolveClusterActionResponse 
response) { + if (response == null || response.getResolveClusterInfo() == null) { + return null; + } + return response.getResolveClusterInfo().get(key); + } + + private Map randomResolveClusterInfoMap(ResolveClusterActionResponse existingResponse) { Map infoMap = new HashMap<>(); int numClusters = randomIntBetween(0, 50); if (randomBoolean() || numClusters == 0) { - infoMap.put(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, randomResolveClusterInfo()); + String key = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; + infoMap.put(key, randomResolveClusterInfo(getResolveClusterInfoFromResponse(key, existingResponse))); } for (int i = 0; i < numClusters; i++) { - infoMap.put("remote_" + i, randomResolveClusterInfo()); + String key = "remote_" + i; + infoMap.put(key, randomResolveClusterInfo(getResolveClusterInfoFromResponse(key, existingResponse))); } return infoMap; } - private ResolveClusterInfo randomResolveClusterInfo() { + static ResolveClusterInfo randomResolveClusterInfo() { int val = randomIntBetween(1, 3); return switch (val) { case 1 -> new ResolveClusterInfo(false, randomBoolean()); @@ -52,6 +69,6 @@ protected Writeable.Reader instanceReader() { @Override protected ResolveClusterActionResponse mutateInstance(ResolveClusterActionResponse response) { - return new ResolveClusterActionResponse(randomResolveClusterInfoMap()); + return new ResolveClusterActionResponse(randomResolveClusterInfoMap(response)); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestTests.java index 48ab2b0802616..36e347204d1cc 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Predicates; import org.elasticsearch.test.AbstractXContentTestCase; import org.elasticsearch.test.XContentTestUtils; import org.elasticsearch.xcontent.ToXContent; @@ -110,7 +111,7 @@ protected Predicate getRandomFieldsExcludeFilter() { if (enclosedSettings) { return field -> field.startsWith("settings"); } - return field -> true; + return Predicates.always(); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/bulk/SimulateBulkRequestTests.java b/server/src/test/java/org/elasticsearch/action/bulk/SimulateBulkRequestTests.java index 7fe036f97596e..e7e922c47acbe 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/SimulateBulkRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/SimulateBulkRequestTests.java @@ -11,7 +11,6 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; -import java.util.HashMap; import java.util.List; import java.util.Map; @@ -35,32 +34,12 @@ private void testSerialization(Map> pipelineSubstitu assertThat(copy.getPipelineSubstitutions(), equalTo(simulateBulkRequest.getPipelineSubstitutions())); } - private Map> getTestPipelineSubstitutions() { - return new HashMap<>() { - { - put("pipeline1", new HashMap<>() { - { - put("processors", List.of(new HashMap<>() { - { - put("processor2", new HashMap<>()); - } - }, new HashMap<>() { - { - put("processor3", new HashMap<>()); - } - })); - } - }); - put("pipeline2", 
new HashMap<>() { - { - put("processors", List.of(new HashMap<>() { - { - put("processor3", new HashMap<>()); - } - })); - } - }); - } - }; + private static Map> getTestPipelineSubstitutions() { + return Map.of( + "pipeline1", + Map.of("processors", List.of(Map.of("processor2", Map.of()), Map.of("processor3", Map.of()))), + "pipeline2", + Map.of("processors", List.of(Map.of("processor3", Map.of()))) + ); } } diff --git a/server/src/test/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingServiceTests.java b/server/src/test/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingServiceTests.java new file mode 100644 index 0000000000000..674b3e855e912 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingServiceTests.java @@ -0,0 +1,771 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.datastreams.autosharding; + +import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition; +import org.elasticsearch.action.admin.indices.rollover.RolloverInfo; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamAutoShardingEvent; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.IndexMetadataStats; +import org.elasticsearch.cluster.metadata.IndexWriteLoad; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.features.FeatureService; +import org.elasticsearch.features.FeatureSpecification; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.Function; + +import static org.elasticsearch.action.datastreams.autosharding.AutoShardingResult.NOT_APPLICABLE_RESULT; +import static org.elasticsearch.action.datastreams.autosharding.AutoShardingType.COOLDOWN_PREVENTED_DECREASE; +import static org.elasticsearch.action.datastreams.autosharding.AutoShardingType.COOLDOWN_PREVENTED_INCREASE; +import static org.elasticsearch.action.datastreams.autosharding.AutoShardingType.DECREASE_SHARDS; +import static org.elasticsearch.action.datastreams.autosharding.AutoShardingType.INCREASE_SHARDS; +import static org.elasticsearch.action.datastreams.autosharding.AutoShardingType.NO_CHANGE_REQUIRED; +import static 
org.elasticsearch.test.ClusterServiceUtils.createClusterService; +import static org.hamcrest.Matchers.is; + +public class DataStreamAutoShardingServiceTests extends ESTestCase { + + private ClusterService clusterService; + private ThreadPool threadPool; + private DataStreamAutoShardingService service; + private long now; + String dataStreamName; + + @Before + public void setupService() { + threadPool = new TestThreadPool(getTestName()); + Set> builtInClusterSettings = new HashSet<>(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + builtInClusterSettings.add(DataStreamAutoShardingService.CLUSTER_AUTO_SHARDING_MIN_WRITE_THREADS); + builtInClusterSettings.add(DataStreamAutoShardingService.CLUSTER_AUTO_SHARDING_MAX_WRITE_THREADS); + builtInClusterSettings.add(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_INCREASE_SHARDS_COOLDOWN); + builtInClusterSettings.add(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_DECREASE_SHARDS_COOLDOWN); + builtInClusterSettings.add( + Setting.boolSetting( + DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_ENABLED, + false, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ) + ); + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, builtInClusterSettings); + clusterService = createClusterService(threadPool, clusterSettings); + now = System.currentTimeMillis(); + service = new DataStreamAutoShardingService( + Settings.builder() + .put(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_ENABLED, true) + .putList(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING.getKey(), List.of()) + .build(), + clusterService, + new FeatureService(List.of(new FeatureSpecification() { + @Override + public Set getFeatures() { + return Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE); + } + })), + () -> now + ); + dataStreamName = randomAlphaOfLengthBetween(10, 100); + logger.info("-> data stream name is [{}]", dataStreamName); + } + + @After + public void cleanup() { + clusterService.close(); + threadPool.shutdownNow(); + } + + public void testCalculateValidations() { + Metadata.Builder builder = Metadata.builder(); + DataStream dataStream = createDataStream( + builder, + dataStreamName, + 1, + now, + List.of(now - 3000, now - 2000, now - 1000), + getWriteLoad(1, 2.0), + null + ); + builder.put(dataStream); + ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodeFeatures( + Map.of( + "n1", + Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()), + "n2", + Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()) + ) + ) + .metadata(builder) + .build(); + + { + // autosharding disabled + DataStreamAutoShardingService disabledAutoshardingService = new DataStreamAutoShardingService( + Settings.EMPTY, + clusterService, + new FeatureService(List.of(new FeatureSpecification() { + @Override + public Set getFeatures() { + return Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE); + } + })), + System::currentTimeMillis + ); + + AutoShardingResult autoShardingResult = disabledAutoshardingService.calculate(state, dataStream, 2.0); + assertThat(autoShardingResult, is(NOT_APPLICABLE_RESULT)); + } + + { + // cluster doesn't have feature + ClusterState stateNoFeature = ClusterState.builder(ClusterName.DEFAULT).metadata(Metadata.builder()).build(); + + DataStreamAutoShardingService noFeatureService = new DataStreamAutoShardingService( + Settings.builder() + 
.put(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_ENABLED, true) + .putList(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING.getKey(), List.of()) + .build(), + clusterService, + new FeatureService(List.of()), + () -> now + ); + + AutoShardingResult autoShardingResult = noFeatureService.calculate(stateNoFeature, dataStream, 2.0); + assertThat(autoShardingResult, is(NOT_APPLICABLE_RESULT)); + } + + { + // patterns are configured to exclude the current data stream + DataStreamAutoShardingService noFeatureService = new DataStreamAutoShardingService( + Settings.builder() + .put(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_ENABLED, true) + .putList( + DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING.getKey(), + List.of("foo", dataStreamName + "*") + ) + .build(), + clusterService, + new FeatureService(List.of()), + () -> now + ); + + AutoShardingResult autoShardingResult = noFeatureService.calculate(state, dataStream, 2.0); + assertThat(autoShardingResult, is(NOT_APPLICABLE_RESULT)); + } + + { + // null write load passed + AutoShardingResult autoShardingResult = service.calculate(state, dataStream, null); + assertThat(autoShardingResult, is(NOT_APPLICABLE_RESULT)); + } + } + + public void testCalculateIncreaseShardingRecommendations() { + // the input is a data stream with 5 backing indices with 1 shard each + // all 4 backing indices have a write load of 2.0 + // we'll recreate it across the test and add an auto sharding event as we iterate + { + Metadata.Builder builder = Metadata.builder(); + Function dataStreamSupplier = (autoShardingEvent) -> createDataStream( + builder, + dataStreamName, + 1, + now, + List.of(now - 10_000, now - 7000, now - 5000, now - 2000, now - 1000), + getWriteLoad(1, 2.0), + autoShardingEvent + ); + + DataStream dataStream = dataStreamSupplier.apply(null); + builder.put(dataStream); + ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodeFeatures( + Map.of( + "n1", + Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()), + "n2", + Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()) + ) + ) + .metadata(builder) + .build(); + + AutoShardingResult autoShardingResult = service.calculate(state, dataStream, 2.5); + assertThat(autoShardingResult.type(), is(INCREASE_SHARDS)); + // no pre-existing scaling event so the cool down must be zero + assertThat(autoShardingResult.coolDownRemaining(), is(TimeValue.ZERO)); + assertThat(autoShardingResult.targetNumberOfShards(), is(3)); + } + + { + // let's add a pre-existing sharding event so that we'll return some cool down period that's preventing an INCREASE_SHARDS + // event so the result type we're expecting is COOLDOWN_PREVENTED_INCREASE + Metadata.Builder builder = Metadata.builder(); + Function dataStreamSupplier = (autoShardingEvent) -> createDataStream( + builder, + dataStreamName, + 1, + now, + List.of(now - 10_000, now - 7000, now - 5000, now - 2000, now - 1000), + getWriteLoad(1, 2.0), + autoShardingEvent + ); + + // generation 4 triggered an auto sharding event to 2 shards + DataStream dataStream = dataStreamSupplier.apply( + new DataStreamAutoShardingEvent(DataStream.getDefaultBackingIndexName(dataStreamName, 4), 2, now - 1005) + ); + builder.put(dataStream); + ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodeFeatures( + Map.of( + "n1", + Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()), + "n2", + 
Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id())
+                    )
+                )
+                .metadata(builder)
+                .build();
+
+            AutoShardingResult autoShardingResult = service.calculate(state, dataStream, 2.5);
+            assertThat(autoShardingResult.type(), is(COOLDOWN_PREVENTED_INCREASE));
+            // no pre-existing scaling event so the cool down must be zero
+            assertThat(autoShardingResult.targetNumberOfShards(), is(3));
+            // it's been 1005 millis since the last auto sharding event and the cool down is 270 seconds (270_000 millis)
+            assertThat(autoShardingResult.coolDownRemaining(), is(TimeValue.timeValueMillis(268995)));
+        }
+
+        {
+            // let's test a subsequent increase in the number of shards after a previous auto sharding event
+            Metadata.Builder builder = Metadata.builder();
+            Function dataStreamSupplier = (autoShardingEvent) -> createDataStream(
+                builder,
+                dataStreamName,
+                1,
+                now,
+                List.of(now - 10_000_000, now - 7_000_000, now - 2_000_000, now - 1_000_000, now - 1000),
+                getWriteLoad(1, 2.0),
+                autoShardingEvent
+            );
+
+            // generation 3 triggered an increase in shards event to 2 shards
+            DataStream dataStream = dataStreamSupplier.apply(
+                new DataStreamAutoShardingEvent(DataStream.getDefaultBackingIndexName(dataStreamName, 4), 2, now - 2_000_100)
+            );
+            builder.put(dataStream);
+            ClusterState state = ClusterState.builder(ClusterName.DEFAULT)
+                .nodeFeatures(
+                    Map.of(
+                        "n1",
+                        Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()),
+                        "n2",
+                        Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id())
+                    )
+                )
+                .metadata(builder)
+                .build();
+
+            AutoShardingResult autoShardingResult = service.calculate(state, dataStream, 2.5);
+            assertThat(autoShardingResult.type(), is(INCREASE_SHARDS));
+            // no pre-existing scaling event so the cool down must be zero
+            assertThat(autoShardingResult.targetNumberOfShards(), is(3));
+            assertThat(autoShardingResult.coolDownRemaining(), is(TimeValue.ZERO));
+        }
+    }
+
+    public void testCalculateDecreaseShardingRecommendations() {
+        // the input is a data stream with 5 backing indices with 3 shards each
+        {
+            // testing a decrease shards event prevented by the cool down period not lapsing due to the oldest generation index being
+            // "too new" (i.e.
the cool down period hasn't lapsed since the oldest generation index) + Metadata.Builder builder = Metadata.builder(); + Function dataStreamSupplier = (autoShardingEvent) -> createDataStream( + builder, + dataStreamName, + 3, + now, + List.of(now - 10_000, now - 7000, now - 5000, now - 2000, now - 1000), + getWriteLoad(3, 0.25), + autoShardingEvent + ); + + DataStream dataStream = dataStreamSupplier.apply(null); + builder.put(dataStream); + ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodeFeatures( + Map.of( + "n1", + Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()), + "n2", + Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()) + ) + ) + .metadata(builder) + .build(); + + AutoShardingResult autoShardingResult = service.calculate(state, dataStream, 1.0); + // the cooldown period for the decrease shards event hasn't lapsed since the data stream was created + assertThat(autoShardingResult.type(), is(COOLDOWN_PREVENTED_DECREASE)); + assertThat(autoShardingResult.coolDownRemaining(), is(TimeValue.timeValueMillis(TimeValue.timeValueDays(3).millis() - 10_000))); + // based on the write load of 0.75 we should be reducing the number of shards to 1 + assertThat(autoShardingResult.targetNumberOfShards(), is(1)); + } + + { + Metadata.Builder builder = Metadata.builder(); + Function dataStreamSupplier = (autoShardingEvent) -> createDataStream( + builder, + dataStreamName, + 3, + now, + List.of( + now - TimeValue.timeValueDays(21).getMillis(), + now - TimeValue.timeValueDays(15).getMillis(), + now - TimeValue.timeValueDays(4).getMillis(), + now - TimeValue.timeValueDays(2).getMillis(), + now - 1000 + ), + getWriteLoad(3, 0.333), + autoShardingEvent + ); + + DataStream dataStream = dataStreamSupplier.apply(null); + builder.put(dataStream); + ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodeFeatures( + Map.of( + "n1", + Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()), + "n2", + Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()) + ) + ) + .metadata(builder) + .build(); + + AutoShardingResult autoShardingResult = service.calculate(state, dataStream, 1.0); + assertThat(autoShardingResult.type(), is(DECREASE_SHARDS)); + assertThat(autoShardingResult.targetNumberOfShards(), is(1)); + // no pre-existing auto sharding event however we have old enough backing indices (older than the cooldown period) so we can + // make a decision to reduce the number of shards + assertThat(autoShardingResult.coolDownRemaining(), is(TimeValue.ZERO)); + } + + { + // let's test a decrease in number of shards after a previous decrease event + Metadata.Builder builder = Metadata.builder(); + Function dataStreamSupplier = (autoShardingEvent) -> createDataStream( + builder, + dataStreamName, + 3, + now, + List.of( + now - TimeValue.timeValueDays(21).getMillis(), + now - TimeValue.timeValueDays(15).getMillis(), // triggers auto sharding event + now - TimeValue.timeValueDays(4).getMillis(), + now - TimeValue.timeValueDays(2).getMillis(), + now - 1000 + ), + getWriteLoad(3, 0.333), + autoShardingEvent + ); + + // generation 2 triggered a decrease in shards event to 2 shards + DataStream dataStream = dataStreamSupplier.apply( + new DataStreamAutoShardingEvent( + DataStream.getDefaultBackingIndexName(dataStreamName, 2), + 2, + now - TimeValue.timeValueDays(4).getMillis() + ) + ); + builder.put(dataStream); + ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodeFeatures( + 
Map.of( + "n1", + Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()), + "n2", + Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()) + ) + ) + .metadata(builder) + .build(); + + AutoShardingResult autoShardingResult = service.calculate(state, dataStream, 1.0); + assertThat(autoShardingResult.type(), is(DECREASE_SHARDS)); + assertThat(autoShardingResult.targetNumberOfShards(), is(1)); + assertThat(autoShardingResult.coolDownRemaining(), is(TimeValue.ZERO)); + } + + { + // let's test a decrease in number of shards that's prevented by the cool down period due to a previous sharding event + // the expected result type here is COOLDOWN_PREVENTED_DECREASE + Metadata.Builder builder = Metadata.builder(); + Function dataStreamSupplier = (autoShardingEvent) -> createDataStream( + builder, + dataStreamName, + 3, + now, + List.of( + now - TimeValue.timeValueDays(21).getMillis(), + now - TimeValue.timeValueDays(2).getMillis(), // triggers auto sharding event + now - TimeValue.timeValueDays(1).getMillis(), + now - 1000 + ), + getWriteLoad(3, 0.25), + autoShardingEvent + ); + + // generation 2 triggered a decrease in shards event to 2 shards + DataStream dataStream = dataStreamSupplier.apply( + new DataStreamAutoShardingEvent( + DataStream.getDefaultBackingIndexName(dataStreamName, 2), + 2, + now - TimeValue.timeValueDays(2).getMillis() + ) + ); + builder.put(dataStream); + ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodeFeatures( + Map.of( + "n1", + Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()), + "n2", + Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()) + ) + ) + .metadata(builder) + .build(); + + AutoShardingResult autoShardingResult = service.calculate(state, dataStream, 1.0); + assertThat(autoShardingResult.type(), is(COOLDOWN_PREVENTED_DECREASE)); + assertThat(autoShardingResult.targetNumberOfShards(), is(1)); + assertThat(autoShardingResult.coolDownRemaining(), is(TimeValue.timeValueDays(1))); + } + + { + // no change required + Metadata.Builder builder = Metadata.builder(); + Function dataStreamSupplier = (autoShardingEvent) -> createDataStream( + builder, + dataStreamName, + 3, + now, + List.of( + now - TimeValue.timeValueDays(21).getMillis(), + now - TimeValue.timeValueDays(15).getMillis(), + now - TimeValue.timeValueDays(4).getMillis(), + now - TimeValue.timeValueDays(2).getMillis(), + now - 1000 + ), + getWriteLoad(3, 1.333), + autoShardingEvent + ); + + // generation 2 triggered a decrease in shards event to 2 shards + DataStream dataStream = dataStreamSupplier.apply(null); + builder.put(dataStream); + ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodeFeatures( + Map.of( + "n1", + Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()), + "n2", + Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()) + ) + ) + .metadata(builder) + .build(); + + AutoShardingResult autoShardingResult = service.calculate(state, dataStream, 4.0); + assertThat(autoShardingResult.type(), is(NO_CHANGE_REQUIRED)); + assertThat(autoShardingResult.targetNumberOfShards(), is(3)); + assertThat(autoShardingResult.coolDownRemaining(), is(TimeValue.ZERO)); + } + } + + public void testComputeOptimalNumberOfShards() { + int minWriteThreads = 2; + int maxWriteThreads = 32; + { + // the small values will be very common so let's randomise to make sure we never go below 1L + double indexingLoad = randomDoubleBetween(0.0001, 1.0, true); + 
logger.info("-> indexingLoad {}", indexingLoad); + assertThat(DataStreamAutoShardingService.computeOptimalNumberOfShards(minWriteThreads, maxWriteThreads, indexingLoad), is(1L)); + } + + { + double indexingLoad = 2.0; + logger.info("-> indexingLoad {}", indexingLoad); + assertThat(DataStreamAutoShardingService.computeOptimalNumberOfShards(minWriteThreads, maxWriteThreads, indexingLoad), is(2L)); + } + + { + // there's a broad range of popular values (a write index starting to be very busy, using between 3 and all of the 32 write + // threads, so let's randomise this too to make sure we stay at 3 recommended shards) + double indexingLoad = randomDoubleBetween(3.0002, 32.0, true); + logger.info("-> indexingLoad {}", indexingLoad); + + assertThat(DataStreamAutoShardingService.computeOptimalNumberOfShards(minWriteThreads, maxWriteThreads, indexingLoad), is(3L)); + } + + { + double indexingLoad = 49.0; + logger.info("-> indexingLoad {}", indexingLoad); + assertThat(DataStreamAutoShardingService.computeOptimalNumberOfShards(minWriteThreads, maxWriteThreads, indexingLoad), is(4L)); + } + + { + double indexingLoad = 70.0; + logger.info("-> indexingLoad {}", indexingLoad); + assertThat(DataStreamAutoShardingService.computeOptimalNumberOfShards(minWriteThreads, maxWriteThreads, indexingLoad), is(5L)); + } + + { + double indexingLoad = 100.0; + logger.info("-> indexingLoad {}", indexingLoad); + assertThat(DataStreamAutoShardingService.computeOptimalNumberOfShards(minWriteThreads, maxWriteThreads, indexingLoad), is(7L)); + } + + { + double indexingLoad = 180.0; + logger.info("-> indexingLoad {}", indexingLoad); + assertThat(DataStreamAutoShardingService.computeOptimalNumberOfShards(minWriteThreads, maxWriteThreads, indexingLoad), is(12L)); + } + } + + public void testGetMaxIndexLoadWithinCoolingPeriod() { + final TimeValue coolingPeriod = TimeValue.timeValueDays(3); + + final Metadata.Builder metadataBuilder = Metadata.builder(); + final int numberOfBackingIndicesOutsideCoolingPeriod = randomIntBetween(3, 10); + final int numberOfBackingIndicesWithinCoolingPeriod = randomIntBetween(3, 10); + final List backingIndices = new ArrayList<>(); + final String dataStreamName = "logs"; + long now = System.currentTimeMillis(); + + // to cover the entire cooling period we'll also include the backing index right before the index age calculation + // this flag makes that index have a very low or very high write load + boolean lastIndexBeforeCoolingPeriodHasLowWriteLoad = randomBoolean(); + for (int i = 0; i < numberOfBackingIndicesOutsideCoolingPeriod; i++) { + long creationDate = now - (coolingPeriod.millis() * 2); + IndexMetadata indexMetadata = createIndexMetadata( + DataStream.getDefaultBackingIndexName(dataStreamName, backingIndices.size(), creationDate), + 1, + getWriteLoad(1, 999.0), + creationDate + ); + + if (lastIndexBeforeCoolingPeriodHasLowWriteLoad) { + indexMetadata = createIndexMetadata( + DataStream.getDefaultBackingIndexName(dataStreamName, backingIndices.size(), creationDate), + 1, + getWriteLoad(1, 1.0), + creationDate + ); + } + backingIndices.add(indexMetadata.getIndex()); + metadataBuilder.put(indexMetadata, false); + } + + for (int i = 0; i < numberOfBackingIndicesWithinCoolingPeriod; i++) { + final long createdAt = now - (coolingPeriod.getMillis() / 2); + IndexMetadata indexMetadata; + if (i == numberOfBackingIndicesWithinCoolingPeriod - 1) { + indexMetadata = createIndexMetadata( + DataStream.getDefaultBackingIndexName(dataStreamName, backingIndices.size(), createdAt), + 3, + 
getWriteLoad(3, 5.0), // max write index within cooling period + createdAt + ); + } else { + indexMetadata = createIndexMetadata( + DataStream.getDefaultBackingIndexName(dataStreamName, backingIndices.size(), createdAt), + 3, + getWriteLoad(3, 3.0), // each backing index has a write load of 9.0 + createdAt + ); + } + backingIndices.add(indexMetadata.getIndex()); + metadataBuilder.put(indexMetadata, false); + } + + final String writeIndexName = DataStream.getDefaultBackingIndexName(dataStreamName, backingIndices.size()); + final IndexMetadata writeIndexMetadata = createIndexMetadata(writeIndexName, 3, getWriteLoad(3, 1.0), System.currentTimeMillis()); + backingIndices.add(writeIndexMetadata.getIndex()); + metadataBuilder.put(writeIndexMetadata, false); + + final DataStream dataStream = new DataStream( + dataStreamName, + backingIndices, + backingIndices.size(), + Collections.emptyMap(), + false, + false, + false, + false, + IndexMode.STANDARD + ); + + metadataBuilder.put(dataStream); + + double maxIndexLoadWithinCoolingPeriod = DataStreamAutoShardingService.getMaxIndexLoadWithinCoolingPeriod( + metadataBuilder.build(), + dataStream, + 3.0, + coolingPeriod, + () -> now + ); + // to cover the entire cooldown period, the last index before the cooling period is taken into account + assertThat(maxIndexLoadWithinCoolingPeriod, is(lastIndexBeforeCoolingPeriodHasLowWriteLoad ? 15.0 : 999.0)); + } + + public void testAutoShardingResultValidation() { + { + // throws exception when constructed using types that shouldn't report cooldowns + expectThrows( + IllegalArgumentException.class, + () -> new AutoShardingResult(INCREASE_SHARDS, 1, 3, TimeValue.timeValueSeconds(3), 3.0) + ); + + expectThrows( + IllegalArgumentException.class, + () -> new AutoShardingResult(DECREASE_SHARDS, 3, 1, TimeValue.timeValueSeconds(3), 1.0) + ); + + } + + { + // we can successfully create results with cooldown period for the designated types + AutoShardingResult cooldownPreventedIncrease = new AutoShardingResult( + COOLDOWN_PREVENTED_INCREASE, + 1, + 3, + TimeValue.timeValueSeconds(3), + 3.0 + ); + assertThat(cooldownPreventedIncrease.coolDownRemaining(), is(TimeValue.timeValueSeconds(3))); + + AutoShardingResult cooldownPreventedDecrease = new AutoShardingResult( + COOLDOWN_PREVENTED_DECREASE, + 3, + 1, + TimeValue.timeValueSeconds(7), + 1.0 + ); + assertThat(cooldownPreventedDecrease.coolDownRemaining(), is(TimeValue.timeValueSeconds(7))); + } + } + + private DataStream createDataStream( + Metadata.Builder builder, + String dataStreamName, + int numberOfShards, + Long now, + List indicesCreationDate, + IndexWriteLoad backingIndicesWriteLoad, + @Nullable DataStreamAutoShardingEvent autoShardingEvent + ) { + final List backingIndices = new ArrayList<>(); + int backingIndicesCount = indicesCreationDate.size(); + for (int k = 0; k < indicesCreationDate.size(); k++) { + long createdAt = indicesCreationDate.get(k); + IndexMetadata.Builder indexMetaBuilder; + if (k < backingIndicesCount - 1) { + indexMetaBuilder = IndexMetadata.builder( + createIndexMetadata( + DataStream.getDefaultBackingIndexName(dataStreamName, k + 1), + numberOfShards, + backingIndicesWriteLoad, + createdAt + ) + ); + // add rollover info only for non-write indices + MaxAgeCondition rolloverCondition = new MaxAgeCondition(TimeValue.timeValueMillis(now - 2000L)); + indexMetaBuilder.putRolloverInfo(new RolloverInfo(dataStreamName, List.of(rolloverCondition), now - 2000L)); + } else { + // write index + indexMetaBuilder = IndexMetadata.builder( + 
createIndexMetadata(DataStream.getDefaultBackingIndexName(dataStreamName, k + 1), numberOfShards, null, createdAt) + ); + } + IndexMetadata indexMetadata = indexMetaBuilder.build(); + builder.put(indexMetadata, false); + backingIndices.add(indexMetadata.getIndex()); + } + return new DataStream( + dataStreamName, + backingIndices, + backingIndicesCount, + null, + false, + false, + false, + false, + null, + null, + false, + List.of(), + autoShardingEvent + ); + } + + private IndexMetadata createIndexMetadata( + String indexName, + int numberOfShards, + @Nullable IndexWriteLoad indexWriteLoad, + long createdAt + ) { + return IndexMetadata.builder(indexName) + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numberOfShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + .build() + ) + .stats(indexWriteLoad == null ? null : new IndexMetadataStats(indexWriteLoad, 1, 1)) + .creationDate(createdAt) + .build(); + } + + private IndexWriteLoad getWriteLoad(int numberOfShards, double shardWriteLoad) { + IndexWriteLoad.Builder builder = IndexWriteLoad.builder(numberOfShards); + for (int shardId = 0; shardId < numberOfShards; shardId++) { + builder.withShardWriteLoad(shardId, shardWriteLoad, randomLongBetween(1, 10)); + } + return builder.build(); + } + +} diff --git a/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java b/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java index deec53de59326..297ebbae6c85a 100644 --- a/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java @@ -9,7 +9,8 @@ package org.elasticsearch.action.support; import org.elasticsearch.action.support.IndicesOptions.ConcreteTargetOptions; -import org.elasticsearch.action.support.IndicesOptions.GeneralOptions; +import org.elasticsearch.action.support.IndicesOptions.FailureStoreOptions; +import org.elasticsearch.action.support.IndicesOptions.GatekeeperOptions; import org.elasticsearch.action.support.IndicesOptions.WildcardOptions; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -40,17 +41,25 @@ public class IndicesOptionsTests extends ESTestCase { public void testSerialization() throws Exception { int iterations = randomIntBetween(5, 20); for (int i = 0; i < iterations; i++) { - IndicesOptions indicesOptions = IndicesOptions.fromOptions( - randomBoolean(), - randomBoolean(), - randomBoolean(), - randomBoolean(), - randomBoolean(), - randomBoolean(), - randomBoolean(), - randomBoolean(), - randomBoolean() - ); + IndicesOptions indicesOptions = IndicesOptions.builder() + .wildcardOptions( + WildcardOptions.builder() + .matchOpen(randomBoolean()) + .matchClosed(randomBoolean()) + .includeHidden(randomBoolean()) + .allowEmptyExpressions(randomBoolean()) + .resolveAliases(randomBoolean()) + ) + .gatekeeperOptions( + GatekeeperOptions.builder() + .ignoreThrottled(randomBoolean()) + .allowAliasToMultipleIndices(randomBoolean()) + .allowClosedIndices(randomBoolean()) + ) + .failureStoreOptions( + FailureStoreOptions.builder().includeRegularIndices(randomBoolean()).includeFailureIndices(randomBoolean()) + ) + .build(); BytesStreamOutput output = new BytesStreamOutput(); indicesOptions.writeIndicesOptions(output); @@ -58,16 +67,7 @@ public void testSerialization() throws Exception { StreamInput streamInput = 
output.bytes().streamInput(); IndicesOptions indicesOptions2 = IndicesOptions.readIndicesOptions(streamInput); - assertThat(indicesOptions2.ignoreUnavailable(), equalTo(indicesOptions.ignoreUnavailable())); - assertThat(indicesOptions2.allowNoIndices(), equalTo(indicesOptions.allowNoIndices())); - assertThat(indicesOptions2.expandWildcardsOpen(), equalTo(indicesOptions.expandWildcardsOpen())); - assertThat(indicesOptions2.expandWildcardsClosed(), equalTo(indicesOptions.expandWildcardsClosed())); - assertThat(indicesOptions2.expandWildcardsHidden(), equalTo(indicesOptions.expandWildcardsHidden())); - - assertThat(indicesOptions2.forbidClosedIndices(), equalTo(indicesOptions.forbidClosedIndices())); - assertThat(indicesOptions2.allowAliasesToMultipleIndices(), equalTo(indicesOptions.allowAliasesToMultipleIndices())); - - assertEquals(indicesOptions2.ignoreAliases(), indicesOptions.ignoreAliases()); + assertThat(indicesOptions2, equalTo(indicesOptions)); } } @@ -343,9 +343,10 @@ public void testToXContent() throws IOException { randomBoolean(), randomBoolean() ); - GeneralOptions generalOptions = new GeneralOptions(randomBoolean(), randomBoolean(), randomBoolean()); + GatekeeperOptions gatekeeperOptions = new GatekeeperOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()); + FailureStoreOptions failureStoreOptions = new IndicesOptions.FailureStoreOptions(randomBoolean(), randomBoolean()); - IndicesOptions indicesOptions = new IndicesOptions(concreteTargetOptions, wildcardOptions, generalOptions); + IndicesOptions indicesOptions = new IndicesOptions(concreteTargetOptions, wildcardOptions, gatekeeperOptions, failureStoreOptions); XContentType type = randomFrom(XContentType.values()); BytesReference xContentBytes = toXContentBytes(indicesOptions, type); @@ -359,7 +360,8 @@ public void testToXContent() throws IOException { assertThat(((List) map.get("expand_wildcards")).contains("hidden"), equalTo(wildcardOptions.includeHidden())); assertThat(map.get("ignore_unavailable"), equalTo(concreteTargetOptions.allowUnavailableTargets())); assertThat(map.get("allow_no_indices"), equalTo(wildcardOptions.allowEmptyExpressions())); - assertThat(map.get("ignore_throttled"), equalTo(generalOptions.ignoreThrottled())); + assertThat(map.get("ignore_throttled"), equalTo(gatekeeperOptions.ignoreThrottled())); + assertThat(map.get("failure_store"), equalTo(failureStoreOptions.displayValue())); } public void testFromXContent() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java index e0538603573f7..ee98f40a6cb29 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java @@ -61,7 +61,6 @@ import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -396,13 +395,14 @@ public void testToXContent() throws IOException { } public void testToXContent_FlatSettingTrue_ReduceMappingFalse() throws IOException { - Map mapParams = new HashMap<>() { - { - put("flat_settings", "true"); - put("reduce_mappings", "false"); - put(Metadata.CONTEXT_MODE_PARAM, Metadata.CONTEXT_MODE_API); - } - }; + Map mapParams = Map.of( + "flat_settings", + "true", + "reduce_mappings", + "false", + Metadata.CONTEXT_MODE_PARAM, + Metadata.CONTEXT_MODE_API + ); final ClusterState clusterState = 
buildClusterState(); IndexRoutingTable index = clusterState.getRoutingTable().getIndicesRouting().get("index"); @@ -661,13 +661,14 @@ public void testToXContent_FlatSettingTrue_ReduceMappingFalse() throws IOExcepti } public void testToXContent_FlatSettingFalse_ReduceMappingTrue() throws IOException { - Map mapParams = new HashMap<>() { - { - put("flat_settings", "false"); - put("reduce_mappings", "true"); - put(Metadata.CONTEXT_MODE_PARAM, Metadata.CONTEXT_MODE_API); - } - }; + Map mapParams = Map.of( + "flat_settings", + "false", + "reduce_mappings", + "true", + Metadata.CONTEXT_MODE_PARAM, + Metadata.CONTEXT_MODE_API + ); final ClusterState clusterState = buildClusterState(); @@ -948,15 +949,7 @@ public void testToXContentSameTypeName() throws IOException { "type", // the type name is the root value, // the original logic in ClusterState.toXContent will reduce - new HashMap<>() { - { - put("type", new HashMap() { - { - put("key", "value"); - } - }); - } - } + Map.of("type", Map.of("key", "value")) ) ) .numberOfShards(1) @@ -1086,23 +1079,11 @@ private ClusterState buildClusterState() throws IOException { IndexMetadata indexMetadata = IndexMetadata.builder("index") .state(IndexMetadata.State.OPEN) .settings(Settings.builder().put(SETTING_VERSION_CREATED, IndexVersion.current())) - .putMapping(new MappingMetadata("type", new HashMap<>() { - { - put("type1", new HashMap() { - { - put("key", "value"); - } - }); - } - })) + .putMapping(new MappingMetadata("type", Map.of("type1", Map.of("key", "value")))) .putAlias(AliasMetadata.builder("alias").indexRouting("indexRouting").build()) .numberOfShards(1) .primaryTerm(0, 1L) - .putInSyncAllocationIds(0, new HashSet<>() { - { - add("allocationId"); - } - }) + .putInSyncAllocationIds(0, Set.of("allocationId")) .numberOfReplicas(2) .putRolloverInfo(new RolloverInfo("rolloveAlias", new ArrayList<>(), 1L)) .stats(new IndexMetadataStats(IndexWriteLoad.builder(1).build(), 120, 1)) @@ -1150,16 +1131,8 @@ private ClusterState buildClusterState() throws IOException { .coordinationMetadata( CoordinationMetadata.builder() .term(1) - .lastCommittedConfiguration(new CoordinationMetadata.VotingConfiguration(new HashSet<>() { - { - add("commitedConfigurationNodeId"); - } - })) - .lastAcceptedConfiguration(new CoordinationMetadata.VotingConfiguration(new HashSet<>() { - { - add("acceptedConfigurationNodeId"); - } - })) + .lastCommittedConfiguration(new CoordinationMetadata.VotingConfiguration(Set.of("commitedConfigurationNodeId"))) + .lastAcceptedConfiguration(new CoordinationMetadata.VotingConfiguration(Set.of("acceptedConfigurationNodeId"))) .addVotingConfigExclusion(new CoordinationMetadata.VotingConfigExclusion("exlucdedNodeId", "excludedNodeName")) .build() ) diff --git a/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java b/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java index 5e122c4050b6c..8334c535cea43 100644 --- a/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java @@ -183,6 +183,7 @@ public void testLeastAndMostAvailableDiskSpace() { null, null, null, + null, null ); DiskUsage leastNode = DiskUsage.findLeastAvailablePath(nodeStats); @@ -211,6 +212,7 @@ public void testLeastAndMostAvailableDiskSpace() { null, null, null, + null, null ); DiskUsage leastNode = DiskUsage.findLeastAvailablePath(nodeStats); @@ -241,6 +243,7 @@ public void testLeastAndMostAvailableDiskSpace() { null, null, null, + null, null ); DiskUsage leastNode = 
DiskUsage.findLeastAvailablePath(nodeStats); @@ -275,6 +278,7 @@ public void testLeastAndMostAvailableDiskSpaceSomeInvalidValues() { null, null, null, + null, null ); DiskUsage leastNode = DiskUsage.findLeastAvailablePath(nodeStats); @@ -304,6 +308,7 @@ public void testLeastAndMostAvailableDiskSpaceSomeInvalidValues() { null, null, null, + null, null ); DiskUsage leastNode = DiskUsage.findLeastAvailablePath(nodeStats); @@ -334,6 +339,7 @@ public void testLeastAndMostAvailableDiskSpaceSomeInvalidValues() { null, null, null, + null, null ); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java index 3fc62981b75ba..2985cd33aaa64 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java @@ -1885,31 +1885,23 @@ public void testImproveConfigurationPerformsVotingConfigExclusionStateCheck() { final Coordinator coordinator = cluster.getAnyLeader().coordinator; final ClusterState currentState = coordinator.getLastAcceptedState(); - Set newVotingConfigExclusion1 = new HashSet<>() { - { - add( - new CoordinationMetadata.VotingConfigExclusion( - "resolvableNodeId", - CoordinationMetadata.VotingConfigExclusion.MISSING_VALUE_MARKER - ) - ); - } - }; + Set newVotingConfigExclusion1 = Set.of( + new CoordinationMetadata.VotingConfigExclusion( + "resolvableNodeId", + CoordinationMetadata.VotingConfigExclusion.MISSING_VALUE_MARKER + ) + ); ClusterState newState1 = buildNewClusterStateWithVotingConfigExclusion(currentState, newVotingConfigExclusion1); assertFalse(Coordinator.validVotingConfigExclusionState(newState1)); - Set newVotingConfigExclusion2 = new HashSet<>() { - { - add( - new CoordinationMetadata.VotingConfigExclusion( - CoordinationMetadata.VotingConfigExclusion.MISSING_VALUE_MARKER, - "resolvableNodeName" - ) - ); - } - }; + Set newVotingConfigExclusion2 = Set.of( + new CoordinationMetadata.VotingConfigExclusion( + CoordinationMetadata.VotingConfigExclusion.MISSING_VALUE_MARKER, + "resolvableNodeName" + ) + ); ClusterState newState2 = buildNewClusterStateWithVotingConfigExclusion(currentState, newVotingConfigExclusion2); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/AtomicRegisterPreVoteCollectorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/AtomicRegisterPreVoteCollectorTests.java index ddb1ccbbd4f9a..f0b6d62ef9767 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/AtomicRegisterPreVoteCollectorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/AtomicRegisterPreVoteCollectorTests.java @@ -8,12 +8,16 @@ package org.elasticsearch.cluster.coordination.stateless; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.threadpool.ThreadPool; import org.junit.After; import org.junit.Before; @@ -65,7 +69,7 @@ protected long absoluteTimeInMillis() { // Either there's no heartbeat or 
is stale if (randomBoolean()) { PlainActionFuture.get(f -> heartbeatStore.writeHeartbeat(new Heartbeat(1, fakeClock.get()), f)); - fakeClock.set(maxTimeSinceLastHeartbeat.millis() + 1); + fakeClock.set(maxTimeSinceLastHeartbeat.millis() + randomLongBetween(0, 1000)); } var startElection = new AtomicBoolean(); @@ -76,6 +80,55 @@ protected long absoluteTimeInMillis() { assertThat(startElection.get(), is(true)); } + public void testLogSkippedElectionIfRecentLeaderHeartbeat() throws Exception { + final var currentTermProvider = new AtomicLong(1); + final var heartbeatFrequency = TimeValue.timeValueSeconds(randomIntBetween(15, 30)); + final var maxTimeSinceLastHeartbeat = TimeValue.timeValueSeconds(2 * heartbeatFrequency.seconds()); + DiscoveryNodeUtils.create("master"); + final var logger = LogManager.getLogger(AtomicRegisterPreVoteCollector.class); + final var appender = new MockLogAppender(); + appender.start(); + try { + Loggers.addAppender(logger, appender); + appender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "log emitted when skipping election", + AtomicRegisterPreVoteCollector.class.getCanonicalName(), + Level.INFO, + "skipping election since there is a recent heartbeat*" + ) + ); + final var fakeClock = new AtomicLong(); + final var heartbeatStore = new InMemoryHeartbeatStore(); + final var heartbeatService = new StoreHeartbeatService( + heartbeatStore, + threadPool, + heartbeatFrequency, + maxTimeSinceLastHeartbeat, + listener -> listener.onResponse(OptionalLong.of(currentTermProvider.get())) + ) { + @Override + protected long absoluteTimeInMillis() { + return fakeClock.get(); + } + }; + + PlainActionFuture.get(f -> heartbeatStore.writeHeartbeat(new Heartbeat(1, fakeClock.get()), f)); + fakeClock.addAndGet(randomLongBetween(0L, maxTimeSinceLastHeartbeat.millis() - 1)); + + var startElection = new AtomicBoolean(); + var preVoteCollector = new AtomicRegisterPreVoteCollector(heartbeatService, () -> startElection.set(true)); + + preVoteCollector.start(ClusterState.EMPTY_STATE, Collections.emptyList()); + + assertThat(startElection.get(), is(false)); + appender.assertAllExpectationsMatched(); + } finally { + Loggers.removeAppender(logger, appender); + appender.stop(); + } + } + public void testElectionDoesNotRunWhenThereIsALeader() throws Exception { final var currentTermProvider = new AtomicLong(1); final var heartbeatFrequency = TimeValue.timeValueSeconds(randomIntBetween(15, 30)); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatServiceTests.java index 1df613a500f83..bad8385acfbf3 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatServiceTests.java @@ -233,7 +233,7 @@ protected long absoluteTimeInMillis() { assertThat(heartbeat, is(nullValue())); AtomicBoolean noRecentLeaderFound = new AtomicBoolean(); - heartbeatService.runIfNoRecentLeader(() -> noRecentLeaderFound.set(true)); + heartbeatService.checkLeaderHeartbeatAndRun(() -> noRecentLeaderFound.set(true), hb -> {}); assertThat(noRecentLeaderFound.get(), is(true)); } @@ -242,7 +242,7 @@ protected long absoluteTimeInMillis() { PlainActionFuture.get(f -> heartbeatStore.writeHeartbeat(new Heartbeat(1, fakeClock.get()), f)); AtomicBoolean noRecentLeaderFound = new AtomicBoolean(); - 
heartbeatService.runIfNoRecentLeader(() -> noRecentLeaderFound.set(true)); + heartbeatService.checkLeaderHeartbeatAndRun(() -> noRecentLeaderFound.set(true), hb -> {}); assertThat(noRecentLeaderFound.get(), is(false)); } @@ -252,7 +252,7 @@ protected long absoluteTimeInMillis() { fakeClock.set(maxTimeSinceLastHeartbeat.millis() + 1); AtomicBoolean noRecentLeaderFound = new AtomicBoolean(); - heartbeatService.runIfNoRecentLeader(() -> noRecentLeaderFound.set(true)); + heartbeatService.checkLeaderHeartbeatAndRun(() -> noRecentLeaderFound.set(true), hb -> {}); assertThat(noRecentLeaderFound.get(), is(true)); } @@ -273,7 +273,7 @@ protected long absoluteTimeInMillis() { ) ); try (var ignored = mockAppender.capturing(StoreHeartbeatService.class)) { - heartbeatService.runIfNoRecentLeader(() -> fail("should not be called")); + heartbeatService.checkLeaderHeartbeatAndRun(() -> fail("should not be called"), hb -> {}); mockAppender.assertAllExpectationsMatched(); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamAutoShardingEventTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamAutoShardingEventTests.java new file mode 100644 index 0000000000000..925c204fa5b27 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamAutoShardingEventTests.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.SimpleDiffableSerializationTestCase; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; + +public class DataStreamAutoShardingEventTests extends SimpleDiffableSerializationTestCase { + + @Override + protected DataStreamAutoShardingEvent doParseInstance(XContentParser parser) throws IOException { + return DataStreamAutoShardingEvent.fromXContent(parser); + } + + @Override + protected Writeable.Reader instanceReader() { + return DataStreamAutoShardingEvent::new; + } + + @Override + protected DataStreamAutoShardingEvent createTestInstance() { + return DataStreamAutoShardingEventTests.randomInstance(); + } + + @Override + protected DataStreamAutoShardingEvent mutateInstance(DataStreamAutoShardingEvent instance) { + String triggerIndex = instance.triggerIndexName(); + long timestamp = instance.timestamp(); + int targetNumberOfShards = instance.targetNumberOfShards(); + switch (randomInt(2)) { + case 0 -> triggerIndex = randomValueOtherThan(triggerIndex, () -> randomAlphaOfLengthBetween(10, 50)); + case 1 -> timestamp = randomValueOtherThan(timestamp, ESTestCase::randomNonNegativeLong); + case 2 -> targetNumberOfShards = randomValueOtherThan(targetNumberOfShards, ESTestCase::randomNonNegativeInt); + } + return new DataStreamAutoShardingEvent(triggerIndex, targetNumberOfShards, timestamp); + } + + static DataStreamAutoShardingEvent randomInstance() { + return new DataStreamAutoShardingEvent(randomAlphaOfLengthBetween(10, 40), randomNonNegativeInt(), randomNonNegativeLong()); + } + + @Override + protected DataStreamAutoShardingEvent 
makeTestChanges(DataStreamAutoShardingEvent testInstance) { + return mutateInstance(testInstance); + } + + @Override + protected Writeable.Reader> diffReader() { + return DataStreamAutoShardingEvent::readDiffFrom; + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSerializationTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSerializationTests.java new file mode 100644 index 0000000000000..491ba868dfd9b --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSerializationTests.java @@ -0,0 +1,97 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.test.AbstractChunkedSerializingTestCase; +import org.elasticsearch.test.SimpleDiffableWireSerializationTestCase; + +import java.util.List; + +public class DataStreamGlobalRetentionSerializationTests extends SimpleDiffableWireSerializationTestCase { + + @Override + protected ClusterState.Custom makeTestChanges(ClusterState.Custom testInstance) { + if (randomBoolean()) { + return testInstance; + } + return mutateInstance(testInstance); + } + + @Override + protected Writeable.Reader> diffReader() { + return DataStreamGlobalRetention::readDiffFrom; + } + + @Override + protected Writeable.Reader instanceReader() { + return DataStreamGlobalRetention::read; + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry( + List.of( + new NamedWriteableRegistry.Entry(ClusterState.Custom.class, DataStreamGlobalRetention.TYPE, DataStreamGlobalRetention::read) + ) + ); + } + + @Override + protected ClusterState.Custom createTestInstance() { + return randomGlobalRetention(); + } + + @Override + protected ClusterState.Custom mutateInstance(ClusterState.Custom instance) { + DataStreamGlobalRetention metadata = (DataStreamGlobalRetention) instance; + var defaultRetention = metadata.getDefaultRetention(); + var maxRetention = metadata.getMaxRetention(); + switch (randomInt(1)) { + case 0 -> { + defaultRetention = randomValueOtherThan( + defaultRetention, + () -> randomBoolean() ? null : TimeValue.timeValueDays(randomIntBetween(1, 1000)) + ); + } + case 1 -> { + maxRetention = randomValueOtherThan( + maxRetention, + () -> randomBoolean() ? null : TimeValue.timeValueDays(randomIntBetween(1001, 2000)) + ); + } + } + return new DataStreamGlobalRetention(defaultRetention, maxRetention); + } + + public static DataStreamGlobalRetention randomGlobalRetention() { + return new DataStreamGlobalRetention( + randomBoolean() ? null : TimeValue.timeValueDays(randomIntBetween(1, 1000)), + randomBoolean() ? 
null : TimeValue.timeValueDays(randomIntBetween(1000, 2000)) + ); + } + + public void testChunking() { + AbstractChunkedSerializingTestCase.assertChunkCount(createTestInstance(), ignored -> 1); + } + + public void testValidation() { + expectThrows( + IllegalArgumentException.class, + () -> new DataStreamGlobalRetention( + TimeValue.timeValueDays(randomIntBetween(1001, 2000)), + TimeValue.timeValueDays(randomIntBetween(1, 1000)) + ) + ); + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java index 1c4cb8c0681ff..a07cd8e60411a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java @@ -39,6 +39,7 @@ import java.time.temporal.ChronoUnit; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -50,6 +51,7 @@ import static org.elasticsearch.cluster.metadata.DataStream.getDefaultBackingIndexName; import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.newInstance; import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.randomIndexInstances; +import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.randomNonEmptyIndexInstances; import static org.elasticsearch.index.IndexSettings.LIFECYCLE_ORIGINATION_DATE; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -93,9 +95,10 @@ protected DataStream mutateInstance(DataStream instance) { var lifecycle = instance.getLifecycle(); var failureStore = instance.isFailureStore(); var failureIndices = instance.getFailureIndices(); - switch (between(0, 10)) { + var autoShardingEvent = instance.getAutoShardingEvent(); + switch (between(0, 11)) { case 0 -> name = randomAlphaOfLength(10); - case 1 -> indices = randomValueOtherThan(List.of(), DataStreamTestHelper::randomIndexInstances); + case 1 -> indices = randomNonEmptyIndexInstances(); case 2 -> generation = instance.getGeneration() + randomIntBetween(1, 10); case 3 -> metadata = randomBoolean() && metadata != null ? null : Map.of("key", randomAlphaOfLength(10)); case 4 -> { @@ -123,12 +126,17 @@ protected DataStream mutateInstance(DataStream instance) { ? null : DataStreamLifecycle.newBuilder().dataRetention(randomMillisUpToYear9999()).build(); case 10 -> { - failureIndices = randomValueOtherThan(List.of(), DataStreamTestHelper::randomIndexInstances); - if (failureIndices.isEmpty()) { - failureStore = false; - } else { - failureStore = true; - } + failureIndices = randomValueOtherThan(failureIndices, DataStreamTestHelper::randomIndexInstances); + failureStore = failureIndices.isEmpty() == false; + } + case 11 -> { + autoShardingEvent = randomBoolean() && autoShardingEvent != null + ? 
null + : new DataStreamAutoShardingEvent( + indices.get(indices.size() - 1).getName(), + randomIntBetween(1, 10), + randomMillisUpToYear9999() + ); } } @@ -144,7 +152,8 @@ protected DataStream mutateInstance(DataStream instance) { indexMode, lifecycle, failureStore, - failureIndices + failureIndices, + autoShardingEvent ); } @@ -201,7 +210,8 @@ public void testRolloverUpgradeToTsdbDataStream() { indexMode, ds.getLifecycle(), ds.isFailureStore(), - ds.getFailureIndices() + ds.getFailureIndices(), + ds.getAutoShardingEvent() ); var newCoordinates = ds.nextWriteIndexAndGeneration(Metadata.EMPTY_METADATA); @@ -228,7 +238,8 @@ public void testRolloverDowngradeToRegularDataStream() { IndexMode.TIME_SERIES, ds.getLifecycle(), ds.isFailureStore(), - ds.getFailureIndices() + ds.getFailureIndices(), + ds.getAutoShardingEvent() ); var newCoordinates = ds.nextWriteIndexAndGeneration(Metadata.EMPTY_METADATA); @@ -590,7 +601,8 @@ public void testSnapshot() { preSnapshotDataStream.getIndexMode(), preSnapshotDataStream.getLifecycle(), preSnapshotDataStream.isFailureStore(), - preSnapshotDataStream.getFailureIndices() + preSnapshotDataStream.getFailureIndices(), + preSnapshotDataStream.getAutoShardingEvent() ); var reconciledDataStream = postSnapshotDataStream.snapshot( @@ -616,11 +628,7 @@ public void testSnapshot() { public void testSnapshotWithAllBackingIndicesRemoved() { var preSnapshotDataStream = DataStreamTestHelper.randomInstance(); - var indicesToAdd = new ArrayList(); - while (indicesToAdd.isEmpty()) { - // ensure at least one index - indicesToAdd.addAll(randomIndexInstances()); - } + var indicesToAdd = randomNonEmptyIndexInstances(); var postSnapshotDataStream = new DataStream( preSnapshotDataStream.getName(), @@ -634,7 +642,8 @@ public void testSnapshotWithAllBackingIndicesRemoved() { preSnapshotDataStream.getIndexMode(), preSnapshotDataStream.getLifecycle(), preSnapshotDataStream.isFailureStore(), - preSnapshotDataStream.getFailureIndices() + preSnapshotDataStream.getFailureIndices(), + preSnapshotDataStream.getAutoShardingEvent() ); assertNull(postSnapshotDataStream.snapshot(preSnapshotDataStream.getIndices().stream().map(Index::getName).toList())); @@ -1249,7 +1258,7 @@ public void testGetIndicesPastRetentionWithOriginationDate() { creationAndRolloverTimes, settings(IndexVersion.current()), new DataStreamLifecycle() { - public TimeValue getEffectiveDataRetention() { + public TimeValue getDataStreamRetention() { return testRetentionReference.get(); } } @@ -1636,7 +1645,7 @@ public void testXContentSerializationWithRollover() throws IOException { boolean failureStore = randomBoolean(); List failureIndices = List.of(); if (failureStore) { - failureIndices = randomIndexInstances(); + failureIndices = randomNonEmptyIndexInstances(); } DataStreamLifecycle lifecycle = DataStreamLifecycle.newBuilder().dataRetention(randomMillisUpToYear9999()).build(); @@ -1654,7 +1663,8 @@ public void testXContentSerializationWithRollover() throws IOException { lifecycle, failureStore, failureIndices, - false + false, + null ); try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { @@ -1671,6 +1681,257 @@ public void testXContentSerializationWithRollover() throws IOException { } } + public void testGetIndicesWithinMaxAgeRange() { + final TimeValue maxIndexAge = TimeValue.timeValueDays(7); + + final Metadata.Builder metadataBuilder = Metadata.builder(); + final int numberOfBackingIndicesOlderThanMinAge = randomIntBetween(0, 10); + final int numberOfBackingIndicesWithinMinAnge = 
randomIntBetween(0, 10); + final int numberOfShards = 1; + final List backingIndices = new ArrayList<>(); + final String dataStreamName = "logs-es"; + final List backingIndicesOlderThanMinAge = new ArrayList<>(); + for (int i = 0; i < numberOfBackingIndicesOlderThanMinAge; i++) { + long creationDate = System.currentTimeMillis() - maxIndexAge.millis() * 2; + final IndexMetadata indexMetadata = createIndexMetadata( + DataStream.getDefaultBackingIndexName(dataStreamName, backingIndices.size(), creationDate), + randomIndexWriteLoad(numberOfShards), + creationDate + ); + backingIndices.add(indexMetadata.getIndex()); + backingIndicesOlderThanMinAge.add(indexMetadata.getIndex()); + metadataBuilder.put(indexMetadata, false); + } + + final List backingIndicesWithinMinAge = new ArrayList<>(); + for (int i = 0; i < numberOfBackingIndicesWithinMinAnge; i++) { + final long createdAt = System.currentTimeMillis() - (maxIndexAge.getMillis() / 2); + final IndexMetadata indexMetadata = createIndexMetadata( + DataStream.getDefaultBackingIndexName(dataStreamName, backingIndices.size(), createdAt), + randomIndexWriteLoad(numberOfShards), + createdAt + ); + backingIndices.add(indexMetadata.getIndex()); + backingIndicesWithinMinAge.add(indexMetadata.getIndex()); + metadataBuilder.put(indexMetadata, false); + } + + final String writeIndexName = DataStream.getDefaultBackingIndexName(dataStreamName, backingIndices.size()); + final IndexMetadata writeIndexMetadata = createIndexMetadata(writeIndexName, null, System.currentTimeMillis()); + backingIndices.add(writeIndexMetadata.getIndex()); + metadataBuilder.put(writeIndexMetadata, false); + + final DataStream dataStream = new DataStream( + dataStreamName, + backingIndices, + backingIndices.size(), + Collections.emptyMap(), + false, + false, + false, + false, + randomBoolean() ? IndexMode.STANDARD : IndexMode.TIME_SERIES + ); + + metadataBuilder.put(dataStream); + + final List indicesWithinMaxAgeRange = DataStream.getIndicesWithinMaxAgeRange( + dataStream, + metadataBuilder::getSafe, + maxIndexAge, + System::currentTimeMillis + ); + + final List expectedIndicesWithinMaxAgeRange = new ArrayList<>(); + if (numberOfBackingIndicesOlderThanMinAge > 0) { + expectedIndicesWithinMaxAgeRange.add(backingIndicesOlderThanMinAge.get(backingIndicesOlderThanMinAge.size() - 1)); + } + expectedIndicesWithinMaxAgeRange.addAll(backingIndicesWithinMinAge); + expectedIndicesWithinMaxAgeRange.add(writeIndexMetadata.getIndex()); + + assertThat(indicesWithinMaxAgeRange, is(equalTo(expectedIndicesWithinMaxAgeRange))); + } + + private IndexWriteLoad randomIndexWriteLoad(int numberOfShards) { + IndexWriteLoad.Builder builder = IndexWriteLoad.builder(numberOfShards); + for (int shardId = 0; shardId < numberOfShards; shardId++) { + builder.withShardWriteLoad(shardId, randomDoubleBetween(0, 64, true), randomLongBetween(1, 10)); + } + return builder.build(); + } + + private IndexMetadata createIndexMetadata(String indexName, IndexWriteLoad indexWriteLoad, long createdAt) { + return IndexMetadata.builder(indexName) + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + .build() + ) + .stats(indexWriteLoad == null ? 
null : new IndexMetadataStats(indexWriteLoad, 1, 1)) + .creationDate(createdAt) + .build(); + } + + public void testWriteFailureIndex() { + boolean hidden = randomBoolean(); + boolean system = hidden && randomBoolean(); + DataStream noFailureStoreDataStream = new DataStream( + randomAlphaOfLength(10), + randomNonEmptyIndexInstances(), + randomNonNegativeInt(), + null, + hidden, + randomBoolean(), + system, + System::currentTimeMillis, + randomBoolean(), + randomBoolean() ? IndexMode.STANDARD : IndexMode.TIME_SERIES, + DataStreamLifecycleTests.randomLifecycle(), + false, + null, + randomBoolean(), + null + ); + assertThat(noFailureStoreDataStream.getFailureStoreWriteIndex(), nullValue()); + + DataStream failureStoreDataStreamWithEmptyFailureIndices = new DataStream( + randomAlphaOfLength(10), + randomNonEmptyIndexInstances(), + randomNonNegativeInt(), + null, + hidden, + randomBoolean(), + system, + System::currentTimeMillis, + randomBoolean(), + randomBoolean() ? IndexMode.STANDARD : IndexMode.TIME_SERIES, + DataStreamLifecycleTests.randomLifecycle(), + true, + List.of(), + randomBoolean(), + null + ); + assertThat(failureStoreDataStreamWithEmptyFailureIndices.getFailureStoreWriteIndex(), nullValue()); + + List failureIndices = randomIndexInstances(); + String dataStreamName = randomAlphaOfLength(10); + Index writeFailureIndex = new Index( + getDefaultBackingIndexName(dataStreamName, randomNonNegativeInt()), + UUIDs.randomBase64UUID(LuceneTestCase.random()) + ); + failureIndices.add(writeFailureIndex); + DataStream failureStoreDataStream = new DataStream( + dataStreamName, + randomNonEmptyIndexInstances(), + randomNonNegativeInt(), + null, + hidden, + randomBoolean(), + system, + System::currentTimeMillis, + randomBoolean(), + randomBoolean() ? IndexMode.STANDARD : IndexMode.TIME_SERIES, + DataStreamLifecycleTests.randomLifecycle(), + true, + failureIndices, + randomBoolean(), + null + ); + assertThat(failureStoreDataStream.getFailureStoreWriteIndex(), is(writeFailureIndex)); + } + + public void testIsFailureIndex() { + boolean hidden = randomBoolean(); + boolean system = hidden && randomBoolean(); + List backingIndices = randomNonEmptyIndexInstances(); + DataStream noFailureStoreDataStream = new DataStream( + randomAlphaOfLength(10), + backingIndices, + randomNonNegativeInt(), + null, + hidden, + randomBoolean(), + system, + System::currentTimeMillis, + randomBoolean(), + randomBoolean() ? IndexMode.STANDARD : IndexMode.TIME_SERIES, + DataStreamLifecycleTests.randomLifecycle(), + false, + null, + randomBoolean(), + null + ); + assertThat( + noFailureStoreDataStream.isFailureStoreIndex(backingIndices.get(randomIntBetween(0, backingIndices.size() - 1)).getName()), + is(false) + ); + + backingIndices = randomNonEmptyIndexInstances(); + DataStream failureStoreDataStreamWithEmptyFailureIndices = new DataStream( + randomAlphaOfLength(10), + backingIndices, + randomNonNegativeInt(), + null, + hidden, + randomBoolean(), + system, + System::currentTimeMillis, + randomBoolean(), + randomBoolean() ? 
IndexMode.STANDARD : IndexMode.TIME_SERIES, + DataStreamLifecycleTests.randomLifecycle(), + true, + List.of(), + randomBoolean(), + null + ); + assertThat( + failureStoreDataStreamWithEmptyFailureIndices.isFailureStoreIndex( + backingIndices.get(randomIntBetween(0, backingIndices.size() - 1)).getName() + ), + is(false) + ); + + backingIndices = randomNonEmptyIndexInstances(); + List failureIndices = randomIndexInstances(); + String dataStreamName = randomAlphaOfLength(10); + Index writeFailureIndex = new Index( + getDefaultBackingIndexName(dataStreamName, randomNonNegativeInt()), + UUIDs.randomBase64UUID(LuceneTestCase.random()) + ); + failureIndices.add(writeFailureIndex); + DataStream failureStoreDataStream = new DataStream( + dataStreamName, + backingIndices, + randomNonNegativeInt(), + null, + hidden, + randomBoolean(), + system, + System::currentTimeMillis, + randomBoolean(), + randomBoolean() ? IndexMode.STANDARD : IndexMode.TIME_SERIES, + DataStreamLifecycleTests.randomLifecycle(), + true, + failureIndices, + randomBoolean(), + null + ); + assertThat(failureStoreDataStream.isFailureStoreIndex(writeFailureIndex.getName()), is(true)); + assertThat( + failureStoreDataStream.isFailureStoreIndex(failureIndices.get(randomIntBetween(0, failureIndices.size() - 1)).getName()), + is(true) + ); + assertThat( + failureStoreDataStreamWithEmptyFailureIndices.isFailureStoreIndex( + backingIndices.get(randomIntBetween(0, backingIndices.size() - 1)).getName() + ), + is(false) + ); + assertThat(failureStoreDataStreamWithEmptyFailureIndices.isFailureStoreIndex(randomAlphaOfLength(10)), is(false)); + } + private record DataStreamMetadata(Long creationTimeInMillis, Long rolloverTimeInMillis, Long originationTimeInMillis) { public static DataStreamMetadata dataStreamMetadata(Long creationTimeInMillis, Long rolloverTimeInMillis) { return new DataStreamMetadata(creationTimeInMillis, rolloverTimeInMillis, null); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java index c043734d15cdf..a1eeceba8a390 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.indices.FailureIndexNotSupportedException; import org.elasticsearch.indices.IndexClosedException; import org.elasticsearch.indices.InvalidIndexNameException; import org.elasticsearch.indices.SystemIndexDescriptor; @@ -53,6 +54,7 @@ import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.backingIndexEqualTo; import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.createBackingIndex; +import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.createFailureStore; import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.newInstance; import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_HIDDEN_SETTING; import static org.elasticsearch.common.util.set.Sets.newHashSet; @@ -2294,7 +2296,7 @@ public void testIgnoreThrottled() { new IndicesOptions( IndicesOptions.ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS, IndicesOptions.WildcardOptions.DEFAULT, - 
IndicesOptions.GeneralOptions.builder().ignoreThrottled(true).build() + IndicesOptions.GatekeeperOptions.builder().ignoreThrottled(true).build() ), "ind*", "test-index" @@ -2697,6 +2699,200 @@ public void testDataStreams() { } } + public void testDataStreamsWithFailureStore() { + final String dataStreamName = "my-data-stream"; + IndexMetadata index1 = createBackingIndex(dataStreamName, 1, epochMillis).build(); + IndexMetadata index2 = createBackingIndex(dataStreamName, 2, epochMillis).build(); + IndexMetadata failureIndex1 = createFailureStore(dataStreamName, 1, epochMillis).build(); + IndexMetadata failureIndex2 = createFailureStore(dataStreamName, 2, epochMillis).build(); + IndexMetadata otherIndex = indexBuilder("my-other-index", Settings.EMPTY).state(State.OPEN).build(); + + Metadata.Builder mdBuilder = Metadata.builder() + .put(index1, false) + .put(index2, false) + .put(failureIndex1, false) + .put(failureIndex2, false) + .put(otherIndex, false) + .put( + newInstance( + dataStreamName, + List.of(index1.getIndex(), index2.getIndex()), + List.of(failureIndex1.getIndex(), failureIndex2.getIndex()) + ) + ); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(mdBuilder).build(); + + // Test default with an exact data stream name + { + IndicesOptions indicesOptions = IndicesOptions.STRICT_EXPAND_OPEN; + Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true, "my-data-stream"); + assertThat(result.length, equalTo(2)); + assertThat(result[0].getName(), equalTo(DataStream.getDefaultBackingIndexName(dataStreamName, 1, epochMillis))); + assertThat(result[1].getName(), equalTo(DataStream.getDefaultBackingIndexName(dataStreamName, 2, epochMillis))); + } + + // Test include failure store with an exact data stream name + { + IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) + .failureStoreOptions(IndicesOptions.FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(true)) + .build(); + Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true, "my-data-stream"); + assertThat(result.length, equalTo(4)); + assertThat(result[0].getName(), equalTo(DataStream.getDefaultBackingIndexName(dataStreamName, 1, epochMillis))); + assertThat(result[1].getName(), equalTo(DataStream.getDefaultBackingIndexName(dataStreamName, 2, epochMillis))); + assertThat(result[2].getName(), equalTo(DataStream.getDefaultFailureStoreName(dataStreamName, 1, epochMillis))); + assertThat(result[3].getName(), equalTo(DataStream.getDefaultFailureStoreName(dataStreamName, 2, epochMillis))); + } + + // Test include failure store while we do not allow failure indices and ignore unavailable + // We expect that they will be skipped + { + IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) + .failureStoreOptions(IndicesOptions.FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(true)) + .gatekeeperOptions(IndicesOptions.GatekeeperOptions.builder().allowFailureIndices(false).build()) + .concreteTargetOptions(IndicesOptions.ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) + .build(); + Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true, "my-data-stream"); + assertThat(result.length, equalTo(2)); + assertThat(result[0].getName(), equalTo(DataStream.getDefaultBackingIndexName(dataStreamName, 1, epochMillis))); + assertThat(result[1].getName(), 
equalTo(DataStream.getDefaultBackingIndexName(dataStreamName, 2, epochMillis))); + } + + // Test include failure store while we do not allow failure indices + // We expect an error + { + IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) + .failureStoreOptions(IndicesOptions.FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(true)) + .gatekeeperOptions(IndicesOptions.GatekeeperOptions.builder().allowFailureIndices(false).build()) + .build(); + FailureIndexNotSupportedException failureIndexNotSupportedException = expectThrows( + FailureIndexNotSupportedException.class, + () -> indexNameExpressionResolver.concreteIndices(state, indicesOptions, true, "my-data-stream") + ); + assertThat( + failureIndexNotSupportedException.getIndex().getName(), + equalTo(DataStream.getDefaultFailureStoreName(dataStreamName, 1, epochMillis)) + ); + } + + // Test only failure store with an exact data stream name + { + IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) + .failureStoreOptions(IndicesOptions.FailureStoreOptions.builder().includeRegularIndices(false).includeFailureIndices(true)) + .build(); + Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true, "my-data-stream"); + assertThat(result.length, equalTo(2)); + assertThat(result[0].getName(), equalTo(DataStream.getDefaultFailureStoreName(dataStreamName, 1, epochMillis))); + assertThat(result[1].getName(), equalTo(DataStream.getDefaultFailureStoreName(dataStreamName, 2, epochMillis))); + } + + // Test default without any expressions + { + IndicesOptions indicesOptions = IndicesOptions.STRICT_EXPAND_OPEN; + Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true); + assertThat(result.length, equalTo(3)); + List indexNames = Arrays.stream(result).map(Index::getName).toList(); + assertThat( + indexNames, + containsInAnyOrder( + DataStream.getDefaultBackingIndexName(dataStreamName, 2, epochMillis), + DataStream.getDefaultBackingIndexName(dataStreamName, 1, epochMillis), + otherIndex.getIndex().getName() + ) + ); + } + + // Test include failure store without any expressions + { + IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) + .failureStoreOptions(IndicesOptions.FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(true)) + .build(); + Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true); + assertThat(result.length, equalTo(5)); + List indexNames = Arrays.stream(result).map(Index::getName).toList(); + assertThat( + indexNames, + containsInAnyOrder( + DataStream.getDefaultBackingIndexName(dataStreamName, 2, epochMillis), + DataStream.getDefaultBackingIndexName(dataStreamName, 1, epochMillis), + DataStream.getDefaultFailureStoreName(dataStreamName, 2, epochMillis), + DataStream.getDefaultFailureStoreName(dataStreamName, 1, epochMillis), + otherIndex.getIndex().getName() + ) + ); + } + + // Test only failure store without any expressions + { + IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) + .failureStoreOptions(IndicesOptions.FailureStoreOptions.builder().includeRegularIndices(false).includeFailureIndices(true)) + .build(); + Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true); + assertThat(result.length, equalTo(2)); + List indexNames = Arrays.stream(result).map(Index::getName).toList(); + assertThat( + 
indexNames, + containsInAnyOrder( + DataStream.getDefaultFailureStoreName(dataStreamName, 2, epochMillis), + DataStream.getDefaultFailureStoreName(dataStreamName, 1, epochMillis) + ) + ); + } + + // Test default with wildcard expression + { + IndicesOptions indicesOptions = IndicesOptions.STRICT_EXPAND_OPEN; + Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true, "my-*"); + assertThat(result.length, equalTo(3)); + List indexNames = Arrays.stream(result).map(Index::getName).toList(); + assertThat( + indexNames, + containsInAnyOrder( + DataStream.getDefaultBackingIndexName(dataStreamName, 2, epochMillis), + DataStream.getDefaultBackingIndexName(dataStreamName, 1, epochMillis), + otherIndex.getIndex().getName() + ) + ); + } + + // Test include failure store with wildcard expression + { + IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) + .failureStoreOptions(IndicesOptions.FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(true)) + .build(); + Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true, "my-*"); + assertThat(result.length, equalTo(5)); + List indexNames = Arrays.stream(result).map(Index::getName).toList(); + assertThat( + indexNames, + containsInAnyOrder( + DataStream.getDefaultBackingIndexName(dataStreamName, 2, epochMillis), + DataStream.getDefaultBackingIndexName(dataStreamName, 1, epochMillis), + DataStream.getDefaultFailureStoreName(dataStreamName, 2, epochMillis), + DataStream.getDefaultFailureStoreName(dataStreamName, 1, epochMillis), + otherIndex.getIndex().getName() + ) + ); + } + + // Test only failure store with wildcard expression + { + IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) + .failureStoreOptions(IndicesOptions.FailureStoreOptions.builder().includeRegularIndices(false).includeFailureIndices(true)) + .build(); + Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true, "my-*"); + assertThat(result.length, equalTo(2)); + List indexNames = Arrays.stream(result).map(Index::getName).toList(); + assertThat( + indexNames, + containsInAnyOrder( + DataStream.getDefaultFailureStoreName(dataStreamName, 2, epochMillis), + DataStream.getDefaultFailureStoreName(dataStreamName, 1, epochMillis) + ) + ); + } + } + public void testDataStreamAliases() { String dataStream1 = "my-data-stream-1"; IndexMetadata index1 = createBackingIndex(dataStream1, 1, epochMillis).build(); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java index e11f8c0cbe108..ea79bc8f13765 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.indices.ExecutorNames; import org.elasticsearch.indices.SystemDataStreamDescriptor; @@ -59,6 +60,7 @@ public void testCreateDataStream() throws Exception { CreateDataStreamClusterStateUpdateRequest req = new CreateDataStreamClusterStateUpdateRequest(dataStreamName); ClusterState 
newState = MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, true, req, @@ -98,6 +100,7 @@ public void testCreateDataStreamWithAliasFromTemplate() throws Exception { CreateDataStreamClusterStateUpdateRequest req = new CreateDataStreamClusterStateUpdateRequest(dataStreamName); ClusterState newState = MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, randomBoolean(), req, @@ -174,6 +177,7 @@ public void testCreateDataStreamWithAliasFromComponentTemplate() throws Exceptio CreateDataStreamClusterStateUpdateRequest req = new CreateDataStreamClusterStateUpdateRequest(dataStreamName); ClusterState newState = MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, randomBoolean(), req, @@ -226,6 +230,7 @@ public void testCreateDataStreamWithFailureStore() throws Exception { CreateDataStreamClusterStateUpdateRequest req = new CreateDataStreamClusterStateUpdateRequest(dataStreamName); ClusterState newState = MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, randomBoolean(), req, @@ -246,6 +251,40 @@ public void testCreateDataStreamWithFailureStore() throws Exception { assertThat(newState.metadata().index(failureStoreIndexName).isSystem(), is(false)); } + public void testCreateDataStreamWithFailureStoreWithRefreshRate() throws Exception { + final MetadataCreateIndexService metadataCreateIndexService = getMetadataCreateIndexService(); + var timeValue = randomTimeValue(); + var settings = Settings.builder() + .put(MetadataCreateDataStreamService.FAILURE_STORE_REFRESH_INTERVAL_SETTING_NAME, timeValue) + .build(); + final String dataStreamName = "my-data-stream"; + ComposableIndexTemplate template = new ComposableIndexTemplate.Builder().indexPatterns(List.of(dataStreamName + "*")) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false, true)) + .build(); + ClusterState cs = ClusterState.builder(new ClusterName("_name")) + .metadata(Metadata.builder().put("template", template).build()) + .build(); + CreateDataStreamClusterStateUpdateRequest req = new CreateDataStreamClusterStateUpdateRequest(dataStreamName); + ClusterState newState = MetadataCreateDataStreamService.createDataStream( + metadataCreateIndexService, + settings, + cs, + randomBoolean(), + req, + ActionListener.noop() + ); + var backingIndexName = DataStream.getDefaultBackingIndexName(dataStreamName, 1, req.getStartTime()); + var failureStoreIndexName = DataStream.getDefaultFailureStoreName(dataStreamName, 1, req.getStartTime()); + assertThat(newState.metadata().dataStreams().size(), equalTo(1)); + assertThat(newState.metadata().dataStreams().get(dataStreamName).getName(), equalTo(dataStreamName)); + assertThat(newState.metadata().index(backingIndexName), notNullValue()); + assertThat(newState.metadata().index(failureStoreIndexName), notNullValue()); + assertThat( + newState.metadata().index(failureStoreIndexName).getSettings().get(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey()), + equalTo(timeValue) + ); + } + public void testCreateSystemDataStream() throws Exception { final MetadataCreateIndexService metadataCreateIndexService = getMetadataCreateIndexService(); final String dataStreamName = ".system-data-stream"; @@ -259,6 +298,7 @@ public void testCreateSystemDataStream() throws Exception { ); ClusterState newState = MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, 
randomBoolean(), req, @@ -291,6 +331,7 @@ public void testCreateDuplicateDataStream() throws Exception { ResourceAlreadyExistsException.class, () -> MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, randomBoolean(), req, @@ -309,6 +350,7 @@ public void testCreateDataStreamWithInvalidName() throws Exception { IllegalArgumentException.class, () -> MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, randomBoolean(), req, @@ -327,6 +369,7 @@ public void testCreateDataStreamWithUppercaseCharacters() throws Exception { IllegalArgumentException.class, () -> MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, randomBoolean(), req, @@ -345,6 +388,7 @@ public void testCreateDataStreamStartingWithPeriod() throws Exception { IllegalArgumentException.class, () -> MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, randomBoolean(), req, @@ -363,6 +407,7 @@ public void testCreateDataStreamNoTemplate() throws Exception { IllegalArgumentException.class, () -> MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, randomBoolean(), req, @@ -384,6 +429,7 @@ public void testCreateDataStreamNoValidTemplate() throws Exception { IllegalArgumentException.class, () -> MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, randomBoolean(), req, @@ -408,6 +454,7 @@ public static ClusterState createDataStream(final String dataStreamName) throws CreateDataStreamClusterStateUpdateRequest req = new CreateDataStreamClusterStateUpdateRequest(dataStreamName); return MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, randomBoolean(), req, diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsServiceTests.java index ba3b1a7387110..71306d7fe0aef 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsServiceTests.java @@ -356,7 +356,8 @@ public void testRemoveBrokenBackingIndexReference() { original.getIndexMode(), original.getLifecycle(), original.isFailureStore(), - original.getFailureIndices() + original.getFailureIndices(), + original.getAutoShardingEvent() ); var brokenState = ClusterState.builder(state).metadata(Metadata.builder(state.getMetadata()).put(broken).build()).build(); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java index 14cb19ba89810..84b6feb1dbffa 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java @@ -1778,7 +1778,7 @@ public void testIndexTemplateFailsToOverrideComponentTemplateMappingField() thro "properties": { "field2": { "type": "object", - "subobjects": false, + "subobjects": false, "properties": { "foo": { "type": "integer" @@ -1803,12 +1803,12 @@ public void testIndexTemplateFailsToOverrideComponentTemplateMappingField() thro { "properties": { "field2": { - "type": "object", - "properties": { - "bar": { - 
"type": "object" - } - } + "type": "object", + "properties": { + "bar": { + "type": "nested" + } + } } } }"""), null)) @@ -1834,7 +1834,7 @@ public void testIndexTemplateFailsToOverrideComponentTemplateMappingField() thro assertNotNull(e.getCause().getCause()); assertThat( e.getCause().getCause().getMessage(), - containsString("Tried to add subobject [bar] to object [field2] which does not support subobjects") + containsString("Tried to add nested object [bar] to object [field2] which does not support subobjects") ); } @@ -1920,12 +1920,12 @@ public void testUpdateComponentTemplateFailsIfResolvedIndexTemplatesWouldBeInval { "properties": { "field2": { - "type": "object", - "properties": { - "bar": { - "type": "object" - } - } + "type": "object", + "properties": { + "bar": { + "type": "nested" + } + } } } } @@ -1951,7 +1951,7 @@ public void testUpdateComponentTemplateFailsIfResolvedIndexTemplatesWouldBeInval assertNotNull(e.getCause().getCause().getCause()); assertThat( e.getCause().getCause().getCause().getMessage(), - containsString("Tried to add subobject [bar] to object [field2] which does not support subobjects") + containsString("Tried to add nested object [bar] to object [field2] which does not support subobjects") ); } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamServiceTests.java index 128601ff21250..cefbd31db1ee6 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamServiceTests.java @@ -297,6 +297,7 @@ public void testCreateDataStreamWithSuppliedWriteIndex() throws Exception { TimeValue.ZERO ), getMetadataCreateIndexService(), + Settings.EMPTY, ActionListener.noop() ); IndexAbstraction ds = newState.metadata().getIndicesLookup().get(dataStreamName); @@ -355,6 +356,7 @@ public void testCreateDataStreamHidesBackingIndicesAndRemovesAlias() throws Exce TimeValue.ZERO ), getMetadataCreateIndexService(), + Settings.EMPTY, ActionListener.noop() ); IndexAbstraction ds = newState.metadata().getIndicesLookup().get(dataStreamName); @@ -415,6 +417,7 @@ public void testCreateDataStreamWithoutSuppliedWriteIndex() { TimeValue.ZERO ), getMetadataCreateIndexService(), + Settings.EMPTY, ActionListener.noop() ) ); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java index 07ccf0e8f34e7..1e35a40dedc17 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Predicates; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; @@ -783,7 +784,7 @@ public void testFindMappingsWithFilters() throws IOException { && field.equals("address.location") == false; } if (index.equals("index2")) { - return field -> false; + return Predicates.never(); } return MapperPlugin.NOOP_FIELD_PREDICATE; }, Metadata.ON_NEXT_INDEX_FIND_MAPPINGS_NOOP); diff --git 
a/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetadataTests.java index e7f49bc773404..aa9d0b9368fa6 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetadataTests.java @@ -23,12 +23,9 @@ import org.elasticsearch.xcontent.json.JsonXContent; import java.io.IOException; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.EnumSet; -import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -219,13 +216,14 @@ public void testSimpleJsonFromAndTo() throws IOException { private static final String ALIAS_FILTER2 = "{\"field2\":\"value2\"}"; public void testToXContentGateway_FlatSettingTrue_ReduceMappingFalse() throws IOException { - Map mapParams = new HashMap<>() { - { - put(Metadata.CONTEXT_MODE_PARAM, CONTEXT_MODE_GATEWAY); - put("flat_settings", "true"); - put("reduce_mappings", "false"); - } - }; + Map mapParams = Map.of( + Metadata.CONTEXT_MODE_PARAM, + CONTEXT_MODE_GATEWAY, + "flat_settings", + "true", + "reduce_mappings", + "false" + ); Metadata metadata = buildMetadata(); XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); @@ -282,11 +280,7 @@ public void testToXContentGateway_FlatSettingTrue_ReduceMappingFalse() throws IO } public void testToXContentAPI_SameTypeName() throws IOException { - Map mapParams = new HashMap<>() { - { - put(Metadata.CONTEXT_MODE_PARAM, CONTEXT_MODE_API); - } - }; + Map mapParams = Map.of(Metadata.CONTEXT_MODE_PARAM, CONTEXT_MODE_API); Metadata metadata = Metadata.builder() .clusterUUID("clusterUUID") @@ -300,15 +294,7 @@ public void testToXContentAPI_SameTypeName() throws IOException { "type", // the type name is the root value, // the original logic in ClusterState.toXContent will reduce - new HashMap<>() { - { - put("type", new HashMap() { - { - put("key", "value"); - } - }); - } - } + Map.of("type", Map.of("key", "value")) ) ) .numberOfShards(1) @@ -378,13 +364,14 @@ public void testToXContentAPI_SameTypeName() throws IOException { } public void testToXContentGateway_FlatSettingFalse_ReduceMappingTrue() throws IOException { - Map mapParams = new HashMap<>() { - { - put(Metadata.CONTEXT_MODE_PARAM, CONTEXT_MODE_GATEWAY); - put("flat_settings", "false"); - put("reduce_mappings", "true"); - } - }; + Map mapParams = Map.of( + Metadata.CONTEXT_MODE_PARAM, + CONTEXT_MODE_GATEWAY, + "flat_settings", + "false", + "reduce_mappings", + "true" + ); Metadata metadata = buildMetadata(); XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); @@ -443,13 +430,14 @@ public void testToXContentGateway_FlatSettingFalse_ReduceMappingTrue() throws IO } public void testToXContentAPI_FlatSettingTrue_ReduceMappingFalse() throws IOException { - Map mapParams = new HashMap<>() { - { - put(Metadata.CONTEXT_MODE_PARAM, CONTEXT_MODE_API); - put("flat_settings", "true"); - put("reduce_mappings", "false"); - } - }; + Map mapParams = Map.of( + Metadata.CONTEXT_MODE_PARAM, + CONTEXT_MODE_API, + "flat_settings", + "true", + "reduce_mappings", + "false" + ); final Metadata metadata = buildMetadata(); @@ -546,13 +534,14 @@ public void testToXContentAPI_FlatSettingTrue_ReduceMappingFalse() throws IOExce } public void testToXContentAPI_FlatSettingFalse_ReduceMappingTrue() throws IOException { - Map mapParams = new 
HashMap<>() { - { - put(Metadata.CONTEXT_MODE_PARAM, CONTEXT_MODE_API); - put("flat_settings", "false"); - put("reduce_mappings", "true"); - } - }; + Map mapParams = Map.of( + Metadata.CONTEXT_MODE_PARAM, + CONTEXT_MODE_API, + "flat_settings", + "false", + "reduce_mappings", + "true" + ); final Metadata metadata = buildMetadata(); @@ -655,13 +644,14 @@ public void testToXContentAPI_FlatSettingFalse_ReduceMappingTrue() throws IOExce } public void testToXContentAPIReservedMetadata() throws IOException { - Map mapParams = new HashMap<>() { - { - put(Metadata.CONTEXT_MODE_PARAM, CONTEXT_MODE_API); - put("flat_settings", "false"); - put("reduce_mappings", "true"); - } - }; + Map mapParams = Map.of( + Metadata.CONTEXT_MODE_PARAM, + CONTEXT_MODE_API, + "flat_settings", + "false", + "reduce_mappings", + "true" + ); Metadata metadata = buildMetadata(); @@ -840,16 +830,8 @@ private Metadata buildMetadata() throws IOException { .coordinationMetadata( CoordinationMetadata.builder() .term(1) - .lastCommittedConfiguration(new CoordinationMetadata.VotingConfiguration(new HashSet<>() { - { - add("commitedConfigurationNodeId"); - } - })) - .lastAcceptedConfiguration(new CoordinationMetadata.VotingConfiguration(new HashSet<>() { - { - add("acceptedConfigurationNodeId"); - } - })) + .lastCommittedConfiguration(new CoordinationMetadata.VotingConfiguration(Set.of("commitedConfigurationNodeId"))) + .lastAcceptedConfiguration(new CoordinationMetadata.VotingConfiguration(Set.of("acceptedConfigurationNodeId"))) .addVotingConfigExclusion(new CoordinationMetadata.VotingConfigExclusion("exlucdedNodeId", "excludedNodeName")) .build() ) @@ -859,25 +841,13 @@ private Metadata buildMetadata() throws IOException { IndexMetadata.builder("index") .state(IndexMetadata.State.OPEN) .settings(Settings.builder().put(SETTING_VERSION_CREATED, IndexVersion.current())) - .putMapping(new MappingMetadata("type", new HashMap<>() { - { - put("type1", new HashMap() { - { - put("key", "value"); - } - }); - } - })) + .putMapping(new MappingMetadata("type", Map.of("type1", Map.of("key", "value")))) .putAlias(AliasMetadata.builder("alias").indexRouting("indexRouting").build()) .numberOfShards(1) .primaryTerm(0, 1L) - .putInSyncAllocationIds(0, new HashSet<>() { - { - add("allocationId"); - } - }) + .putInSyncAllocationIds(0, Set.of("allocationId")) .numberOfReplicas(2) - .putRolloverInfo(new RolloverInfo("rolloveAlias", new ArrayList<>(), 1L)) + .putRolloverInfo(new RolloverInfo("rolloveAlias", List.of(), 1L)) ) .put( IndexTemplateMetadata.builder("template") diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/IndexShardRoutingTableTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/IndexShardRoutingTableTests.java index 838a4268fa1cf..2ae9414711801 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/IndexShardRoutingTableTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/IndexShardRoutingTableTests.java @@ -12,7 +12,6 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; -import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -35,9 +34,13 @@ public void testEquals() { Index index = new Index("a", "b"); ShardId shardId = new ShardId(index, 1); ShardId shardId2 = new ShardId(index, 2); - IndexShardRoutingTable table1 = new IndexShardRoutingTable(shardId, new ArrayList<>()); - IndexShardRoutingTable table2 = new IndexShardRoutingTable(shardId, new ArrayList<>()); - IndexShardRoutingTable table3 = new 
IndexShardRoutingTable(shardId2, new ArrayList<>()); + ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, null, true, ShardRoutingState.UNASSIGNED); + IndexShardRoutingTable table1 = new IndexShardRoutingTable(shardId, List.of(shardRouting)); + IndexShardRoutingTable table2 = new IndexShardRoutingTable(shardId, List.of(shardRouting)); + IndexShardRoutingTable table3 = new IndexShardRoutingTable( + shardId2, + List.of(TestShardRouting.newShardRouting(shardId2, null, true, ShardRoutingState.UNASSIGNED)) + ); String s = "Some other random object"; assertEquals(table1, table1); assertEquals(table1, table2); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsServiceTests.java new file mode 100644 index 0000000000000..d99d4c1b54527 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsServiceTests.java @@ -0,0 +1,205 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.routing.allocation; + +import org.elasticsearch.cluster.ClusterInfo; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ESAllocationTestCase; +import org.elasticsearch.cluster.EmptyClusterInfoService; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalance; +import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceShardsAllocator; +import org.elasticsearch.cluster.routing.allocation.allocator.ShardAssignment; +import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.telemetry.TelemetryProvider; +import org.elasticsearch.test.ClusterServiceUtils; + +import java.util.Map; +import java.util.Set; + +import static org.elasticsearch.cluster.routing.TestShardRouting.newShardRouting; +import static org.elasticsearch.cluster.routing.TestShardRouting.shardRoutingBuilder; +import static org.hamcrest.Matchers.aMapWithSize; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.hasEntry; + +public class AllocationStatsServiceTests extends ESAllocationTestCase { + + public void testShardStats() { + + var ingestLoadForecast = randomDoubleBetween(0, 10, true); + var shardSizeForecast = randomNonNegativeLong(); + var currentShardSize = randomNonNegativeLong(); + + var indexMetadata = IndexMetadata.builder("my-index") + .settings(indexSettings(IndexVersion.current(), 1, 0)) + 
.indexWriteLoadForecast(ingestLoadForecast) + .shardSizeInBytesForecast(shardSizeForecast) + .build(); + var shardId = new ShardId(indexMetadata.getIndex(), 0); + + var state = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(newNode("node-1"))) + .metadata(Metadata.builder().put(indexMetadata, false)) + .routingTable( + RoutingTable.builder() + .add( + IndexRoutingTable.builder(indexMetadata.getIndex()) + .addShard(newShardRouting(shardId, "node-1", true, ShardRoutingState.STARTED)) + .build() + ) + ) + .build(); + + var clusterInfo = new ClusterInfo( + Map.of(), + Map.of(), + Map.of(ClusterInfo.shardIdentifierFromRouting(shardId, true), currentShardSize), + Map.of(), + Map.of(), + Map.of() + ); + + var queue = new DeterministicTaskQueue(); + try (var clusterService = ClusterServiceUtils.createClusterService(state, queue.getThreadPool())) { + var service = new AllocationStatsService(clusterService, () -> clusterInfo, createShardAllocator(), TEST_WRITE_LOAD_FORECASTER); + assertThat( + service.stats(), + allOf( + aMapWithSize(1), + hasEntry( + "node-1", + new NodeAllocationStats(1, -1, ingestLoadForecast, Math.max(shardSizeForecast, currentShardSize), currentShardSize) + ) + ) + ); + } + } + + public void testRelocatingShardIsOnlyCountedOnceOnTargetNode() { + + var indexMetadata = IndexMetadata.builder("my-index").settings(indexSettings(IndexVersion.current(), 1, 0)).build(); + var state = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(newNode("node-1")).add(newNode("node-2"))) + .metadata(Metadata.builder().put(indexMetadata, false)) + .routingTable( + RoutingTable.builder() + .add( + IndexRoutingTable.builder(indexMetadata.getIndex()) + .addShard( + shardRoutingBuilder(new ShardId(indexMetadata.getIndex(), 0), "node-1", true, ShardRoutingState.RELOCATING) + .withRelocatingNodeId("node-2") + .build() + ) + .build() + ) + ) + .build(); + + var queue = new DeterministicTaskQueue(); + try (var clusterService = ClusterServiceUtils.createClusterService(state, queue.getThreadPool())) { + var service = new AllocationStatsService( + clusterService, + EmptyClusterInfoService.INSTANCE, + createShardAllocator(), + TEST_WRITE_LOAD_FORECASTER + ); + assertThat( + service.stats(), + allOf( + aMapWithSize(2), + hasEntry("node-1", new NodeAllocationStats(0, -1, 0, 0, 0)), + hasEntry("node-2", new NodeAllocationStats(1, -1, 0, 0, 0)) + ) + ); + } + } + + public void testUndesiredShardCount() { + + var indexMetadata = IndexMetadata.builder("my-index").settings(indexSettings(IndexVersion.current(), 2, 0)).build(); + + var state = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(newNode("node-1")).add(newNode("node-2")).add(newNode("node-3"))) + .metadata(Metadata.builder().put(indexMetadata, false)) + .routingTable( + RoutingTable.builder() + .add( + IndexRoutingTable.builder(indexMetadata.getIndex()) + .addShard(newShardRouting(new ShardId(indexMetadata.getIndex(), 0), "node-1", true, ShardRoutingState.STARTED)) + .addShard(newShardRouting(new ShardId(indexMetadata.getIndex(), 1), "node-3", true, ShardRoutingState.STARTED)) + .build() + ) + ) + .build(); + + var queue = new DeterministicTaskQueue(); + var threadPool = queue.getThreadPool(); + try (var clusterService = ClusterServiceUtils.createClusterService(state, threadPool)) { + var service = new AllocationStatsService( + clusterService, + EmptyClusterInfoService.INSTANCE, + new DesiredBalanceShardsAllocator( + ClusterSettings.createBuiltInClusterSettings(), + 
createShardAllocator(), + threadPool, + clusterService, + (innerState, strategy) -> innerState, + TelemetryProvider.NOOP + ) { + @Override + public DesiredBalance getDesiredBalance() { + return new DesiredBalance( + 1, + Map.ofEntries( + Map.entry(new ShardId(indexMetadata.getIndex(), 0), new ShardAssignment(Set.of("node-1"), 1, 0, 0)), + Map.entry(new ShardId(indexMetadata.getIndex(), 1), new ShardAssignment(Set.of("node-2"), 1, 0, 0)) + ) + ); + } + }, + TEST_WRITE_LOAD_FORECASTER + ); + assertThat( + service.stats(), + allOf( + aMapWithSize(3), + hasEntry("node-1", new NodeAllocationStats(1, 0, 0, 0, 0)), + hasEntry("node-2", new NodeAllocationStats(0, 0, 0, 0, 0)), + hasEntry("node-3", new NodeAllocationStats(1, 1, 0, 0, 0)) // [my-index][1] should be allocated to [node-2] + ) + ); + } + } + + private ShardsAllocator createShardAllocator() { + return new ShardsAllocator() { + @Override + public void allocate(RoutingAllocation allocation) { + + } + + @Override + public ShardAllocationDecision decideShardAllocation(ShardRouting shard, RoutingAllocation allocation) { + return null; + } + }; + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationStatsTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationStatsTests.java new file mode 100644 index 0000000000000..ad371ed239795 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationStatsTests.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.cluster.routing.allocation; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +public class NodeAllocationStatsTests extends AbstractWireSerializingTestCase { + + @Override + protected Writeable.Reader instanceReader() { + return NodeAllocationStats::new; + } + + @Override + protected NodeAllocationStats createTestInstance() { + return new NodeAllocationStats( + randomIntBetween(0, 10000), + randomIntBetween(0, 1000), + randomDoubleBetween(0, 8, true), + randomNonNegativeLong(), + randomNonNegativeLong() + ); + } + + @Override + protected NodeAllocationStats mutateInstance(NodeAllocationStats instance) throws IOException { + return switch (randomInt(4)) { + case 0 -> new NodeAllocationStats( + randomValueOtherThan(instance.shards(), () -> randomIntBetween(0, 10000)), + instance.undesiredShards(), + instance.forecastedIngestLoad(), + instance.forecastedDiskUsage(), + instance.currentDiskUsage() + ); + case 1 -> new NodeAllocationStats( + instance.shards(), + randomValueOtherThan(instance.undesiredShards(), () -> randomIntBetween(0, 1000)), + instance.forecastedIngestLoad(), + instance.forecastedDiskUsage(), + instance.currentDiskUsage() + ); + case 2 -> new NodeAllocationStats( + instance.shards(), + instance.undesiredShards(), + randomValueOtherThan(instance.forecastedIngestLoad(), () -> randomDoubleBetween(0, 8, true)), + instance.forecastedDiskUsage(), + instance.currentDiskUsage() + ); + case 3 -> new NodeAllocationStats( + instance.shards(), + instance.undesiredShards(), + instance.forecastedIngestLoad(), + randomValueOtherThan(instance.forecastedDiskUsage(), ESTestCase::randomNonNegativeLong), + instance.currentDiskUsage() + ); + case 4 -> new NodeAllocationStats( + instance.shards(), + instance.undesiredShards(), + instance.forecastedIngestLoad(), + instance.currentDiskUsage(), + randomValueOtherThan(instance.forecastedDiskUsage(), ESTestCase::randomNonNegativeLong) + ); + default -> throw new RuntimeException("unreachable"); + }; + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecidersTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecidersTests.java index ac3984a2ded21..4fe07756a1d6b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecidersTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecidersTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.core.Predicates; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; @@ -113,7 +114,7 @@ private static List generateDecisions(Decision mandatory, Supplier decisions) { - return collectToMultiDecision(decisions, ignored -> true); + return collectToMultiDecision(decisions, Predicates.always()); } private static Decision.Multi collectToMultiDecision(List decisions, Predicate filter) { diff --git a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java index 77b2f0112ad43..453d9bfecf2ab 100644 --- 
a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java @@ -2125,7 +2125,7 @@ public void testTimeoutRejectionBehaviourAtSubmission() { final var source = randomIdentifier(); final var taskDescription = randomIdentifier(); - final var timeout = TimeValue.timeValueMillis(between(0, 100000)); + final var timeout = TimeValue.timeValueMillis(between(1, 100000)); final var actionCount = new AtomicInteger(); final var deterministicTaskQueue = new DeterministicTaskQueue(); diff --git a/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java b/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java index eb1d5838c734b..351efa59f2381 100644 --- a/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java +++ b/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java @@ -18,6 +18,7 @@ import java.util.List; import java.util.NoSuchElementException; import java.util.Objects; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiPredicate; import java.util.function.ToIntFunction; @@ -216,6 +217,31 @@ public void testMap() { assertEquals(array.length, index.get()); } + public void testFailFast() { + final var array = randomIntegerArray(); + assertEmptyIterator(Iterators.failFast(Iterators.forArray(array), () -> true)); + + final var index = new AtomicInteger(); + Iterators.failFast(Iterators.forArray(array), () -> false).forEachRemaining(i -> assertEquals(array[index.getAndIncrement()], i)); + assertEquals(array.length, index.get()); + + final var isFailing = new AtomicBoolean(); + index.set(0); + Iterators.failFast(Iterators.concat(Iterators.forArray(array), new Iterator<>() { + @Override + public boolean hasNext() { + isFailing.set(true); + return true; + } + + @Override + public Integer next() { + return 0; + } + }), isFailing::get).forEachRemaining(i -> assertEquals(array[index.getAndIncrement()], i)); + assertEquals(array.length, index.get()); + } + public void testEquals() { final BiPredicate notCalled = (a, b) -> { throw new AssertionError("not called"); }; diff --git a/server/src/test/java/org/elasticsearch/common/io/stream/RecyclerBytesStreamOutputTests.java b/server/src/test/java/org/elasticsearch/common/io/stream/RecyclerBytesStreamOutputTests.java index fd54dd12ce189..6cc7b355d4b1c 100644 --- a/server/src/test/java/org/elasticsearch/common/io/stream/RecyclerBytesStreamOutputTests.java +++ b/server/src/test/java/org/elasticsearch/common/io/stream/RecyclerBytesStreamOutputTests.java @@ -1027,6 +1027,11 @@ public V obtain() { pagesAllocated.incrementAndGet(); return page; } + + @Override + public int pageSize() { + return pageSize; + } })) { var bytesAllocated = 0; while (bytesAllocated < Integer.MAX_VALUE) { diff --git a/server/src/test/java/org/elasticsearch/common/io/stream/StreamInputTests.java b/server/src/test/java/org/elasticsearch/common/io/stream/StreamInputTests.java new file mode 100644 index 0000000000000..645461778f637 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/io/stream/StreamInputTests.java @@ -0,0 +1,125 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.common.io.stream; + +import org.elasticsearch.test.ESTestCase; +import org.mockito.Mockito; + +import java.io.IOException; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.hamcrest.Matchers.is; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.clearInvocations; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; + +// Note: read* methods are tested for concrete implementations, this just covers helpers to read strings +public class StreamInputTests extends ESTestCase { + + private StreamInput in = Mockito.spy(StreamInput.class); + byte[] bytes = "0123456789".getBytes(UTF_8); + + public void testCalculateByteLengthOfAscii() throws IOException { + // not enough bytes to read all chars + assertNull(in.tryReadStringFromBytes(bytes, 1, 10, 10)); + assertNull(in.tryReadStringFromBytes(bytes, 0, 9, 10)); + verify(in, never()).skip(anyLong()); + + assertThat(in.tryReadStringFromBytes(bytes, 9, 10, 1), is("9")); + verify(in).skip(1); + clearInvocations(in); + + assertThat(in.tryReadStringFromBytes(bytes, 0, 10, 10), is("0123456789")); + verify(in).skip(10); + } + + public void testCalculateByteLengthOfNonAscii() throws IOException { + // copy a two bytes char into bytes + System.arraycopy("©".getBytes(UTF_8), 0, bytes, 0, 2); + + assertNull(in.tryReadStringFromBytes(bytes, 0, 1, 1)); + verify(in, never()).skip(anyLong()); + + assertThat(in.tryReadStringFromBytes(bytes, 0, 2, 1), is("©")); + verify(in).skip(2); + clearInvocations(in); + + assertThat(in.tryReadStringFromBytes(bytes, 0, 10, 9), is("©23456789")); + verify(in).skip(10); + clearInvocations(in); + + // copy a three bytes char into bytes + System.arraycopy("€".getBytes(UTF_8), 0, bytes, 0, 3); + + assertNull(in.tryReadStringFromBytes(bytes, 0, 2, 1)); + verify(in, never()).skip(anyLong()); + + assertThat(in.tryReadStringFromBytes(bytes, 0, 3, 1), is("€")); + verify(in).skip(3); + clearInvocations(in); + + assertThat(in.tryReadStringFromBytes(bytes, 0, 10, 8), is("€3456789")); + verify(in).skip(10); + clearInvocations(in); + + // not enough bytes to read all chars + assertNull(in.tryReadStringFromBytes(bytes, 0, 10, 9)); + verify(in, never()).skip(anyLong()); + } + + public void testCalculateByteLengthOfIncompleteNonAscii() throws IOException { + // copy first byte to the end of bytes, this way the string can't ever be read completely + System.arraycopy("©".getBytes(UTF_8), 0, bytes, 9, 1); + + assertThat(in.tryReadStringFromBytes(bytes, 8, 10, 1), is("8")); + verify(in).skip(1); + clearInvocations(in); + + assertNull(in.tryReadStringFromBytes(bytes, 9, 10, 1)); + verify(in, never()).skip(anyLong()); + + // copy first two bytes of a three bytes char into bytes (similar to above) + System.arraycopy("€".getBytes(UTF_8), 0, bytes, 8, 2); + + assertThat(in.tryReadStringFromBytes(bytes, 7, 10, 1), is("7")); + verify(in).skip(1); + clearInvocations(in); + + assertNull(in.tryReadStringFromBytes(bytes, 8, 10, 1)); + verify(in, never()).skip(anyLong()); + } + + public void testCalculateByteLengthOfSurrogate() throws IOException { + BytesStreamOutput bytesOut = new BytesStreamOutput(); + bytesOut.writeString("ab💩"); + bytes = bytesOut.bytes.array(); + + assertThat(bytes[0], is((byte) 4)); // 2+2 
characters + assertThat(in.tryReadStringFromBytes(bytes, 1, bytes.length, 2), is("ab")); + verify(in).skip(2); + clearInvocations(in); + + // surrogates use a special encoding, their byte length differs to what new String expects + assertNull(in.tryReadStringFromBytes(bytes, 1, bytes.length, 4)); + assertNull(in.tryReadStringFromBytes(bytes, 3, bytes.length, 2)); + assertNull(in.tryReadStringFromBytes(bytes, 3, bytes.length, 1)); + verify(in, never()).skip(anyLong()); + + // set limit so tight that we cannot read the first 3 byte char + assertNull(in.tryReadStringFromBytes(bytes, 3, 5, 1)); + verify(in, never()).skip(anyLong()); + + // if using the UTF-8 encoding, the surrogate pair is encoded as 4 bytes (rather than 2x 3 bytes) + // this form of encoding isn't supported + System.arraycopy("💩".getBytes(UTF_8), 0, bytes, 0, 4); + assertThrows(IOException.class, () -> in.tryReadStringFromBytes(bytes, 0, bytes.length, 2)); + verify(in, never()).skip(anyLong()); + } +} diff --git a/server/src/test/java/org/elasticsearch/common/logging/HeaderWarningTests.java b/server/src/test/java/org/elasticsearch/common/logging/HeaderWarningTests.java index 997b076b328d9..3b4834d7ad0b4 100644 --- a/server/src/test/java/org/elasticsearch/common/logging/HeaderWarningTests.java +++ b/server/src/test/java/org/elasticsearch/common/logging/HeaderWarningTests.java @@ -12,7 +12,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.hamcrest.RegexMatcher; import org.hamcrest.core.IsSame; import java.io.IOException; @@ -26,10 +25,10 @@ import java.util.stream.IntStream; import static org.elasticsearch.common.logging.HeaderWarning.WARNING_HEADER_PATTERN; -import static org.elasticsearch.test.hamcrest.RegexMatcher.matches; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.matchesRegex; import static org.hamcrest.Matchers.not; /** @@ -37,7 +36,7 @@ */ public class HeaderWarningTests extends ESTestCase { - private static final RegexMatcher warningValueMatcher = matches(WARNING_HEADER_PATTERN.pattern()); + private static final org.hamcrest.Matcher warningValueMatcher = matchesRegex(WARNING_HEADER_PATTERN); private final HeaderWarning logger = new HeaderWarning(); diff --git a/server/src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTestCase.java b/server/src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTestCase.java index bd43bbbdcfbc2..5efa3e2fea300 100644 --- a/server/src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTestCase.java +++ b/server/src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTestCase.java @@ -26,7 +26,7 @@ public abstract class AbstractRecyclerTestCase extends ESTestCase { @Override public byte[] newInstance() { - byte[] value = new byte[10]; + byte[] value = new byte[pageSize()]; // "fresh" is intentionally not 0 to ensure we covered this code path Arrays.fill(value, FRESH); return value; @@ -43,6 +43,11 @@ public void destroy(byte[] value) { Arrays.fill(value, DEAD); } + @Override + public int pageSize() { + return 10; + } + }; protected void assertFresh(byte[] data) { diff --git a/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java b/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java index 3372aa9bc685b..946effda16a76 100644 --- 
a/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java @@ -9,9 +9,11 @@ package org.elasticsearch.common.util; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefIterator; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.breaker.PreallocatedCircuitBreakerService; +import org.elasticsearch.common.io.stream.ByteArrayStreamInput; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -275,6 +277,27 @@ public void testByteArrayBulkSet() { array2.close(); } + public void testByteIterator() throws Exception { + final byte[] bytes = new byte[randomIntBetween(1, 4000000)]; + random().nextBytes(bytes); + ByteArray array = bigArrays.newByteArray(bytes.length, randomBoolean()); + array.fillWith(new ByteArrayStreamInput(bytes)); + for (int i = 0; i < bytes.length; i++) { + assertEquals(bytes[i], array.get(i)); + } + BytesRefIterator it = array.iterator(); + BytesRef ref; + int offset = 0; + while ((ref = it.next()) != null) { + for (int i = 0; i < ref.length; i++) { + assertEquals(bytes[offset], ref.bytes[ref.offset + i]); + offset++; + } + } + assertThat(offset, equalTo(bytes.length)); + array.close(); + } + public void testByteArrayEquals() { final ByteArray empty1 = byteArrayWithBytes(BytesRef.EMPTY_BYTES); final ByteArray empty2 = byteArrayWithBytes(BytesRef.EMPTY_BYTES); diff --git a/server/src/test/java/org/elasticsearch/env/BuildVersionTests.java b/server/src/test/java/org/elasticsearch/env/BuildVersionTests.java new file mode 100644 index 0000000000000..a6bc5495be877 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/env/BuildVersionTests.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.env; + +import org.elasticsearch.Version; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.equalTo; + +public class BuildVersionTests extends ESTestCase { + public void testBuildVersionCurrent() { + assertThat(BuildVersion.current(), equalTo(BuildVersion.fromVersionId(Version.CURRENT.id()))); + } + + public void testBeforeMinimumCompatibleVersion() { + BuildVersion beforeMinCompat = BuildVersion.fromVersionId(between(0, Version.CURRENT.minimumCompatibilityVersion().id() - 1)); + BuildVersion afterMinCompat = BuildVersion.fromVersionId( + between(Version.CURRENT.minimumCompatibilityVersion().id(), Version.CURRENT.id()) + ); + BuildVersion futureVersion = BuildVersion.fromVersionId(between(Version.CURRENT.id() + 1, Version.CURRENT.id() + 1_000_000)); + + assertFalse(beforeMinCompat.onOrAfterMinimumCompatible()); + assertTrue(afterMinCompat.onOrAfterMinimumCompatible()); + assertTrue(futureVersion.onOrAfterMinimumCompatible()); + } + + public void testIsFutureVersion() { + BuildVersion beforeMinCompat = BuildVersion.fromVersionId(between(0, Version.CURRENT.minimumCompatibilityVersion().id() - 1)); + BuildVersion afterMinCompat = BuildVersion.fromVersionId( + between(Version.CURRENT.minimumCompatibilityVersion().id(), Version.CURRENT.id()) + ); + BuildVersion futureVersion = BuildVersion.fromVersionId(between(Version.CURRENT.id() + 1, Version.CURRENT.id() + 1_000_000)); + + assertFalse(beforeMinCompat.isFutureVersion()); + assertFalse(afterMinCompat.isFutureVersion()); + assertTrue(futureVersion.isFutureVersion()); + } +} diff --git a/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java b/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java index b7001943073bc..f60812977d578 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java @@ -33,7 +33,11 @@ public class NodeMetadataTests extends ESTestCase { // (Index)VersionUtils.randomVersion() only returns known versions, which are necessarily no later than (Index)Version.CURRENT; // however we want to also consider our behaviour with all versions, so occasionally pick up a truly random version. private Version randomVersion() { - return rarely() ? Version.fromId(randomInt()) : VersionUtils.randomVersion(random()); + return rarely() ? 
Version.fromId(randomNonNegativeInt()) : VersionUtils.randomVersion(random()); + } + + private BuildVersion randomBuildVersion() { + return BuildVersion.fromVersionId(randomVersion().id()); } private IndexVersion randomIndexVersion() { @@ -43,7 +47,7 @@ private IndexVersion randomIndexVersion() { public void testEqualsHashcodeSerialization() { final Path tempDir = createTempDir(); EqualsHashCodeTestUtils.checkEqualsAndHashCode( - new NodeMetadata(randomAlphaOfLength(10), randomVersion(), randomIndexVersion()), + new NodeMetadata(randomAlphaOfLength(10), randomBuildVersion(), randomIndexVersion()), nodeMetadata -> { final long generation = NodeMetadata.FORMAT.writeAndCleanup(nodeMetadata, tempDir); final Tuple nodeMetadataLongTuple = NodeMetadata.FORMAT.loadLatestStateWithGeneration( @@ -62,7 +66,7 @@ public void testEqualsHashcodeSerialization() { ); case 1 -> new NodeMetadata( nodeMetadata.nodeId(), - randomValueOtherThan(nodeMetadata.nodeVersion(), this::randomVersion), + randomValueOtherThan(nodeMetadata.nodeVersion(), this::randomBuildVersion), nodeMetadata.oldestIndexVersion() ); default -> new NodeMetadata( @@ -87,20 +91,17 @@ public void testReadsFormatWithoutVersion() throws IOException { Files.copy(resource, stateDir.resolve(NodeMetadata.FORMAT.getStateFileName(between(0, Integer.MAX_VALUE)))); final NodeMetadata nodeMetadata = NodeMetadata.FORMAT.loadLatestState(logger, xContentRegistry(), tempDir); assertThat(nodeMetadata.nodeId(), equalTo("y6VUVMSaStO4Tz-B5BxcOw")); - assertThat(nodeMetadata.nodeVersion(), equalTo(Version.V_EMPTY)); + assertThat(nodeMetadata.nodeVersion(), equalTo(BuildVersion.fromVersionId(0))); } public void testUpgradesLegitimateVersions() { final String nodeId = randomAlphaOfLength(10); final NodeMetadata nodeMetadata = new NodeMetadata( nodeId, - randomValueOtherThanMany( - v -> v.after(Version.CURRENT) || v.before(Version.CURRENT.minimumCompatibilityVersion()), - this::randomVersion - ), + randomValueOtherThanMany(v -> v.isFutureVersion() || v.onOrAfterMinimumCompatible() == false, this::randomBuildVersion), IndexVersion.current() ).upgradeToCurrentVersion(); - assertThat(nodeMetadata.nodeVersion(), equalTo(Version.CURRENT)); + assertThat(nodeMetadata.nodeVersion(), equalTo(BuildVersion.current())); assertThat(nodeMetadata.nodeId(), equalTo(nodeId)); } @@ -109,7 +110,7 @@ public void testUpgradesMissingVersion() { final IllegalStateException illegalStateException = expectThrows( IllegalStateException.class, - () -> new NodeMetadata(nodeId, Version.V_EMPTY, IndexVersion.current()).upgradeToCurrentVersion() + () -> new NodeMetadata(nodeId, BuildVersion.fromVersionId(0), IndexVersion.current()).upgradeToCurrentVersion() ); assertThat( illegalStateException.getMessage(), @@ -122,7 +123,7 @@ public void testUpgradesMissingVersion() { public void testDoesNotUpgradeFutureVersion() { final IllegalStateException illegalStateException = expectThrows( IllegalStateException.class, - () -> new NodeMetadata(randomAlphaOfLength(10), tooNewVersion(), IndexVersion.current()).upgradeToCurrentVersion() + () -> new NodeMetadata(randomAlphaOfLength(10), tooNewBuildVersion(), IndexVersion.current()).upgradeToCurrentVersion() ); assertThat( illegalStateException.getMessage(), @@ -133,7 +134,7 @@ public void testDoesNotUpgradeFutureVersion() { public void testDoesNotUpgradeAncientVersion() { final IllegalStateException illegalStateException = expectThrows( IllegalStateException.class, - () -> new NodeMetadata(randomAlphaOfLength(10), tooOldVersion(), 
IndexVersion.current()).upgradeToCurrentVersion() + () -> new NodeMetadata(randomAlphaOfLength(10), tooOldBuildVersion(), IndexVersion.current()).upgradeToCurrentVersion() ); assertThat( illegalStateException.getMessage(), @@ -153,10 +154,11 @@ public void testDoesNotUpgradeAncientVersion() { public void testUpgradeMarksPreviousVersion() { final String nodeId = randomAlphaOfLength(10); final Version version = VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.V_8_0_0); + final BuildVersion buildVersion = BuildVersion.fromVersionId(version.id()); - final NodeMetadata nodeMetadata = new NodeMetadata(nodeId, version, IndexVersion.current()).upgradeToCurrentVersion(); - assertThat(nodeMetadata.nodeVersion(), equalTo(Version.CURRENT)); - assertThat(nodeMetadata.previousNodeVersion(), equalTo(version)); + final NodeMetadata nodeMetadata = new NodeMetadata(nodeId, buildVersion, IndexVersion.current()).upgradeToCurrentVersion(); + assertThat(nodeMetadata.nodeVersion(), equalTo(BuildVersion.current())); + assertThat(nodeMetadata.previousNodeVersion(), equalTo(buildVersion)); } public static Version tooNewVersion() { @@ -167,7 +169,15 @@ public static IndexVersion tooNewIndexVersion() { return IndexVersion.fromId(between(IndexVersion.current().id() + 1, 99999999)); } + public static BuildVersion tooNewBuildVersion() { + return BuildVersion.fromVersionId(between(Version.CURRENT.id() + 1, 99999999)); + } + public static Version tooOldVersion() { return Version.fromId(between(1, Version.CURRENT.minimumCompatibilityVersion().id - 1)); } + + public static BuildVersion tooOldBuildVersion() { + return BuildVersion.fromVersionId(between(1, Version.CURRENT.minimumCompatibilityVersion().id - 1)); + } } diff --git a/server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java b/server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java index 8a4da8e8cee94..39872df80236e 100644 --- a/server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java +++ b/server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java @@ -136,7 +136,7 @@ public void testWarnsIfTooOld() throws Exception { expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); final NodeMetadata nodeMetadata = PersistedClusterStateService.nodeMetadata(dataPaths); - assertThat(nodeMetadata.nodeVersion(), equalTo(nodeVersion)); + assertThat(nodeMetadata.nodeVersion().toVersion(), equalTo(nodeVersion)); } public void testWarnsIfTooNew() throws Exception { @@ -161,7 +161,7 @@ public void testWarnsIfTooNew() throws Exception { expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); final NodeMetadata nodeMetadata = PersistedClusterStateService.nodeMetadata(dataPaths); - assertThat(nodeMetadata.nodeVersion(), equalTo(nodeVersion)); + assertThat(nodeMetadata.nodeVersion().toVersion(), equalTo(nodeVersion)); } public void testOverwritesIfTooOld() throws Exception { @@ -184,7 +184,7 @@ public void testOverwritesIfTooOld() throws Exception { expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); final NodeMetadata nodeMetadata = PersistedClusterStateService.nodeMetadata(dataPaths); - assertThat(nodeMetadata.nodeVersion(), equalTo(Version.CURRENT)); + assertThat(nodeMetadata.nodeVersion(), equalTo(BuildVersion.current())); } public void testOverwritesIfTooNew() throws Exception { @@ -206,6 +206,6 @@ public void testOverwritesIfTooNew() throws Exception { 
expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); final NodeMetadata nodeMetadata = PersistedClusterStateService.nodeMetadata(dataPaths); - assertThat(nodeMetadata.nodeVersion(), equalTo(Version.CURRENT)); + assertThat(nodeMetadata.nodeVersion(), equalTo(BuildVersion.current())); } } diff --git a/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java b/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java index bad6702e8ad83..7951c23ae815a 100644 --- a/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java @@ -52,6 +52,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.env.BuildVersion; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.NodeMetadata; @@ -1439,13 +1440,13 @@ public void testOverrideLuceneVersion() throws IOException { } NodeMetadata prevMetadata = PersistedClusterStateService.nodeMetadata(persistedClusterStateService.getDataPaths()); - assertEquals(Version.CURRENT, prevMetadata.nodeVersion()); + assertEquals(BuildVersion.current(), prevMetadata.nodeVersion()); PersistedClusterStateService.overrideVersion(Version.V_8_0_0, persistedClusterStateService.getDataPaths()); NodeMetadata metadata = PersistedClusterStateService.nodeMetadata(persistedClusterStateService.getDataPaths()); - assertEquals(Version.V_8_0_0, metadata.nodeVersion()); + assertEquals(BuildVersion.fromVersionId(Version.V_8_0_0.id()), metadata.nodeVersion()); for (Path p : persistedClusterStateService.getDataPaths()) { NodeMetadata individualMetadata = PersistedClusterStateService.nodeMetadata(p); - assertEquals(Version.V_8_0_0, individualMetadata.nodeVersion()); + assertEquals(BuildVersion.fromVersionId(Version.V_8_0_0.id()), individualMetadata.nodeVersion()); } } } diff --git a/server/src/test/java/org/elasticsearch/health/node/DiskHealthIndicatorServiceTests.java b/server/src/test/java/org/elasticsearch/health/node/DiskHealthIndicatorServiceTests.java index a622c1ff600d6..0d38aaf5b3e4a 100644 --- a/server/src/test/java/org/elasticsearch/health/node/DiskHealthIndicatorServiceTests.java +++ b/server/src/test/java/org/elasticsearch/health/node/DiskHealthIndicatorServiceTests.java @@ -26,7 +26,9 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.health.Diagnosis; +import org.elasticsearch.health.HealthFeatures; import org.elasticsearch.health.HealthIndicatorImpact; import org.elasticsearch.health.HealthIndicatorResult; import org.elasticsearch.health.HealthStatus; @@ -39,6 +41,8 @@ import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; +import org.junit.Before; +import org.mockito.Mockito; import java.io.IOException; import java.util.Collection; @@ -66,6 +70,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.iterableWithSize; import static org.hamcrest.Matchers.startsWith; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -98,10 +103,20 
@@ public class DiskHealthIndicatorServiceTests extends ESTestCase { DiscoveryNodeRole.TRANSFORM_ROLE ); + private FeatureService featureService; + + @Before + public void setUp() throws Exception { + super.setUp(); + + featureService = Mockito.mock(FeatureService.class); + Mockito.when(featureService.clusterHasFeature(any(), any())).thenReturn(true); + } + public void testServiceBasics() { Set discoveryNodes = createNodesWithAllRoles(); ClusterService clusterService = createClusterService(discoveryNodes, false); - DiskHealthIndicatorService diskHealthIndicatorService = new DiskHealthIndicatorService(clusterService); + DiskHealthIndicatorService diskHealthIndicatorService = new DiskHealthIndicatorService(clusterService, featureService); { HealthStatus expectedStatus = HealthStatus.UNKNOWN; HealthInfo healthInfo = HealthInfo.EMPTY_HEALTH_INFO; @@ -125,7 +140,7 @@ public void testServiceBasics() { public void testIndicatorYieldsGreenWhenNodeHasUnknownStatus() { Set discoveryNodes = createNodesWithAllRoles(); ClusterService clusterService = createClusterService(discoveryNodes, false); - DiskHealthIndicatorService diskHealthIndicatorService = new DiskHealthIndicatorService(clusterService); + DiskHealthIndicatorService diskHealthIndicatorService = new DiskHealthIndicatorService(clusterService, featureService); HealthStatus expectedStatus = HealthStatus.GREEN; HealthInfo healthInfo = createHealthInfoWithOneUnhealthyNode(HealthStatus.UNKNOWN, discoveryNodes); @@ -136,7 +151,7 @@ public void testIndicatorYieldsGreenWhenNodeHasUnknownStatus() { public void testGreen() throws IOException { Set discoveryNodes = createNodesWithAllRoles(); ClusterService clusterService = createClusterService(discoveryNodes, false); - DiskHealthIndicatorService diskHealthIndicatorService = new DiskHealthIndicatorService(clusterService); + DiskHealthIndicatorService diskHealthIndicatorService = new DiskHealthIndicatorService(clusterService, featureService); HealthStatus expectedStatus = HealthStatus.GREEN; HealthInfo healthInfo = createHealthInfoWithOneUnhealthyNode(expectedStatus, discoveryNodes); HealthIndicatorResult result = diskHealthIndicatorService.calculate(true, healthInfo); @@ -171,7 +186,7 @@ public void testYellowMixedNodes() throws IOException { final var clusterService = createClusterService(Set.of(), allNodes, indexNameToNodeIdsMap); HealthStatus expectedStatus = HealthStatus.YELLOW; HealthInfo healthInfo = createHealthInfo(new HealthInfoConfig(expectedStatus, allNodes.size(), allNodes)); - DiskHealthIndicatorService diskHealthIndicatorService = new DiskHealthIndicatorService(clusterService); + DiskHealthIndicatorService diskHealthIndicatorService = new DiskHealthIndicatorService(clusterService, featureService); HealthIndicatorResult result = diskHealthIndicatorService.calculate(true, healthInfo); assertThat(result.status(), equalTo(expectedStatus)); assertThat(result.symptom(), containsString("with roles: [data")); @@ -249,7 +264,7 @@ public void testRedNoBlockedIndicesAndRedAllRoleNodes() throws IOException { indexNameToNodeIdsMap.put(indexName, new HashSet<>(randomNonEmptySubsetOf(affectedNodeIds))); } ClusterService clusterService = createClusterService(Set.of(), discoveryNodes, indexNameToNodeIdsMap); - DiskHealthIndicatorService diskHealthIndicatorService = new DiskHealthIndicatorService(clusterService); + DiskHealthIndicatorService diskHealthIndicatorService = new DiskHealthIndicatorService(clusterService, featureService); Map diskInfoByNode = new HashMap<>(); for (DiscoveryNode discoveryNode : 
discoveryNodes) { if (affectedNodeIds.contains(discoveryNode.getId())) { @@ -313,7 +328,7 @@ public void testRedNoBlockedIndicesAndRedAllRoleNodes() throws IOException { public void testRedWithBlockedIndicesAndGreenNodes() throws IOException { Set discoveryNodes = createNodesWithAllRoles(); ClusterService clusterService = createClusterService(discoveryNodes, true); - DiskHealthIndicatorService diskHealthIndicatorService = new DiskHealthIndicatorService(clusterService); + DiskHealthIndicatorService diskHealthIndicatorService = new DiskHealthIndicatorService(clusterService, featureService); HealthStatus expectedStatus = HealthStatus.RED; HealthInfo healthInfo = createHealthInfoWithOneUnhealthyNode(HealthStatus.GREEN, discoveryNodes); @@ -358,7 +373,7 @@ public void testRedWithBlockedIndicesAndGreenNodes() throws IOException { public void testRedWithBlockedIndicesAndYellowNodes() throws IOException { Set discoveryNodes = createNodesWithAllRoles(); ClusterService clusterService = createClusterService(discoveryNodes, true); - DiskHealthIndicatorService diskHealthIndicatorService = new DiskHealthIndicatorService(clusterService); + DiskHealthIndicatorService diskHealthIndicatorService = new DiskHealthIndicatorService(clusterService, featureService); HealthStatus expectedStatus = HealthStatus.RED; int numberOfYellowNodes = randomIntBetween(1, discoveryNodes.size()); HealthInfo healthInfo = createHealthInfo(new HealthInfoConfig(HealthStatus.YELLOW, numberOfYellowNodes, discoveryNodes)); @@ -437,7 +452,7 @@ public void testRedBlockedIndicesAndRedAllRolesNodes() throws IOException { } } ClusterService clusterService = createClusterService(blockedIndices, discoveryNodes, indexNameToNodeIdsMap); - DiskHealthIndicatorService diskHealthIndicatorService = new DiskHealthIndicatorService(clusterService); + DiskHealthIndicatorService diskHealthIndicatorService = new DiskHealthIndicatorService(clusterService, featureService); HealthIndicatorResult result = diskHealthIndicatorService.calculate(true, healthInfo); assertThat(result.status(), equalTo(expectedStatus)); assertThat( @@ -476,7 +491,7 @@ public void testRedNodesWithoutAnyBlockedIndices() throws IOException { indexNameToNodeIdsMap.put(indexName, nonRedNodeIds); } ClusterService clusterService = createClusterService(Set.of(), discoveryNodes, indexNameToNodeIdsMap); - DiskHealthIndicatorService diskHealthIndicatorService = new DiskHealthIndicatorService(clusterService); + DiskHealthIndicatorService diskHealthIndicatorService = new DiskHealthIndicatorService(clusterService, featureService); HealthIndicatorResult result = diskHealthIndicatorService.calculate(true, healthInfo); assertThat(result.status(), equalTo(expectedStatus)); assertThat(result.impacts().size(), equalTo(3)); @@ -512,7 +527,7 @@ public void testMissingHealthInfo() { Set discoveryNodesInClusterState = new HashSet<>(discoveryNodes); discoveryNodesInClusterState.add(DiscoveryNodeUtils.create(randomAlphaOfLength(30), UUID.randomUUID().toString())); ClusterService clusterService = createClusterService(discoveryNodesInClusterState, false); - DiskHealthIndicatorService diskHealthIndicatorService = new DiskHealthIndicatorService(clusterService); + DiskHealthIndicatorService diskHealthIndicatorService = new DiskHealthIndicatorService(clusterService, featureService); { HealthInfo healthInfo = HealthInfo.EMPTY_HEALTH_INFO; HealthIndicatorResult result = diskHealthIndicatorService.calculate(true, healthInfo); @@ -544,7 +559,7 @@ public void testUnhealthyMasterNodes() { Set roles = 
Set.of(DiscoveryNodeRole.MASTER_ROLE, otherRole); Set discoveryNodes = createNodes(roles); ClusterService clusterService = createClusterService(discoveryNodes, false); - DiskHealthIndicatorService diskHealthIndicatorService = new DiskHealthIndicatorService(clusterService); + DiskHealthIndicatorService diskHealthIndicatorService = new DiskHealthIndicatorService(clusterService, featureService); HealthStatus expectedStatus = randomFrom(HealthStatus.RED, HealthStatus.YELLOW); int numberOfProblemNodes = randomIntBetween(1, discoveryNodes.size()); HealthInfo healthInfo = createHealthInfo(new HealthInfoConfig(expectedStatus, numberOfProblemNodes, discoveryNodes)); @@ -599,7 +614,7 @@ public void testUnhealthyNonDataNonMasterNodes() { Set roles = new HashSet<>(randomNonEmptySubsetOf(OTHER_ROLES)); Set nodes = createNodes(roles); ClusterService clusterService = createClusterService(nodes, false); - DiskHealthIndicatorService diskHealthIndicatorService = new DiskHealthIndicatorService(clusterService); + DiskHealthIndicatorService diskHealthIndicatorService = new DiskHealthIndicatorService(clusterService, featureService); HealthStatus expectedStatus = randomFrom(HealthStatus.RED, HealthStatus.YELLOW); int numberOfProblemNodes = randomIntBetween(1, nodes.size()); HealthInfo healthInfo = createHealthInfo(new HealthInfoConfig(expectedStatus, numberOfProblemNodes, nodes)); @@ -655,7 +670,7 @@ public void testBlockedIndexWithRedNonDataNodesAndYellowDataNodes() { Set masterNodes = createNodes(masterRole); Set otherNodes = createNodes(otherRoles); ClusterService clusterService = createClusterService(Sets.union(Sets.union(dataNodes, masterNodes), otherNodes), true); - DiskHealthIndicatorService diskHealthIndicatorService = new DiskHealthIndicatorService(clusterService); + DiskHealthIndicatorService diskHealthIndicatorService = new DiskHealthIndicatorService(clusterService, featureService); int numberOfRedMasterNodes = randomIntBetween(1, masterNodes.size()); int numberOfRedOtherNodes = randomIntBetween(1, otherNodes.size()); int numberOfYellowDataNodes = randomIntBetween(1, dataNodes.size()); @@ -877,7 +892,7 @@ public void testLimitNumberOfAffectedResources() { Set masterNodes = createNodes(20, masterRole); Set otherNodes = createNodes(10, otherRoles); ClusterService clusterService = createClusterService(Sets.union(Sets.union(dataNodes, masterNodes), otherNodes), true); - DiskHealthIndicatorService diskHealthIndicatorService = new DiskHealthIndicatorService(clusterService); + DiskHealthIndicatorService diskHealthIndicatorService = new DiskHealthIndicatorService(clusterService, featureService); int numberOfRedMasterNodes = masterNodes.size(); int numberOfRedOtherNodes = otherNodes.size(); int numberOfYellowDataNodes = dataNodes.size(); @@ -1055,9 +1070,11 @@ static ClusterState createClusterState( Collection nodes, Map> indexNameToNodeIdsMap ) { + Map> features = new HashMap<>(); DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); for (DiscoveryNode node : nodes) { nodesBuilder = nodesBuilder.add(node); + features.put(node.getId(), Set.of(HealthFeatures.SUPPORTS_HEALTH.id())); } nodesBuilder.localNodeId(randomFrom(nodes).getId()); nodesBuilder.masterNodeId(randomFrom(nodes).getId()); @@ -1093,6 +1110,7 @@ static ClusterState createClusterState( state.metadata(metadata.generateClusterUuidIfNeeded().build()); state.routingTable(routingTable.build()); state.blocks(clusterBlocksBuilder); + state.nodeFeatures(features); return state.build(); } diff --git 
a/server/src/test/java/org/elasticsearch/health/node/LocalHealthMonitorTests.java b/server/src/test/java/org/elasticsearch/health/node/LocalHealthMonitorTests.java index 768b646d84beb..a4436fd637c5a 100644 --- a/server/src/test/java/org/elasticsearch/health/node/LocalHealthMonitorTests.java +++ b/server/src/test/java/org/elasticsearch/health/node/LocalHealthMonitorTests.java @@ -27,14 +27,17 @@ import org.elasticsearch.health.HealthFeatures; import org.elasticsearch.health.HealthStatus; import org.elasticsearch.health.metadata.HealthMetadata; +import org.elasticsearch.health.node.selection.HealthNode; import org.elasticsearch.health.node.tracker.HealthTracker; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; +import java.util.ArrayList; import java.util.List; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; @@ -44,12 +47,15 @@ import static org.hamcrest.Matchers.nullValue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class LocalHealthMonitorTests extends ESTestCase { private static final DiskHealthInfo GREEN = new DiskHealthInfo(HealthStatus.GREEN, null); + private static final DiskHealthInfo YELLOW = new DiskHealthInfo(HealthStatus.YELLOW, null); + private static final DiskHealthInfo RED = new DiskHealthInfo(HealthStatus.RED, null); private static ThreadPool threadPool; private ClusterService clusterService; private DiscoveryNode node; @@ -71,7 +77,6 @@ public static void tearDownThreadPool() { } @Before - @SuppressWarnings("unchecked") public void setUp() throws Exception { super.setUp(); // Set-up cluster state @@ -127,6 +132,14 @@ public void setUp() throws Exception { ); } + @After + public void tearDown() throws Exception { + super.tearDown(); + + // Kill monitoring process running in the background after each test. + localHealthMonitor.setEnabled(false); + } + @SuppressWarnings("unchecked") public void testUpdateHealthInfo() throws Exception { doAnswer(invocation -> { @@ -241,8 +254,136 @@ public void testEnablingAndDisabling() throws Exception { assertBusy(() -> assertThat(mockHealthTracker.getLastReportedValue(), equalTo(nextHealthStatus))); } + /** + * This test verifies that the local health monitor is able to deal with the more complex situation where it is forced to restart + * (due to a health node change) while there is an in-flight request to the previous health node. 
+ */ + public void testResetDuringInFlightRequest() throws Exception { + ClusterState initialState = ClusterStateCreationUtils.state(node, node, node, new DiscoveryNode[] { node, frozenNode }) + .copyAndUpdate(b -> b.putCustom(HealthMetadata.TYPE, healthMetadata)); + ClusterState newState = ClusterStateCreationUtils.state(node, frozenNode, node, new DiscoveryNode[] { node, frozenNode }) + .copyAndUpdate(b -> b.putCustom(HealthMetadata.TYPE, healthMetadata)); + when(clusterService.state()).thenReturn(initialState); + + var requestCounter = new AtomicInteger(); + doAnswer(invocation -> { + var diskHealthInfo = ((UpdateHealthInfoCacheAction.Request) invocation.getArgument(1)).getDiskHealthInfo(); + assertThat(diskHealthInfo, equalTo(GREEN)); + var currentValue = requestCounter.incrementAndGet(); + // We only want to switch the health node during the first request. Any following request(s) should simply succeed. + if (currentValue == 1) { + when(clusterService.state()).thenReturn(newState); + localHealthMonitor.clusterChanged(new ClusterChangedEvent("health-node-switch", newState, initialState)); + } + ActionListener listener = invocation.getArgument(2); + listener.onResponse(null); + return null; + }).when(client).execute(any(), any(), any()); + + localHealthMonitor.setMonitorInterval(TimeValue.timeValueMillis(10)); + localHealthMonitor.clusterChanged(new ClusterChangedEvent("start-up", initialState, ClusterState.EMPTY_STATE)); + // Assert that we've sent the update request twice, even though the health info itself hasn't changed (i.e. we send again due to + // the health node change). + assertBusy(() -> assertThat(requestCounter.get(), equalTo(2))); + } + + /** + * The aim of this test is to rapidly fire off a series of state changes and make sure that the health node in the last cluster + * state actually gets the health info. + */ + public void testRapidStateChanges() throws Exception { + ClusterState state = ClusterStateCreationUtils.state(node, node, node, new DiscoveryNode[] { node, frozenNode }) + .copyAndUpdate(b -> b.putCustom(HealthMetadata.TYPE, healthMetadata)); + doReturn(state).when(clusterService).state(); + + // Keep track of the "current" health node. + var currentHealthNode = new AtomicReference<>(node); + // Keep a list of all the health nodes that have received a request. 
+ var updatedHealthNodes = new ArrayList(); + doAnswer(invocation -> { + var diskHealthInfo = ((UpdateHealthInfoCacheAction.Request) invocation.getArgument(1)).getDiskHealthInfo(); + assertThat(diskHealthInfo, equalTo(GREEN)); + ActionListener listener = invocation.getArgument(2); + listener.onResponse(null); + updatedHealthNodes.add(currentHealthNode.get()); + return null; + }).when(client).execute(any(), any(), any()); + + localHealthMonitor.setMonitorInterval(TimeValue.timeValueMillis(0)); + localHealthMonitor.clusterChanged(new ClusterChangedEvent("start-up", state, ClusterState.EMPTY_STATE)); + + int count = randomIntBetween(10, 20); + for (int i = 0; i < count; i++) { + var previous = state; + state = mutateState(previous); + currentHealthNode.set(HealthNode.findHealthNode(state)); + localHealthMonitor.clusterChanged(new ClusterChangedEvent("switch", state, previous)); + } + + var lastHealthNode = DiscoveryNodeUtils.create("health-node", "health-node"); + var previous = state; + state = ClusterStateCreationUtils.state( + node, + previous.nodes().getMasterNode(), + lastHealthNode, + new DiscoveryNode[] { node, frozenNode, lastHealthNode } + ).copyAndUpdate(b -> b.putCustom(HealthMetadata.TYPE, healthMetadata)); + currentHealthNode.set(lastHealthNode); + localHealthMonitor.clusterChanged(new ClusterChangedEvent("switch", state, previous)); + + assertBusy(() -> assertTrue(updatedHealthNodes.contains(lastHealthNode))); + } + + private ClusterState mutateState(ClusterState previous) { + var masterNode = previous.nodes().getMasterNode(); + var healthNode = HealthNode.findHealthNode(previous); + var randomNode = DiscoveryNodeUtils.create(randomAlphaOfLength(10), randomAlphaOfLength(10)); + switch (randomInt(1)) { + case 0 -> masterNode = randomValueOtherThan(masterNode, () -> randomFrom(node, frozenNode, randomNode)); + case 1 -> healthNode = randomValueOtherThan(healthNode, () -> randomFrom(node, frozenNode, randomNode)); + } + return ClusterStateCreationUtils.state(node, masterNode, healthNode, new DiscoveryNode[] { node, frozenNode, randomNode }) + .copyAndUpdate(b -> b.putCustom(HealthMetadata.TYPE, healthMetadata)); + } + + /** + * The aim of this test is to change the health of the health tracker several times and make sure that every change is sent to the + * health node (especially the last change). + */ + public void testChangingHealth() throws Exception { + // Keep a list of disk health info's that we've seen. + var sentHealthInfos = new ArrayList(); + doAnswer(invocation -> { + var diskHealthInfo = ((UpdateHealthInfoCacheAction.Request) invocation.getArgument(1)).getDiskHealthInfo(); + ActionListener listener = invocation.getArgument(2); + listener.onResponse(null); + sentHealthInfos.add(diskHealthInfo); + return null; + }).when(client).execute(any(), any(), any()); + + localHealthMonitor.setMonitorInterval(TimeValue.timeValueMillis(0)); + localHealthMonitor.clusterChanged(new ClusterChangedEvent("initialize", clusterState, ClusterState.EMPTY_STATE)); + // Make sure the initial health value has been registered. + assertBusy(() -> assertFalse(sentHealthInfos.isEmpty())); + + var previousHealthInfo = mockHealthTracker.healthInfo; + var healthChanges = new AtomicInteger(1); + int count = randomIntBetween(10, 20); + for (int i = 0; i < count; i++) { + var newHealthInfo = randomFrom(GREEN, YELLOW); + mockHealthTracker.setHealthInfo(newHealthInfo); + // Check whether the health node has changed. If so, we're going to wait for it to be sent to the health node. 
+ healthChanges.addAndGet(newHealthInfo.equals(previousHealthInfo) ? 0 : 1); + assertBusy(() -> assertEquals(healthChanges.get(), sentHealthInfos.size())); + previousHealthInfo = newHealthInfo; + } + + mockHealthTracker.setHealthInfo(RED); + assertBusy(() -> assertTrue(sentHealthInfos.contains(RED))); + } + private static class MockHealthTracker extends HealthTracker { - private DiskHealthInfo healthInfo = GREEN; + private volatile DiskHealthInfo healthInfo = GREEN; @Override public DiskHealthInfo checkCurrentHealth() { diff --git a/server/src/test/java/org/elasticsearch/health/node/ShardsCapacityHealthIndicatorServiceTests.java b/server/src/test/java/org/elasticsearch/health/node/ShardsCapacityHealthIndicatorServiceTests.java index f6e856079012d..c57f19999a915 100644 --- a/server/src/test/java/org/elasticsearch/health/node/ShardsCapacityHealthIndicatorServiceTests.java +++ b/server/src/test/java/org/elasticsearch/health/node/ShardsCapacityHealthIndicatorServiceTests.java @@ -19,6 +19,8 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.features.FeatureService; +import org.elasticsearch.health.HealthFeatures; import org.elasticsearch.health.HealthStatus; import org.elasticsearch.health.metadata.HealthMetadata; import org.elasticsearch.index.IndexVersion; @@ -36,6 +38,7 @@ import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; +import org.mockito.Mockito; import java.io.IOException; import java.util.List; @@ -60,6 +63,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; +import static org.mockito.ArgumentMatchers.any; public class ShardsCapacityHealthIndicatorServiceTests extends ESTestCase { @@ -68,6 +72,7 @@ public class ShardsCapacityHealthIndicatorServiceTests extends ESTestCase { private static ThreadPool threadPool; private ClusterService clusterService; + private FeatureService featureService; private DiscoveryNode dataNode; private DiscoveryNode frozenNode; @@ -86,6 +91,9 @@ public void setUp() throws Exception { .build(); clusterService = ClusterServiceUtils.createClusterService(threadPool); + + featureService = Mockito.mock(FeatureService.class); + Mockito.when(featureService.clusterHasFeature(any(), any())).thenReturn(true); } @After @@ -113,7 +121,7 @@ public void testNoShardsCapacityMetadata() throws IOException { createIndexInDataNode(100) ) ); - var target = new ShardsCapacityHealthIndicatorService(clusterService); + var target = new ShardsCapacityHealthIndicatorService(clusterService, featureService); var indicatorResult = target.calculate(true, HealthInfo.EMPTY_HEALTH_INFO); assertEquals(indicatorResult.status(), HealthStatus.UNKNOWN); @@ -127,7 +135,10 @@ public void testIndicatorYieldsGreenInCaseThereIsRoom() throws IOException { int maxShardsPerNode = randomValidMaxShards(); int maxShardsPerNodeFrozen = randomValidMaxShards(); var clusterService = createClusterService(maxShardsPerNode, maxShardsPerNodeFrozen, createIndexInDataNode(maxShardsPerNode / 4)); - var indicatorResult = new ShardsCapacityHealthIndicatorService(clusterService).calculate(true, HealthInfo.EMPTY_HEALTH_INFO); + var indicatorResult = new ShardsCapacityHealthIndicatorService(clusterService, featureService).calculate( + true, + HealthInfo.EMPTY_HEALTH_INFO + ); assertEquals(indicatorResult.status(), HealthStatus.GREEN); 
assertTrue(indicatorResult.impacts().isEmpty()); @@ -151,7 +162,10 @@ public void testIndicatorYieldsYellowInCaseThereIsNotEnoughRoom() throws IOExcep // Only data_nodes does not have enough space int maxShardsPerNodeFrozen = randomValidMaxShards(); var clusterService = createClusterService(25, maxShardsPerNodeFrozen, createIndexInDataNode(4)); - var indicatorResult = new ShardsCapacityHealthIndicatorService(clusterService).calculate(true, HealthInfo.EMPTY_HEALTH_INFO); + var indicatorResult = new ShardsCapacityHealthIndicatorService(clusterService, featureService).calculate( + true, + HealthInfo.EMPTY_HEALTH_INFO + ); assertEquals(indicatorResult.status(), YELLOW); assertEquals(indicatorResult.symptom(), "Cluster is close to reaching the configured maximum number of shards for data nodes."); @@ -174,7 +188,10 @@ public void testIndicatorYieldsYellowInCaseThereIsNotEnoughRoom() throws IOExcep // Only frozen_nodes does not have enough space int maxShardsPerNode = randomValidMaxShards(); var clusterService = createClusterService(maxShardsPerNode, 25, createIndexInFrozenNode(4)); - var indicatorResult = new ShardsCapacityHealthIndicatorService(clusterService).calculate(true, HealthInfo.EMPTY_HEALTH_INFO); + var indicatorResult = new ShardsCapacityHealthIndicatorService(clusterService, featureService).calculate( + true, + HealthInfo.EMPTY_HEALTH_INFO + ); assertEquals(indicatorResult.status(), YELLOW); assertEquals( @@ -199,7 +216,10 @@ public void testIndicatorYieldsYellowInCaseThereIsNotEnoughRoom() throws IOExcep { // Both data and frozen nodes does not have enough space var clusterService = createClusterService(25, 25, createIndexInDataNode(4), createIndexInFrozenNode(4)); - var indicatorResult = new ShardsCapacityHealthIndicatorService(clusterService).calculate(true, HealthInfo.EMPTY_HEALTH_INFO); + var indicatorResult = new ShardsCapacityHealthIndicatorService(clusterService, featureService).calculate( + true, + HealthInfo.EMPTY_HEALTH_INFO + ); assertEquals(indicatorResult.status(), YELLOW); assertEquals( @@ -230,7 +250,10 @@ public void testIndicatorYieldsRedInCaseThereIsNotEnoughRoom() throws IOExceptio // Only data_nodes does not have enough space int maxShardsPerNodeFrozen = randomValidMaxShards(); var clusterService = createClusterService(25, maxShardsPerNodeFrozen, createIndexInDataNode(11)); - var indicatorResult = new ShardsCapacityHealthIndicatorService(clusterService).calculate(true, HealthInfo.EMPTY_HEALTH_INFO); + var indicatorResult = new ShardsCapacityHealthIndicatorService(clusterService, featureService).calculate( + true, + HealthInfo.EMPTY_HEALTH_INFO + ); assertEquals(indicatorResult.status(), RED); assertEquals(indicatorResult.symptom(), "Cluster is close to reaching the configured maximum number of shards for data nodes."); @@ -253,7 +276,10 @@ public void testIndicatorYieldsRedInCaseThereIsNotEnoughRoom() throws IOExceptio // Only frozen_nodes does not have enough space int maxShardsPerNode = randomValidMaxShards(); var clusterService = createClusterService(maxShardsPerNode, 25, createIndexInFrozenNode(11)); - var indicatorResult = new ShardsCapacityHealthIndicatorService(clusterService).calculate(true, HealthInfo.EMPTY_HEALTH_INFO); + var indicatorResult = new ShardsCapacityHealthIndicatorService(clusterService, featureService).calculate( + true, + HealthInfo.EMPTY_HEALTH_INFO + ); assertEquals(indicatorResult.status(), RED); assertEquals( @@ -278,7 +304,10 @@ public void testIndicatorYieldsRedInCaseThereIsNotEnoughRoom() throws IOExceptio { // Both data and frozen 
nodes does not have enough space var clusterService = createClusterService(25, 25, createIndexInDataNode(11), createIndexInFrozenNode(11)); - var indicatorResult = new ShardsCapacityHealthIndicatorService(clusterService).calculate(true, HealthInfo.EMPTY_HEALTH_INFO); + var indicatorResult = new ShardsCapacityHealthIndicatorService(clusterService, featureService).calculate( + true, + HealthInfo.EMPTY_HEALTH_INFO + ); assertEquals(indicatorResult.status(), RED); assertEquals( @@ -397,7 +426,11 @@ private ClusterState createClusterState( metadata.put(idxMetadata); } - return ClusterState.builder(clusterState).metadata(metadata).build(); + var features = Set.of(HealthFeatures.SUPPORTS_SHARDS_CAPACITY_INDICATOR.id()); + return ClusterState.builder(clusterState) + .metadata(metadata) + .nodeFeatures(Map.of(dataNode.getId(), features, frozenNode.getId(), features)) + .build(); } private static IndexMetadata.Builder createIndexInDataNode(int shards) { diff --git a/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutorTests.java b/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutorTests.java index 5460a45569d71..72a9a36f26e70 100644 --- a/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutorTests.java @@ -102,6 +102,7 @@ public void testTaskCreation() throws Exception { eq("health-node"), eq("health-node"), eq(new HealthNodeTaskParams()), + eq(null), any() ) ); @@ -120,6 +121,7 @@ public void testSkippingTaskCreationIfItExists() { eq("health-node"), eq("health-node"), eq(new HealthNodeTaskParams()), + eq(null), any() ); } diff --git a/server/src/test/java/org/elasticsearch/health/node/tracker/DiskHealthTrackerTests.java b/server/src/test/java/org/elasticsearch/health/node/tracker/DiskHealthTrackerTests.java index 7089e5a19bc63..dd2ef861e85c3 100644 --- a/server/src/test/java/org/elasticsearch/health/node/tracker/DiskHealthTrackerTests.java +++ b/server/src/test/java/org/elasticsearch/health/node/tracker/DiskHealthTrackerTests.java @@ -325,6 +325,7 @@ private NodeStats nodeStats(FsInfo fs) { null, null, null, + null, null ); } diff --git a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java index 928a03eed2bd6..4e6f702b67252 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -232,7 +232,8 @@ public void testWrapperIsBound() throws IOException { Collections.emptyMap(), () -> true, indexNameExpressionResolver, - Collections.emptyMap() + Collections.emptyMap(), + mock(SlowLogFieldProvider.class) ); module.setReaderWrapper(s -> new Wrapper()); @@ -257,7 +258,8 @@ public void testRegisterIndexStore() throws IOException { indexStoreFactories, () -> true, indexNameExpressionResolver, - Collections.emptyMap() + Collections.emptyMap(), + mock(SlowLogFieldProvider.class) ); final IndexService indexService = newIndexService(module); @@ -280,7 +282,8 @@ public void testDirectoryWrapper() throws IOException { Map.of(), () -> true, indexNameExpressionResolver, - Collections.emptyMap() + Collections.emptyMap(), + mock(SlowLogFieldProvider.class) ); module.setDirectoryWrapper(new TestDirectoryWrapper()); @@ -631,7 +634,8 @@ public void testRegisterCustomRecoveryStateFactory() throws IOException { Collections.emptyMap(), () -> true, 
indexNameExpressionResolver, - recoveryStateFactories + recoveryStateFactories, + mock(SlowLogFieldProvider.class) ); final IndexService indexService = newIndexService(module); @@ -651,7 +655,8 @@ public void testIndexCommitListenerIsBound() throws IOException, ExecutionExcept Collections.emptyMap(), () -> true, indexNameExpressionResolver, - Collections.emptyMap() + Collections.emptyMap(), + mock(SlowLogFieldProvider.class) ); final AtomicLong lastAcquiredPrimaryTerm = new AtomicLong(); @@ -751,7 +756,8 @@ private static IndexModule createIndexModule( Collections.emptyMap(), () -> true, indexNameExpressionResolver, - Collections.emptyMap() + Collections.emptyMap(), + mock(SlowLogFieldProvider.class) ); } diff --git a/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java b/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java index fb83e817c052e..d8d5ab56c6e1d 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java @@ -39,6 +39,7 @@ import org.mockito.Mockito; import java.io.IOException; +import java.util.Map; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.emptyOrNullString; @@ -49,6 +50,7 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.startsWith; +import static org.mockito.Mockito.mock; public class IndexingSlowLogTests extends ESTestCase { static MockAppender appender; @@ -71,7 +73,7 @@ public void testLevelPrecedence() { String uuid = UUIDs.randomBase64UUID(); IndexMetadata metadata = createIndexMetadata("index-precedence", settings(uuid)); IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); - IndexingSlowLog log = new IndexingSlowLog(settings); + IndexingSlowLog log = new IndexingSlowLog(settings, mock(SlowLogFieldProvider.class)); ParsedDocument doc = EngineTestCase.createParsedDoc("1", null); Engine.Index index = new Engine.Index(new Term("_id", Uid.encodeId("doc_id")), randomNonNegativeLong(), doc); @@ -132,7 +134,7 @@ public void testTwoLoggersDifferentLevel() { ), Settings.EMPTY ); - IndexingSlowLog log1 = new IndexingSlowLog(index1Settings); + IndexingSlowLog log1 = new IndexingSlowLog(index1Settings, mock(SlowLogFieldProvider.class)); IndexSettings index2Settings = new IndexSettings( createIndexMetadata( @@ -145,7 +147,7 @@ public void testTwoLoggersDifferentLevel() { ), Settings.EMPTY ); - IndexingSlowLog log2 = new IndexingSlowLog(index2Settings); + IndexingSlowLog log2 = new IndexingSlowLog(index2Settings, mock(SlowLogFieldProvider.class)); ParsedDocument doc = EngineTestCase.createParsedDoc("1", null); Engine.Index index = new Engine.Index(new Term("_id", Uid.encodeId("doc_id")), randomNonNegativeLong(), doc); @@ -169,12 +171,12 @@ public void testMultipleSlowLoggersUseSingleLog4jLogger() { LoggerContext context = (LoggerContext) LogManager.getContext(false); IndexSettings index1Settings = new IndexSettings(createIndexMetadata("index1", settings(UUIDs.randomBase64UUID())), Settings.EMPTY); - IndexingSlowLog log1 = new IndexingSlowLog(index1Settings); + IndexingSlowLog log1 = new IndexingSlowLog(index1Settings, mock(SlowLogFieldProvider.class)); int numberOfLoggersBefore = context.getLoggers().size(); IndexSettings index2Settings = new IndexSettings(createIndexMetadata("index2", settings(UUIDs.randomBase64UUID())), Settings.EMPTY); - IndexingSlowLog log2 = new IndexingSlowLog(index2Settings); + 
IndexingSlowLog log2 = new IndexingSlowLog(index2Settings, mock(SlowLogFieldProvider.class)); context = (LoggerContext) LogManager.getContext(false); int numberOfLoggersAfter = context.getLoggers().size(); @@ -210,7 +212,7 @@ public void testSlowLogMessageHasJsonFields() throws IOException { ); Index index = new Index("foo", "123"); // Turning off document logging doesn't log source[] - ESLogMessage p = IndexingSlowLogMessage.of(index, pd, 10, true, 0); + ESLogMessage p = IndexingSlowLogMessage.of(Map.of(), index, pd, 10, true, 0); assertThat(p.get("elasticsearch.slowlog.message"), equalTo("[foo/123]")); assertThat(p.get("elasticsearch.slowlog.took"), equalTo("10nanos")); @@ -220,7 +222,36 @@ public void testSlowLogMessageHasJsonFields() throws IOException { assertThat(p.get("elasticsearch.slowlog.source"), is(emptyOrNullString())); // Turning on document logging logs the whole thing - p = IndexingSlowLogMessage.of(index, pd, 10, true, Integer.MAX_VALUE); + p = IndexingSlowLogMessage.of(Map.of(), index, pd, 10, true, Integer.MAX_VALUE); + assertThat(p.get("elasticsearch.slowlog.source"), containsString("{\\\"foo\\\":\\\"bar\\\"}")); + } + + public void testSlowLogMessageHasAdditionalFields() throws IOException { + BytesReference source = BytesReference.bytes(JsonXContent.contentBuilder().startObject().field("foo", "bar").endObject()); + ParsedDocument pd = new ParsedDocument( + new NumericDocValuesField("version", 1), + SeqNoFieldMapper.SequenceIDFields.emptySeqID(), + "id", + "routingValue", + null, + source, + XContentType.JSON, + null + ); + Index index = new Index("foo", "123"); + // Turning off document logging doesn't log source[] + ESLogMessage p = IndexingSlowLogMessage.of(Map.of("field1", "value1", "field2", "value2"), index, pd, 10, true, 0); + assertThat(p.get("field1"), equalTo("value1")); + assertThat(p.get("field2"), equalTo("value2")); + assertThat(p.get("elasticsearch.slowlog.message"), equalTo("[foo/123]")); + assertThat(p.get("elasticsearch.slowlog.took"), equalTo("10nanos")); + assertThat(p.get("elasticsearch.slowlog.took_millis"), equalTo("0")); + assertThat(p.get("elasticsearch.slowlog.id"), equalTo("id")); + assertThat(p.get("elasticsearch.slowlog.routing"), equalTo("routingValue")); + assertThat(p.get("elasticsearch.slowlog.source"), is(emptyOrNullString())); + + // Turning on document logging logs the whole thing + p = IndexingSlowLogMessage.of(Map.of(), index, pd, 10, true, Integer.MAX_VALUE); assertThat(p.get("elasticsearch.slowlog.source"), containsString("{\\\"foo\\\":\\\"bar\\\"}")); } @@ -238,7 +269,7 @@ public void testEmptyRoutingField() throws IOException { ); Index index = new Index("foo", "123"); - ESLogMessage p = IndexingSlowLogMessage.of(index, pd, 10, true, 0); + ESLogMessage p = IndexingSlowLogMessage.of(Map.of(), index, pd, 10, true, 0); assertThat(p.get("routing"), nullValue()); } @@ -256,19 +287,19 @@ public void testSlowLogParsedDocumentPrinterSourceToLog() throws IOException { ); Index index = new Index("foo", "123"); // Turning off document logging doesn't log source[] - ESLogMessage p = IndexingSlowLogMessage.of(index, pd, 10, true, 0); + ESLogMessage p = IndexingSlowLogMessage.of(Map.of(), index, pd, 10, true, 0); assertThat(p.getFormattedMessage(), not(containsString("source["))); // Turning on document logging logs the whole thing - p = IndexingSlowLogMessage.of(index, pd, 10, true, Integer.MAX_VALUE); + p = IndexingSlowLogMessage.of(Map.of(), index, pd, 10, true, Integer.MAX_VALUE); assertThat(p.get("elasticsearch.slowlog.source"), 
equalTo("{\\\"foo\\\":\\\"bar\\\"}")); // And you can truncate the source - p = IndexingSlowLogMessage.of(index, pd, 10, true, 3); + p = IndexingSlowLogMessage.of(Map.of(), index, pd, 10, true, 3); assertThat(p.get("elasticsearch.slowlog.source"), equalTo("{\\\"f")); // And you can truncate the source - p = IndexingSlowLogMessage.of(index, pd, 10, true, 3); + p = IndexingSlowLogMessage.of(Map.of(), index, pd, 10, true, 3); assertThat(p.get("elasticsearch.slowlog.source"), containsString("{\\\"f")); assertThat(p.get("elasticsearch.slowlog.message"), startsWith("[foo/123]")); assertThat(p.get("elasticsearch.slowlog.took"), containsString("10nanos")); @@ -288,7 +319,7 @@ public void testSlowLogParsedDocumentPrinterSourceToLog() throws IOException { final XContentParseException e = expectThrows( XContentParseException.class, - () -> IndexingSlowLogMessage.of(index, doc, 10, true, 3) + () -> IndexingSlowLogMessage.of(Map.of(), index, doc, 10, true, 3) ); assertThat( e, @@ -311,7 +342,7 @@ public void testReformatSetting() { .build() ); IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); - IndexingSlowLog log = new IndexingSlowLog(settings); + IndexingSlowLog log = new IndexingSlowLog(settings, mock(SlowLogFieldProvider.class)); assertFalse(log.isReformat()); settings.updateIndexMetadata( newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING.getKey(), "true").build()) @@ -328,7 +359,7 @@ public void testReformatSetting() { metadata = newIndexMeta("index", Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()).build()); settings = new IndexSettings(metadata, Settings.EMPTY); - log = new IndexingSlowLog(settings); + log = new IndexingSlowLog(settings, mock(SlowLogFieldProvider.class)); assertTrue(log.isReformat()); try { settings.updateIndexMetadata( @@ -361,7 +392,7 @@ public void testSetLevels() { .build() ); IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); - IndexingSlowLog log = new IndexingSlowLog(settings); + IndexingSlowLog log = new IndexingSlowLog(settings, mock(SlowLogFieldProvider.class)); assertEquals(TimeValue.timeValueMillis(100).nanos(), log.getIndexTraceThreshold()); assertEquals(TimeValue.timeValueMillis(200).nanos(), log.getIndexDebugThreshold()); assertEquals(TimeValue.timeValueMillis(300).nanos(), log.getIndexInfoThreshold()); @@ -392,7 +423,7 @@ public void testSetLevels() { assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getIndexWarnThreshold()); settings = new IndexSettings(metadata, Settings.EMPTY); - log = new IndexingSlowLog(settings); + log = new IndexingSlowLog(settings, mock(SlowLogFieldProvider.class)); assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getIndexTraceThreshold()); assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getIndexDebugThreshold()); diff --git a/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java b/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java index 2fa3216ad5556..a50092a0b8d12 100644 --- a/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java +++ b/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java @@ -36,12 +36,14 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.Map; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.instanceOf; import static 
org.hamcrest.Matchers.not; +import static org.mockito.Mockito.mock; public class SearchSlowLogTests extends ESSingleNodeTestCase { static MockAppender appender; @@ -92,7 +94,7 @@ public void testLevelPrecedence() { try (SearchContext ctx = searchContextWithSourceAndTask(createIndex("index"))) { String uuid = UUIDs.randomBase64UUID(); IndexSettings settings = new IndexSettings(createIndexMetadata("index", settings(uuid)), Settings.EMPTY); - SearchSlowLog log = new SearchSlowLog(settings); + SearchSlowLog log = new SearchSlowLog(settings, mock(SlowLogFieldProvider.class)); // For this test, when level is not breached, the level below should be used. { @@ -176,7 +178,7 @@ public void testTwoLoggersDifferentLevel() { ), Settings.EMPTY ); - SearchSlowLog log1 = new SearchSlowLog(settings1); + SearchSlowLog log1 = new SearchSlowLog(settings1, mock(SlowLogFieldProvider.class)); IndexSettings settings2 = new IndexSettings( createIndexMetadata( @@ -189,7 +191,7 @@ public void testTwoLoggersDifferentLevel() { ), Settings.EMPTY ); - SearchSlowLog log2 = new SearchSlowLog(settings2); + SearchSlowLog log2 = new SearchSlowLog(settings2, mock(SlowLogFieldProvider.class)); { // threshold set on WARN only, should not log @@ -212,7 +214,7 @@ public void testMultipleSlowLoggersUseSingleLog4jLogger() { try (SearchContext ctx1 = searchContextWithSourceAndTask(createIndex("index-1"))) { IndexSettings settings1 = new IndexSettings(createIndexMetadata("index-1", settings(UUIDs.randomBase64UUID())), Settings.EMPTY); - SearchSlowLog log1 = new SearchSlowLog(settings1); + SearchSlowLog log1 = new SearchSlowLog(settings1, mock(SlowLogFieldProvider.class)); int numberOfLoggersBefore = context.getLoggers().size(); try (SearchContext ctx2 = searchContextWithSourceAndTask(createIndex("index-2"))) { @@ -220,7 +222,7 @@ public void testMultipleSlowLoggersUseSingleLog4jLogger() { createIndexMetadata("index-2", settings(UUIDs.randomBase64UUID())), Settings.EMPTY ); - SearchSlowLog log2 = new SearchSlowLog(settings2); + SearchSlowLog log2 = new SearchSlowLog(settings2, mock(SlowLogFieldProvider.class)); int numberOfLoggersAfter = context.getLoggers().size(); assertThat(numberOfLoggersAfter, equalTo(numberOfLoggersBefore)); @@ -235,7 +237,7 @@ private IndexMetadata createIndexMetadata(String index, Settings.Builder put) { public void testSlowLogHasJsonFields() throws IOException { IndexService index = createIndex("foo"); try (SearchContext searchContext = searchContextWithSourceAndTask(index)) { - ESLogMessage p = SearchSlowLog.SearchSlowLogMessage.of(searchContext, 10); + ESLogMessage p = SearchSlowLog.SearchSlowLogMessage.of(Map.of(), searchContext, 10); assertThat(p.get("elasticsearch.slowlog.message"), equalTo("[foo][0]")); assertThat(p.get("elasticsearch.slowlog.took"), equalTo("10nanos")); @@ -248,6 +250,23 @@ public void testSlowLogHasJsonFields() throws IOException { } } + public void testSlowLogHasAdditionalFields() throws IOException { + IndexService index = createIndex("foo"); + try (SearchContext searchContext = searchContextWithSourceAndTask(index)) { + ESLogMessage p = SearchSlowLog.SearchSlowLogMessage.of(Map.of("field1", "value1", "field2", "value2"), searchContext, 10); + assertThat(p.get("field1"), equalTo("value1")); + assertThat(p.get("field2"), equalTo("value2")); + assertThat(p.get("elasticsearch.slowlog.message"), equalTo("[foo][0]")); + assertThat(p.get("elasticsearch.slowlog.took"), equalTo("10nanos")); + assertThat(p.get("elasticsearch.slowlog.took_millis"), equalTo("0")); + 
assertThat(p.get("elasticsearch.slowlog.total_hits"), equalTo("-1")); + assertThat(p.get("elasticsearch.slowlog.stats"), equalTo("[]")); + assertThat(p.get("elasticsearch.slowlog.search_type"), Matchers.nullValue()); + assertThat(p.get("elasticsearch.slowlog.total_shards"), equalTo("1")); + assertThat(p.get("elasticsearch.slowlog.source"), equalTo("{\\\"query\\\":{\\\"match_all\\\":{\\\"boost\\\":1.0}}}")); + } + } + public void testSlowLogsWithStats() throws IOException { IndexService index = createIndex("foo"); try (SearchContext searchContext = createSearchContext(index, "group1")) { @@ -257,7 +276,7 @@ public void testSlowLogsWithStats() throws IOException { new SearchShardTask(0, "n/a", "n/a", "test", null, Collections.singletonMap(Task.X_OPAQUE_ID_HTTP_HEADER, "my_id")) ); - ESLogMessage p = SearchSlowLog.SearchSlowLogMessage.of(searchContext, 10); + ESLogMessage p = SearchSlowLog.SearchSlowLogMessage.of(Map.of(), searchContext, 10); assertThat(p.get("elasticsearch.slowlog.stats"), equalTo("[\\\"group1\\\"]")); } @@ -267,7 +286,7 @@ public void testSlowLogsWithStats() throws IOException { searchContext.setTask( new SearchShardTask(0, "n/a", "n/a", "test", null, Collections.singletonMap(Task.X_OPAQUE_ID_HTTP_HEADER, "my_id")) ); - ESLogMessage p = SearchSlowLog.SearchSlowLogMessage.of(searchContext, 10); + ESLogMessage p = SearchSlowLog.SearchSlowLogMessage.of(Map.of(), searchContext, 10); assertThat(p.get("elasticsearch.slowlog.stats"), equalTo("[\\\"group1\\\", \\\"group2\\\"]")); } } @@ -275,7 +294,7 @@ public void testSlowLogsWithStats() throws IOException { public void testSlowLogSearchContextPrinterToLog() throws IOException { IndexService index = createIndex("foo"); try (SearchContext searchContext = searchContextWithSourceAndTask(index)) { - ESLogMessage p = SearchSlowLog.SearchSlowLogMessage.of(searchContext, 10); + ESLogMessage p = SearchSlowLog.SearchSlowLogMessage.of(Map.of(), searchContext, 10); assertThat(p.get("elasticsearch.slowlog.message"), equalTo("[foo][0]")); // Makes sure that output doesn't contain any new lines assertThat(p.get("elasticsearch.slowlog.source"), not(containsString("\n"))); @@ -295,7 +314,7 @@ public void testSetQueryLevels() { .build() ); IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); - SearchSlowLog log = new SearchSlowLog(settings); + SearchSlowLog log = new SearchSlowLog(settings, mock(SlowLogFieldProvider.class)); assertEquals(TimeValue.timeValueMillis(100).nanos(), log.getQueryTraceThreshold()); assertEquals(TimeValue.timeValueMillis(200).nanos(), log.getQueryDebugThreshold()); assertEquals(TimeValue.timeValueMillis(300).nanos(), log.getQueryInfoThreshold()); @@ -326,7 +345,7 @@ public void testSetQueryLevels() { assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getQueryWarnThreshold()); settings = new IndexSettings(metadata, Settings.EMPTY); - log = new SearchSlowLog(settings); + log = new SearchSlowLog(settings, mock(SlowLogFieldProvider.class)); assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getQueryTraceThreshold()); assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getQueryDebugThreshold()); @@ -401,7 +420,7 @@ public void testSetFetchLevels() { .build() ); IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); - SearchSlowLog log = new SearchSlowLog(settings); + SearchSlowLog log = new SearchSlowLog(settings, mock(SlowLogFieldProvider.class)); assertEquals(TimeValue.timeValueMillis(100).nanos(), log.getFetchTraceThreshold()); assertEquals(TimeValue.timeValueMillis(200).nanos(), 
log.getFetchDebugThreshold()); assertEquals(TimeValue.timeValueMillis(300).nanos(), log.getFetchInfoThreshold()); @@ -432,7 +451,7 @@ public void testSetFetchLevels() { assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getFetchWarnThreshold()); settings = new IndexSettings(metadata, Settings.EMPTY); - log = new SearchSlowLog(settings); + log = new SearchSlowLog(settings, mock(SlowLogFieldProvider.class)); assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getFetchTraceThreshold()); assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getFetchDebugThreshold()); diff --git a/server/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTests.java b/server/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTests.java index 1c164e898426d..6d72649e90764 100644 --- a/server/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTests.java +++ b/server/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTests.java @@ -28,19 +28,27 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.BitSet; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.lucene.util.MatchAllBitSet; +import org.elasticsearch.node.NodeRoleSettings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; import java.io.IOException; +import java.util.List; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; +import static org.elasticsearch.cluster.node.DiscoveryNode.STATELESS_ENABLED_SETTING_NAME; +import static org.elasticsearch.index.IndexSettings.INDEX_FAST_REFRESH_SETTING; +import static org.elasticsearch.index.cache.bitset.BitsetFilterCache.INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; @@ -259,4 +267,53 @@ public void onRemoval(ShardId shardId, Accountable accountable) { } } + public void testShouldLoadRandomAccessFiltersEagerly() { + var values = List.of(true, false); + for (var hasIndexRole : values) { + for (var indexFastRefresh : values) { + for (var loadFiltersEagerly : values) { + for (var isStateless : values) { + if (isStateless) { + assertEquals( + loadFiltersEagerly && indexFastRefresh && hasIndexRole, + BitsetFilterCache.shouldLoadRandomAccessFiltersEagerly( + bitsetFilterCacheSettings(isStateless, hasIndexRole, loadFiltersEagerly, indexFastRefresh) + ) + ); + } else { + assertEquals( + loadFiltersEagerly, + BitsetFilterCache.shouldLoadRandomAccessFiltersEagerly( + bitsetFilterCacheSettings(isStateless, hasIndexRole, loadFiltersEagerly, indexFastRefresh) + ) + ); + } + } + } + } + } + } + + private IndexSettings bitsetFilterCacheSettings( + boolean isStateless, + boolean hasIndexRole, + boolean loadFiltersEagerly, + boolean indexFastRefresh + ) { + var indexSettingsBuilder = Settings.builder().put(INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING.getKey(), loadFiltersEagerly); + if (isStateless) indexSettingsBuilder.put(INDEX_FAST_REFRESH_SETTING.getKey(), indexFastRefresh); + + 
var nodeSettingsBuilder = Settings.builder() + .putList( + NodeRoleSettings.NODE_ROLES_SETTING.getKey(), + hasIndexRole ? DiscoveryNodeRole.INDEX_ROLE.roleName() : DiscoveryNodeRole.SEARCH_ROLE.roleName() + ) + .put(STATELESS_ENABLED_SETTING_NAME, isStateless); + + return IndexSettingsModule.newIndexSettings( + new Index("index", IndexMetadata.INDEX_UUID_NA_VALUE), + indexSettingsBuilder.build(), + nodeSettingsBuilder.build() + ); + } } diff --git a/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java b/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java index b50251aef011c..dfd4ad1fc0a45 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java @@ -26,6 +26,7 @@ import java.util.Set; import java.util.UUID; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import static org.elasticsearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; @@ -97,6 +98,36 @@ public void testKeepCommitsAfterGlobalCheckpoint() throws Exception { ); } + public void testReusePreviousSafeCommitInfo() throws Exception { + final AtomicLong globalCheckpoint = new AtomicLong(); + final AtomicInteger getDocCountCalls = new AtomicInteger(); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy( + logger, + new TranslogDeletionPolicy(), + new SoftDeletesPolicy(globalCheckpoint::get, NO_OPS_PERFORMED, between(0, 100), () -> RetentionLeases.EMPTY), + globalCheckpoint::get, + null + ) { + @Override + protected int getDocCountOfCommit(IndexCommit indexCommit) { + getDocCountCalls.incrementAndGet(); + return between(0, 1000); + } + }; + + final long seqNo = between(1, 10000); + final List commitList = new ArrayList<>(); + final var translogUUID = UUID.randomUUID(); + commitList.add(mockIndexCommit(seqNo, seqNo, translogUUID)); + globalCheckpoint.set(seqNo); + indexPolicy.onCommit(commitList); + assertEquals(1, getDocCountCalls.get()); + + commitList.add(mockIndexCommit(seqNo, seqNo, translogUUID)); + indexPolicy.onCommit(commitList); + assertEquals(1, getDocCountCalls.get()); + } + public void testAcquireIndexCommit() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(); final int extraRetainedOps = between(0, 100); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java index 053e4226b3d79..d55eaf9df3452 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java @@ -11,6 +11,7 @@ import org.apache.lucene.document.StoredField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.search.Collector; import org.apache.lucene.search.IndexSearcher; @@ -412,9 +413,16 @@ public XContentParser parser() { } public void testBlockLoader() throws IOException { - try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [false]}")))); - iw.addDocument(List.of(new 
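The testShouldLoadRandomAccessFiltersEagerly truth table above encodes a simple rule: on a stateless node the eager-loading setting only takes effect when the index is fast-refresh and the node carries the index role, while on a non-stateless node the setting alone decides. Restated as a standalone predicate — a sketch of what the test asserts, not the production implementation:

// Sketch: the decision exercised by the truth-table test above.
// The four parameters correspond to the booleans the test iterates over.
static boolean shouldLoadEagerly(boolean isStateless,
                                 boolean hasIndexRole,
                                 boolean settingEnabled,
                                 boolean indexFastRefresh) {
    if (isStateless) {
        return settingEnabled && indexFastRefresh && hasIndexRole;
    }
    return settingEnabled;
}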
StoredField("_source", new BytesRef("{\"foo\": [true]}")))); + try ( + Directory directory = newDirectory(); + RandomIndexWriter iw = new RandomIndexWriter(random(), directory, newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE)) + ) { + iw.addDocuments( + List.of( + List.of(new StoredField("_source", new BytesRef("{\"foo\": [false]}"))), + List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}"))) + ) + ); try (DirectoryReader reader = iw.getReader()) { BooleanScriptFieldType fieldType = build("xor_param", Map.of("param", false), OnScriptError.FAIL); List expected = List.of(false, true); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateScriptFieldTypeTests.java index eb3daf472ea2e..25a79022c245e 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DateScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DateScriptFieldTypeTests.java @@ -11,6 +11,7 @@ import org.apache.lucene.document.StoredField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.search.Collector; import org.apache.lucene.search.Explanation; @@ -472,9 +473,16 @@ public void testLegacyDateFormatName() throws IOException { } public void testBlockLoader() throws IOException { - try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181354]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181355]}")))); + try ( + Directory directory = newDirectory(); + RandomIndexWriter iw = new RandomIndexWriter(random(), directory, newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE)) + ) { + iw.addDocuments( + List.of( + List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181354]}"))), + List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181355]}"))) + ) + ); try (DirectoryReader reader = iw.getReader()) { DateScriptFieldType fieldType = build("add_days", Map.of("days", 1), OnScriptError.FAIL); assertThat( diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DoubleScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DoubleScriptFieldTypeTests.java index d37e42e04edca..ed365a2460203 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DoubleScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DoubleScriptFieldTypeTests.java @@ -11,6 +11,7 @@ import org.apache.lucene.document.StoredField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.search.Collector; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.LeafCollector; @@ -231,9 +232,16 @@ public void testTermsQuery() throws IOException { } public void testBlockLoader() throws IOException { - try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); + try ( + 
Directory directory = newDirectory(); + RandomIndexWriter iw = new RandomIndexWriter(random(), directory, newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE)) + ) { + iw.addDocuments( + List.of( + List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}"))), + List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}"))) + ) + ); try (DirectoryReader reader = iw.getReader()) { DoubleScriptFieldType fieldType = build("add_param", Map.of("param", 1), OnScriptError.FAIL); assertThat(blockLoaderReadValuesFromColumnAtATimeReader(reader, fieldType), equalTo(List.of(2d, 3d))); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java index 38960597647e9..0f285992b749a 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java @@ -1430,7 +1430,7 @@ public void testSubobjectsFalseWithInnerNestedFromDynamicTemplate() { ); assertThat(exception.getRootCause(), instanceOf(MapperParsingException.class)); assertEquals( - "Tried to add subobject [time] to object [__dynamic__test] which does not support subobjects", + "Tried to add nested object [time] to object [__dynamic__test] which does not support subobjects", exception.getRootCause().getMessage() ); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java index cd19bb50b842c..5eb66e631d86f 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java @@ -12,6 +12,7 @@ import org.apache.lucene.document.StoredField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.search.Collector; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.LeafCollector; @@ -247,9 +248,16 @@ public void testTermsQuery() throws IOException { } public void testBlockLoader() throws IOException { - try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.1\"]}")))); + try ( + Directory directory = newDirectory(); + RandomIndexWriter iw = new RandomIndexWriter(random(), directory, newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE)) + ) { + iw.addDocuments( + List.of( + List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0\"]}"))), + List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.1\"]}"))) + ) + ); try (DirectoryReader reader = iw.getReader()) { IpScriptFieldType fieldType = build("append_param", Map.of("param", ".1"), OnScriptError.FAIL); List expected = List.of( diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordScriptFieldTypeTests.java index ce705f2e9ae8b..d8903251e6c3b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordScriptFieldTypeTests.java 
@@ -11,6 +11,7 @@ import org.apache.lucene.document.StoredField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.search.Collector; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.LeafCollector; @@ -377,9 +378,16 @@ public void testMatchQuery() throws IOException { } public void testBlockLoader() throws IOException { - try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); + try ( + Directory directory = newDirectory(); + RandomIndexWriter iw = new RandomIndexWriter(random(), directory, newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE)) + ) { + iw.addDocuments( + List.of( + List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}"))), + List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}"))) + ) + ); try (DirectoryReader reader = iw.getReader()) { KeywordScriptFieldType fieldType = build("append_param", Map.of("param", "-Suffix"), OnScriptError.FAIL); assertThat( diff --git a/server/src/test/java/org/elasticsearch/index/mapper/LongScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/LongScriptFieldTypeTests.java index fd20b6c71e984..debcd3c5fa911 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/LongScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/LongScriptFieldTypeTests.java @@ -11,6 +11,7 @@ import org.apache.lucene.document.StoredField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.search.Collector; import org.apache.lucene.search.FieldDoc; @@ -264,9 +265,16 @@ public void testTermsQuery() throws IOException { } public void testBlockLoader() throws IOException { - try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); + try ( + Directory directory = newDirectory(); + RandomIndexWriter iw = new RandomIndexWriter(random(), directory, newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE)) + ) { + iw.addDocuments( + List.of( + List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}"))), + List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}"))) + ) + ); try (DirectoryReader reader = iw.getReader()) { LongScriptFieldType fieldType = build("add_param", Map.of("param", 1), OnScriptError.FAIL); assertThat(blockLoaderReadValuesFromColumnAtATimeReader(reader, fieldType), equalTo(List.of(2L, 3L))); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java index 68e7bd6f24664..7f762bbfa7234 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java @@ -567,10 +567,7 @@ public void testMergeMultipleRoots() throws IOException { } }"""); - final MapperService mapperService 
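The repeated testBlockLoader change across the script field type tests (boolean, date, double, ip, keyword, long) swaps two separate addDocument calls for a single addDocuments batch and pins the writer to NoMergePolicy.INSTANCE, so both documents stay in one segment in insertion order and the List.of(...) expectations remain deterministic. A minimal plain-Lucene sketch of the same pattern, using hypothetical field values rather than the ES test fixtures:

import java.util.List;

import org.apache.lucene.document.StoredField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.NoMergePolicy;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;

public class SingleSegmentExample {
    public static void main(String[] args) throws Exception {
        try (Directory dir = new ByteBuffersDirectory();
             IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE))) {
            // One addDocuments call: both documents land in the same segment,
            // in this order, and NoMergePolicy keeps them from being merged away.
            writer.addDocuments(
                List.of(
                    List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}"))),
                    List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))
                )
            );
            writer.commit();
            try (DirectoryReader reader = DirectoryReader.open(dir)) {
                assert reader.leaves().size() == 1; // a single segment
            }
        }
    }
}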
= createMapperService(mapping(b -> {})); - mapperService.merge("_doc", List.of(mapping1, mapping2), MergeReason.INDEX_TEMPLATE); - - assertEquals(""" + assertMergeEquals(List.of(mapping1, mapping2), """ { "_doc" : { "_meta" : { @@ -587,7 +584,7 @@ public void testMergeMultipleRoots() throws IOException { } } } - }""", Strings.toString(mapperService.documentMapper().mapping(), true, true)); + }"""); } public void testMergeMultipleRootsWithRootType() throws IOException { @@ -641,10 +638,7 @@ public void testMergeMultipleRootsWithoutRootType() throws IOException { } }"""); - final MapperService mapperService = createMapperService(mapping(b -> {})); - mapperService.merge("_doc", List.of(mapping1, mapping2), MergeReason.INDEX_TEMPLATE); - - assertEquals(""" + assertMergeEquals(List.of(mapping1, mapping2), """ { "_doc" : { "_meta" : { @@ -656,7 +650,7 @@ public void testMergeMultipleRootsWithoutRootType() throws IOException { } } } - }""", Strings.toString(mapperService.documentMapper().mapping(), true, true)); + }"""); } public void testValidMappingSubstitution() throws IOException { @@ -680,10 +674,7 @@ public void testValidMappingSubstitution() throws IOException { } }"""); - final MapperService mapperService = createMapperService(mapping(b -> {})); - mapperService.merge("_doc", List.of(mapping1, mapping2), MergeReason.INDEX_TEMPLATE); - - assertEquals(""" + assertMergeEquals(List.of(mapping1, mapping2), """ { "_doc" : { "properties" : { @@ -693,7 +684,7 @@ public void testValidMappingSubstitution() throws IOException { } } } - }""", Strings.toString(mapperService.documentMapper().mapping(), true, true)); + }"""); } public void testValidMappingSubtreeSubstitution() throws IOException { @@ -770,10 +761,7 @@ public void testSameTypeMerge() throws IOException { } }"""); - final MapperService mapperService = createMapperService(mapping(b -> {})); - mapperService.merge("_doc", List.of(mapping1, mapping2), MergeReason.INDEX_TEMPLATE); - - assertEquals(""" + assertMergeEquals(List.of(mapping1, mapping2), """ { "_doc" : { "properties" : { @@ -788,7 +776,7 @@ public void testSameTypeMerge() throws IOException { } } } - }""", Strings.toString(mapperService.documentMapper().mapping(), true, true)); + }"""); } public void testObjectAndNestedTypeSubstitution() throws IOException { @@ -874,10 +862,7 @@ public void testNestedContradictingProperties() throws IOException { } }"""); - final MapperService mapperService = createMapperService(mapping(b -> {})); - mapperService.merge("_doc", List.of(mapping1, mapping2), MergeReason.INDEX_TEMPLATE); - - assertEquals(""" + assertMergeEquals(List.of(mapping1, mapping2), """ { "_doc" : { "properties" : { @@ -895,7 +880,7 @@ public void testNestedContradictingProperties() throws IOException { } } } - }""", Strings.toString(mapperService.documentMapper().mapping(), true, true)); + }"""); } public void testImplicitObjectHierarchy() throws IOException { @@ -912,10 +897,7 @@ public void testImplicitObjectHierarchy() throws IOException { } }"""); - final MapperService mapperService = createMapperService(mapping(b -> {})); - DocumentMapper bulkMerge = mapperService.merge("_doc", List.of(mapping1), MergeReason.INDEX_TEMPLATE); - - assertEquals(""" + assertMergeEquals(List.of(mapping1), """ { "_doc" : { "properties" : { @@ -932,10 +914,7 @@ public void testImplicitObjectHierarchy() throws IOException { } } } - }""", Strings.toString(mapperService.documentMapper().mapping(), true, true)); - - DocumentMapper sequentialMerge = mapperService.merge("_doc", mapping1, 
MergeReason.INDEX_TEMPLATE); - assertEquals(bulkMerge.mappingSource(), sequentialMerge.mappingSource()); + }"""); } public void testSubobjectsMerge() throws IOException { @@ -965,7 +944,7 @@ public void testSubobjectsMerge() throws IOException { final MapperService mapperService = createMapperService(mapping(b -> {})); mapperService.merge("_doc", List.of(mapping1, mapping2), MergeReason.INDEX_TEMPLATE); - assertEquals(""" + assertMergeEquals(List.of(mapping1, mapping2), """ { "_doc" : { "properties" : { @@ -979,7 +958,7 @@ public void testSubobjectsMerge() throws IOException { } } } - }""", Strings.toString(mapperService.documentMapper().mapping(), true, true)); + }"""); } public void testContradictingSubobjects() throws IOException { @@ -1039,7 +1018,7 @@ public void testContradictingSubobjects() throws IOException { mapperService = createMapperService(mapping(b -> {})); mapperService.merge("_doc", List.of(mapping2, mapping1), MergeReason.INDEX_TEMPLATE); - assertEquals(""" + assertMergeEquals(List.of(mapping2, mapping1), """ { "_doc" : { "properties" : { @@ -1053,7 +1032,7 @@ public void testContradictingSubobjects() throws IOException { } } } - }""", Strings.toString(mapperService.documentMapper().mapping(), true, true)); + }"""); } public void testSubobjectsImplicitObjectsMerge() throws IOException { @@ -1076,12 +1055,21 @@ public void testSubobjectsImplicitObjectsMerge() throws IOException { } }"""); - final MapperService mapperService = createMapperService(mapping(b -> {})); - MapperParsingException e = expectThrows( - MapperParsingException.class, - () -> mapperService.merge("_doc", List.of(mapping1, mapping2), MergeReason.INDEX_TEMPLATE) - ); - assertThat(e.getMessage(), containsString("Tried to add subobject [child] to object [parent] which does not support subobjects")); + assertMergeEquals(List.of(mapping1, mapping2), """ + { + "_doc" : { + "properties" : { + "parent" : { + "subobjects" : false, + "properties" : { + "child.grandchild" : { + "type" : "keyword" + } + } + } + } + } + }"""); } public void testMultipleTypeMerges() throws IOException { @@ -1467,4 +1455,267 @@ public void testMergeUntilLimitCapacityOnlyForParent() throws IOException { assertNull(mapper.mappers().getMapper("parent.child")); } + public void testAutoFlattenObjectsSubobjectsTopLevelMerge() throws IOException { + CompressedXContent mapping1 = new CompressedXContent(""" + { + "subobjects": false + }"""); + + CompressedXContent mapping2 = new CompressedXContent(""" + { + "properties": { + "parent": { + "properties": { + "child": { + "dynamic": true, + "properties": { + "grandchild": { + "type": "keyword" + } + } + } + } + } + } + }"""); + + assertMergeEquals(List.of(mapping1, mapping2), """ + { + "_doc" : { + "subobjects" : false, + "properties" : { + "parent.child.grandchild" : { + "type" : "keyword" + } + } + } + }"""); + } + + public void testAutoFlattenObjectsSubobjectsMerge() throws IOException { + CompressedXContent mapping1 = new CompressedXContent(""" + { + "properties" : { + "parent" : { + "properties" : { + "child" : { + "type": "object" + } + } + } + } + }"""); + + CompressedXContent mapping2 = new CompressedXContent(""" + { + "properties" : { + "parent" : { + "subobjects" : false, + "properties" : { + "child" : { + "properties" : { + "grandchild" : { + "type" : "keyword" + } + } + } + } + } + } + }"""); + + assertMergeEquals(List.of(mapping1, mapping2), """ + { + "_doc" : { + "properties" : { + "parent" : { + "subobjects" : false, + "properties" : { + "child.grandchild" : { + "type" : "keyword" + 
} + } + } + } + } + }"""); + + assertMergeEquals(List.of(mapping2, mapping1), """ + { + "_doc" : { + "properties" : { + "parent" : { + "subobjects" : false, + "properties" : { + "child.grandchild" : { + "type" : "keyword" + } + } + } + } + } + }"""); + } + + public void testAutoFlattenObjectsSubobjectsMergeConflictingMappingParameter() throws IOException { + CompressedXContent mapping1 = new CompressedXContent(""" + { + "subobjects": false + }"""); + + CompressedXContent mapping2 = new CompressedXContent(""" + { + "properties": { + "parent": { + "dynamic": "false", + "properties": { + "child": { + "properties": { + "grandchild": { + "type": "keyword" + } + } + } + } + } + } + }"""); + + final MapperService mapperService = createMapperService(mapping(b -> {})); + MapperParsingException e = expectThrows( + MapperParsingException.class, + () -> mapperService.merge("_doc", List.of(mapping1, mapping2), MergeReason.INDEX_TEMPLATE) + ); + assertThat( + e.getMessage(), + containsString( + "Failed to parse mapping: Object mapper [parent] was found in a context where subobjects is set to false. " + + "Auto-flattening [parent] failed because the value of [dynamic] (FALSE) is not compatible " + + "with the value from its parent context (TRUE)" + ) + ); + } + + public void testAutoFlattenObjectsSubobjectsMergeConflictingMappingParameterRoot() throws IOException { + CompressedXContent mapping1 = new CompressedXContent(""" + { + "subobjects": false, + "dynamic": false + }"""); + + CompressedXContent mapping2 = new CompressedXContent(""" + { + "subobjects": false, + "properties": { + "parent": { + "dynamic": "true", + "properties": { + "child": { + "properties": { + "grandchild": { + "type": "keyword" + } + } + } + } + } + } + }"""); + + final MapperService mapperService = createMapperService(mapping(b -> {})); + MapperParsingException e = expectThrows( + MapperParsingException.class, + () -> mapperService.merge("_doc", List.of(mapping1, mapping2), MergeReason.INDEX_TEMPLATE) + ); + assertThat( + e.getMessage(), + containsString( + "Failed to parse mapping: Object mapper [parent] was found in a context where subobjects is set to false. 
" + + "Auto-flattening [parent] failed because the value of [dynamic] (TRUE) is not compatible " + + "with the value from its parent context (FALSE)" + ) + ); + } + + public void testAutoFlattenObjectsSubobjectsMergeNonConflictingMappingParameter() throws IOException { + CompressedXContent mapping = new CompressedXContent(""" + { + "dynamic": false, + "properties": { + "parent": { + "dynamic": true, + "enabled": false, + "subobjects": false, + "properties": { + "child": { + "properties": { + "grandchild": { + "type": "keyword" + } + } + } + } + } + } + }"""); + + assertMergeEquals(List.of(mapping), """ + { + "_doc" : { + "dynamic" : "false", + "properties" : { + "parent" : { + "dynamic" : "true", + "enabled" : false, + "subobjects" : false, + "properties" : { + "child.grandchild" : { + "type" : "keyword" + } + } + } + } + } + }"""); + } + + public void testExpandDottedNotationToObjectMappers() throws IOException { + CompressedXContent mapping1 = new CompressedXContent(""" + { + "properties": { + "parent.child": { + "type": "keyword" + } + } + }"""); + + CompressedXContent mapping2 = new CompressedXContent("{}"); + + assertMergeEquals(List.of(mapping1, mapping2), """ + { + "_doc" : { + "properties" : { + "parent" : { + "properties" : { + "child" : { + "type" : "keyword" + } + } + } + } + } + }"""); + } + + private void assertMergeEquals(List mappingSources, String expected) throws IOException { + final MapperService mapperServiceBulk = createMapperService(mapping(b -> {})); + // simulates multiple component templates being merged in a composable index template + mapperServiceBulk.merge("_doc", mappingSources, MergeReason.INDEX_TEMPLATE); + assertEquals(expected, Strings.toString(mapperServiceBulk.documentMapper().mapping(), true, true)); + + MapperService mapperServiceSequential = createMapperService(mapping(b -> {})); + // simulates a series of mapping updates + mappingSources.forEach(m -> mapperServiceSequential.merge("_doc", m, MergeReason.INDEX_TEMPLATE)); + assertEquals(expected, Strings.toString(mapperServiceSequential.documentMapper().mapping(), true, true)); + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java index 005b14886d059..e024f2fa7b1ea 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java @@ -311,6 +311,24 @@ public void testMergeWithLimitRuntimeField() { assertEquals(4, mergedAdd1.getTotalFieldsCount()); } + public void testMergeSubobjectsFalseWithObject() { + RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( + new ObjectMapper.Builder("parent", Explicit.IMPLICIT_FALSE) + ).build(MapperBuilderContext.root(false, false)); + RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( + new ObjectMapper.Builder("parent", Explicit.IMPLICIT_TRUE).add( + new ObjectMapper.Builder("child", Explicit.IMPLICIT_TRUE).add( + new KeywordFieldMapper.Builder("grandchild", IndexVersion.current()) + ) + ) + ).build(MapperBuilderContext.root(false, false)); + + ObjectMapper merged = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, Long.MAX_VALUE)); + ObjectMapper parentMapper = (ObjectMapper) merged.getMapper("parent"); + assertNotNull(parentMapper); + assertNotNull(parentMapper.getMapper("child.grandchild")); + } + private static RootObjectMapper 
createRootSubobjectFalseLeafWithDots() { FieldMapper.Builder fieldBuilder = new KeywordFieldMapper.Builder("host.name", IndexVersion.current()); FieldMapper fieldMapper = fieldBuilder.build(MapperBuilderContext.root(false, false)); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java index 29e5f8540734b..6472f09ce1be7 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java @@ -25,6 +25,7 @@ import java.io.IOException; import java.util.List; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -362,8 +363,8 @@ public void testSubobjectsFalse() throws Exception { assertNotNull(mapperService.fieldType("metrics.service.time.max")); } - public void testSubobjectsFalseWithInnerObject() { - MapperParsingException exception = expectThrows(MapperParsingException.class, () -> createMapperService(mapping(b -> { + public void testSubobjectsFalseWithInnerObject() throws IOException { + MapperService mapperService = createMapperService(mapping(b -> { b.startObject("metrics.service"); { b.field("subobjects", false); @@ -384,11 +385,9 @@ public void testSubobjectsFalseWithInnerObject() { b.endObject(); } b.endObject(); - }))); - assertEquals( - "Failed to parse mapping: Tried to add subobject [time] to object [service] which does not support subobjects", - exception.getMessage() - ); + })); + assertNull(mapperService.fieldType("metrics.service.time")); + assertNotNull(mapperService.fieldType("metrics.service.time.max")); } public void testSubobjectsFalseWithInnerNested() { @@ -407,7 +406,7 @@ public void testSubobjectsFalseWithInnerNested() { b.endObject(); }))); assertEquals( - "Failed to parse mapping: Tried to add subobject [time] to object [service] which does not support subobjects", + "Failed to parse mapping: Tried to add nested object [time] to object [service] which does not support subobjects", exception.getMessage() ); } @@ -430,8 +429,8 @@ public void testExplicitDefaultSubobjects() throws Exception { assertEquals("{\"_doc\":{\"subobjects\":true}}", Strings.toString(mapperService.mappingLookup().getMapping())); } - public void testSubobjectsFalseRootWithInnerObject() { - MapperParsingException exception = expectThrows(MapperParsingException.class, () -> createMapperService(mappingNoSubobjects(b -> { + public void testSubobjectsFalseRootWithInnerObject() throws IOException { + MapperService mapperService = createMapperService(mappingNoSubobjects(b -> { b.startObject("metrics.service.time"); { b.startObject("properties"); @@ -443,11 +442,9 @@ public void testSubobjectsFalseRootWithInnerObject() { b.endObject(); } b.endObject(); - }))); - assertEquals( - "Failed to parse mapping: Tried to add subobject [metrics.service.time] to object [_doc] which does not support subobjects", - exception.getMessage() - ); + })); + assertNull(mapperService.fieldType("metrics.service.time")); + assertNotNull(mapperService.fieldType("metrics.service.time.max")); } public void testSubobjectsFalseRootWithInnerNested() { @@ -457,7 +454,7 @@ public void testSubobjectsFalseRootWithInnerNested() { b.endObject(); }))); assertEquals( - "Failed to parse mapping: Tried to add subobject [metrics.service] to object [_doc] which does not support subobjects", 
+ "Failed to parse mapping: Tried to add nested object [metrics.service] to object [_doc] which does not support subobjects", exception.getMessage() ); } @@ -575,4 +572,63 @@ private ObjectMapper createObjectMapperWithAllParametersSet(CheckedConsumer fields = objectMapper.asFlattenedFieldMappers(rootContext).stream().map(FieldMapper::name).toList(); + assertThat(fields, containsInAnyOrder("parent.keyword1", "parent.child.keyword2")); + } + + public void testFlattenDynamicIncompatible() { + MapperBuilderContext rootContext = MapperBuilderContext.root(false, false); + ObjectMapper objectMapper = new ObjectMapper.Builder("parent", Explicit.IMPLICIT_TRUE).add( + new ObjectMapper.Builder("child", Explicit.IMPLICIT_TRUE).dynamic(Dynamic.FALSE) + ).build(rootContext); + + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> objectMapper.asFlattenedFieldMappers(rootContext) + ); + assertEquals( + "Object mapper [parent.child] was found in a context where subobjects is set to false. " + + "Auto-flattening [parent.child] failed because the value of [dynamic] (FALSE) is not compatible with " + + "the value from its parent context (TRUE)", + exception.getMessage() + ); + } + + public void testFlattenEnabledFalse() { + MapperBuilderContext rootContext = MapperBuilderContext.root(false, false); + ObjectMapper objectMapper = new ObjectMapper.Builder("parent", Explicit.IMPLICIT_TRUE).enabled(false).build(rootContext); + + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> objectMapper.asFlattenedFieldMappers(rootContext) + ); + assertEquals( + "Object mapper [parent] was found in a context where subobjects is set to false. " + + "Auto-flattening [parent] failed because the value of [enabled] is [false]", + exception.getMessage() + ); + } + + public void testFlattenExplicitSubobjectsTrue() { + MapperBuilderContext rootContext = MapperBuilderContext.root(false, false); + ObjectMapper objectMapper = new ObjectMapper.Builder("parent", Explicit.EXPLICIT_TRUE).build(rootContext); + + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> objectMapper.asFlattenedFieldMappers(rootContext) + ); + assertEquals( + "Object mapper [parent] was found in a context where subobjects is set to false. 
" + + "Auto-flattening [parent] failed because the value of [subobjects] is [true]", + exception.getMessage() + ); + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/PassThroughObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/PassThroughObjectMapperTests.java index 40994e2835e2b..b49ed2cf99df6 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/PassThroughObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/PassThroughObjectMapperTests.java @@ -90,9 +90,9 @@ public void testSubobjectsThrows() throws IOException { ); } - public void testAddSubobjectThrows() throws IOException { - MapperException exception = expectThrows(MapperException.class, () -> createMapperService(mapping(b -> { - b.startObject("labels").field("type", "passthrough"); + public void testAddSubobjectAutoFlatten() throws IOException { + MapperService mapperService = createMapperService(mapping(b -> { + b.startObject("labels").field("type", "passthrough").field("time_series_dimension", "true"); { b.startObject("properties"); { @@ -107,12 +107,11 @@ public void testAddSubobjectThrows() throws IOException { b.endObject(); } b.endObject(); - }))); + })); - assertEquals( - "Failed to parse mapping: Tried to add subobject [subobj] to object [labels] which does not support subobjects", - exception.getMessage() - ); + var dim = mapperService.mappingLookup().getMapper("labels.subobj.dim"); + assertThat(dim, instanceOf(KeywordFieldMapper.class)); + assertTrue(((KeywordFieldMapper) dim).fieldType().isDimension()); } public void testWithoutMappers() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/reindex/ReindexRequestTests.java b/server/src/test/java/org/elasticsearch/index/reindex/ReindexRequestTests.java index 65c060aa9005a..c8cce9a9910e7 100644 --- a/server/src/test/java/org/elasticsearch/index/reindex/ReindexRequestTests.java +++ b/server/src/test/java/org/elasticsearch/index/reindex/ReindexRequestTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Predicates; import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.slice.SliceBuilder; @@ -115,7 +116,7 @@ protected ReindexRequest createTestInstance() { @Override protected ReindexRequest doParseInstance(XContentParser parser) throws IOException { - return ReindexRequest.fromXContent(parser, nf -> false); + return ReindexRequest.fromXContent(parser, Predicates.never()); } @Override @@ -403,7 +404,7 @@ private ReindexRequest parseRequestWithSourceIndices(Object sourceIndices) throw request = BytesReference.bytes(b); } try (XContentParser p = createParser(JsonXContent.jsonXContent, request)) { - return ReindexRequest.fromXContent(p, nf -> false); + return ReindexRequest.fromXContent(p, Predicates.never()); } } } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 9e1bd96f4a3b8..97bf9f4e380fa 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -178,7 +178,6 @@ import static org.elasticsearch.common.lucene.Lucene.cleanLuceneIndex; import static 
org.elasticsearch.index.IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; -import static org.elasticsearch.test.hamcrest.RegexMatcher.matches; import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsInAnyOrder; @@ -195,6 +194,7 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.matchesRegex; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -1431,7 +1431,7 @@ public void onFailure(Exception e) { * the race, then the other thread lost the race and only one operation should have been executed. */ assertThat(e, instanceOf(IllegalStateException.class)); - assertThat(e, hasToString(matches("operation primary term \\[\\d+\\] is too old"))); + assertThat(e, hasToString(matchesRegex(".*operation primary term \\[\\d+\\] is too old.*"))); assertThat(counter.get(), equalTo(1L)); } else { assertThat(counter.get(), equalTo(2L)); @@ -3800,6 +3800,7 @@ public void testIsSearchIdle() throws Exception { closeShards(primary); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/101008") @TestIssueLogging( issueUrl = "https://github.com/elastic/elasticsearch/issues/101008", value = "org.elasticsearch.index.shard.IndexShard:TRACE" diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java index 846625fc4f790..ee1bdf927a11b 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java @@ -38,6 +38,7 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.SlowLogFieldProvider; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.engine.EngineFactory; @@ -61,7 +62,6 @@ import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.IndexSettingsModule; -import org.elasticsearch.test.hamcrest.RegexMatcher; import java.io.IOException; import java.util.ArrayList; @@ -89,6 +89,7 @@ import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.matchesRegex; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; import static org.mockito.Mockito.mock; @@ -192,6 +193,50 @@ public void onIndexModule(IndexModule indexModule) { } } + public static class TestSlowLogFieldProvider implements SlowLogFieldProvider { + + private static Map fields = Map.of(); + + static void setFields(Map fields) { + TestSlowLogFieldProvider.fields = fields; + } + + @Override + public void init(IndexSettings indexSettings) {} + + @Override + public Map indexSlowLogFields() { + return fields; + } + + @Override + public Map searchSlowLogFields() { + return fields; + } + } + + public static class TestAnotherSlowLogFieldProvider implements SlowLogFieldProvider { + + private static Map fields = Map.of(); + 
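The assertion changes above in IndexShardTests (and, just below, in IndicesServiceTests) drop the test-framework RegexMatcher, which effectively looked for the pattern anywhere in the string, in favour of Hamcrest's built-in matchesRegex, which must match the entire input. That is why the IndexShardTests pattern gains leading and trailing .* while the IndicesServiceTests pattern already carried them. A small illustration of the full-match semantics, with a hypothetical message and only standard Hamcrest:

import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.matchesRegex;

public class RegexMatcherExample {
    public static void main(String[] args) {
        String message = "shard failure: operation primary term [3] is too old (current [4])";

        // matchesRegex() requires the whole string to match, so the pattern
        // needs explicit wildcards around the fragment of interest.
        assertThat(message, matchesRegex(".*operation primary term \\[\\d+\\] is too old.*"));
    }
}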
+ static void setFields(Map<String, String> fields) { + TestAnotherSlowLogFieldProvider.fields = fields; + } + + @Override + public void init(IndexSettings indexSettings) {} + + @Override + public Map<String, String> indexSlowLogFields() { + return fields; + } + + @Override + public Map<String, String> searchSlowLogFields() { + return fields; + } + } + @Override protected boolean resetNodeAfterTest() { return true; @@ -606,7 +651,7 @@ public void testConflictingEngineFactories() { ); final String pattern = ".*multiple engine factories provided for \\[foobar/.*\\]: \\[.*FooEngineFactory\\],\\[.*BarEngineFactory\\].*"; - assertThat(e, hasToString(new RegexMatcher(pattern))); + assertThat(e, hasToString(matchesRegex(pattern))); } public void testBuildAliasFilter() { @@ -746,4 +791,38 @@ public void testBuildAliasFilterDataStreamAliases() { assertThat(result, is(AliasFilter.EMPTY)); } } + + public void testLoadSlowLogFieldProvider() { + TestSlowLogFieldProvider.setFields(Map.of("key1", "value1")); + TestAnotherSlowLogFieldProvider.setFields(Map.of("key2", "value2")); + + var indicesService = getIndicesService(); + SlowLogFieldProvider fieldProvider = indicesService.loadSlowLogFieldProvider(); + + // The map of fields from the two providers are merged to a single map of fields + assertEquals(Map.of("key1", "value1", "key2", "value2"), fieldProvider.searchSlowLogFields()); + assertEquals(Map.of("key1", "value1", "key2", "value2"), fieldProvider.indexSlowLogFields()); + + TestSlowLogFieldProvider.setFields(Map.of("key1", "value1")); + TestAnotherSlowLogFieldProvider.setFields(Map.of("key1", "value2")); + + // There is an overlap of field names, since this isn't deterministic and probably a + // programming error (two providers provide the same field) throw an exception + assertThrows(IllegalStateException.class, fieldProvider::searchSlowLogFields); + assertThrows(IllegalStateException.class, fieldProvider::indexSlowLogFields); + + TestSlowLogFieldProvider.setFields(Map.of("key1", "value1")); + TestAnotherSlowLogFieldProvider.setFields(Map.of()); + + // One provider has no fields + assertEquals(Map.of("key1", "value1"), fieldProvider.searchSlowLogFields()); + assertEquals(Map.of("key1", "value1"), fieldProvider.indexSlowLogFields()); + + TestSlowLogFieldProvider.setFields(Map.of()); + TestAnotherSlowLogFieldProvider.setFields(Map.of()); + + // Both providers have no fields + assertEquals(Map.of(), fieldProvider.searchSlowLogFields()); + assertEquals(Map.of(), fieldProvider.indexSlowLogFields()); + } } diff --git a/server/src/test/java/org/elasticsearch/ingest/SimulateIngestServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/SimulateIngestServiceTests.java index 3ef1b1983df8b..30145ab37c322 100644 --- a/server/src/test/java/org/elasticsearch/ingest/SimulateIngestServiceTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/SimulateIngestServiceTests.java @@ -19,10 +19,13 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentType; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import static org.elasticsearch.test.LambdaMatchers.transformedMatch; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; @@ -30,6 +33,12 @@ public class SimulateIngestServiceTests extends ESTestCase { + private static <K, V> Map<K, V> newHashMap(K key, V value) { + Map<K, V> map = new HashMap<>(); + map.put(key, value); + return map; + } +
public void testGetPipeline() { PipelineConfiguration pipelineConfiguration = new PipelineConfiguration("pipeline1", new BytesArray(""" {"processors": [{"processor1" : {}}]}"""), XContentType.JSON); @@ -57,74 +66,47 @@ public void testGetPipeline() { SimulateBulkRequest simulateBulkRequest = new SimulateBulkRequest((Map<String, Map<String, Object>>) null); SimulateIngestService simulateIngestService = new SimulateIngestService(ingestService, simulateBulkRequest); Pipeline pipeline = simulateIngestService.getPipeline("pipeline1"); - assertThat(pipeline.getProcessors().size(), equalTo(1)); - assertThat(pipeline.getProcessors().get(0).getType(), equalTo("processor1")); + assertThat(pipeline.getProcessors(), contains(transformedMatch(Processor::getType, equalTo("processor1")))); assertNull(simulateIngestService.getPipeline("pipeline2")); } { // Here we make sure that if we have a substitution with the same name as the original pipeline that we get the new one back - Map<String, Map<String, Object>> pipelineSubstitutions = new HashMap<>() { - { - put("pipeline1", new HashMap<>() { - { - put("processors", List.of(new HashMap<>() { - { - put("processor2", new HashMap<>()); - } - }, new HashMap<>() { - { - put("processor3", new HashMap<>()); - } - })); - } - }); - put("pipeline2", new HashMap<>() { - { - put("processors", List.of(new HashMap<>() { - { - put("processor3", new HashMap<>()); - } - })); - } - }); - } - }; + Map<String, Map<String, Object>> pipelineSubstitutions = new HashMap<>(); + pipelineSubstitutions.put( + "pipeline1", + newHashMap( + "processors", + List.of(newHashMap("processor2", Collections.emptyMap()), newHashMap("processor3", Collections.emptyMap())) + ) + ); + pipelineSubstitutions.put("pipeline2", newHashMap("processors", List.of(newHashMap("processor3", Collections.emptyMap())))); + SimulateBulkRequest simulateBulkRequest = new SimulateBulkRequest(pipelineSubstitutions); SimulateIngestService simulateIngestService = new SimulateIngestService(ingestService, simulateBulkRequest); Pipeline pipeline1 = simulateIngestService.getPipeline("pipeline1"); - assertThat(pipeline1.getProcessors().size(), equalTo(2)); - assertThat(pipeline1.getProcessors().get(0).getType(), equalTo("processor2")); - assertThat(pipeline1.getProcessors().get(1).getType(), equalTo("processor3")); + assertThat( + pipeline1.getProcessors(), + contains( + transformedMatch(Processor::getType, equalTo("processor2")), + transformedMatch(Processor::getType, equalTo("processor3")) + ) + ); Pipeline pipeline2 = simulateIngestService.getPipeline("pipeline2"); - assertThat(pipeline2.getProcessors().size(), equalTo(1)); - assertThat(pipeline2.getProcessors().get(0).getType(), equalTo("processor3")); + assertThat(pipeline2.getProcessors(), contains(transformedMatch(Processor::getType, equalTo("processor3")))); } { /* * Here we make sure that if we have a substitution for a new pipeline we still get the original one back (as well as the new * one).
*/ - Map<String, Map<String, Object>> pipelineSubstitutions = new HashMap<>() { - { - put("pipeline2", new HashMap<>() { - { - put("processors", List.of(new HashMap<>() { - { - put("processor3", new HashMap<>()); - } - })); - } - }); - } - }; + Map<String, Map<String, Object>> pipelineSubstitutions = new HashMap<>(); + pipelineSubstitutions.put("pipeline2", newHashMap("processors", List.of(newHashMap("processor3", Collections.emptyMap())))); SimulateBulkRequest simulateBulkRequest = new SimulateBulkRequest(pipelineSubstitutions); SimulateIngestService simulateIngestService = new SimulateIngestService(ingestService, simulateBulkRequest); Pipeline pipeline1 = simulateIngestService.getPipeline("pipeline1"); - assertThat(pipeline1.getProcessors().size(), equalTo(1)); - assertThat(pipeline1.getProcessors().get(0).getType(), equalTo("processor1")); + assertThat(pipeline1.getProcessors(), contains(transformedMatch(Processor::getType, equalTo("processor1")))); Pipeline pipeline2 = simulateIngestService.getPipeline("pipeline2"); - assertThat(pipeline2.getProcessors().size(), equalTo(1)); - assertThat(pipeline2.getProcessors().get(0).getType(), equalTo("processor3")); + assertThat(pipeline2.getProcessors(), contains(transformedMatch(Processor::getType, equalTo("processor3")))); } } diff --git a/server/src/test/java/org/elasticsearch/lucene/grouping/SinglePassGroupingCollectorSearchAfterTests.java b/server/src/test/java/org/elasticsearch/lucene/grouping/SinglePassGroupingCollectorSearchAfterTests.java index 8ad4593602a25..bec0f83f78674 100644 --- a/server/src/test/java/org/elasticsearch/lucene/grouping/SinglePassGroupingCollectorSearchAfterTests.java +++ b/server/src/test/java/org/elasticsearch/lucene/grouping/SinglePassGroupingCollectorSearchAfterTests.java @@ -18,7 +18,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; -import org.apache.lucene.search.TopFieldCollector; +import org.apache.lucene.search.TopFieldCollectorManager; import org.apache.lucene.search.TopFieldDocs; import org.apache.lucene.search.TotalHits; import org.apache.lucene.store.Directory; @@ -107,12 +107,11 @@ private <T extends Comparable<T>> void assertSearchCollapse( ?
SinglePassGroupingCollector.createNumeric("field", fieldType, sort, expectedNumGroups, after) : SinglePassGroupingCollector.createKeyword("field", fieldType, sort, expectedNumGroups, after); - TopFieldCollector topFieldCollector = TopFieldCollector.create(sort, totalHits, after, Integer.MAX_VALUE); + TopFieldCollectorManager topFieldCollectorManager = new TopFieldCollectorManager(sort, totalHits, after, Integer.MAX_VALUE); Query query = new MatchAllDocsQuery(); searcher.search(query, collapsingCollector); - searcher.search(query, topFieldCollector); + TopFieldDocs topDocs = searcher.search(query, topFieldCollectorManager); TopFieldGroups collapseTopFieldDocs = collapsingCollector.getTopGroups(0); - TopFieldDocs topDocs = topFieldCollector.topDocs(); assertEquals(sortField.getField(), collapseTopFieldDocs.field); assertEquals(totalHits, collapseTopFieldDocs.totalHits.value); assertEquals(expectedNumGroups, collapseTopFieldDocs.scoreDocs.length); diff --git a/server/src/test/java/org/elasticsearch/lucene/grouping/SinglePassGroupingCollectorTests.java b/server/src/test/java/org/elasticsearch/lucene/grouping/SinglePassGroupingCollectorTests.java index 8dd7ed9c21896..bb4b3f42fde85 100644 --- a/server/src/test/java/org/elasticsearch/lucene/grouping/SinglePassGroupingCollectorTests.java +++ b/server/src/test/java/org/elasticsearch/lucene/grouping/SinglePassGroupingCollectorTests.java @@ -26,7 +26,7 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.SortedSetSortField; -import org.apache.lucene.search.TopFieldCollector; +import org.apache.lucene.search.TopFieldCollectorManager; import org.apache.lucene.search.TopFieldDocs; import org.apache.lucene.search.TotalHits; import org.apache.lucene.search.Weight; @@ -132,12 +132,11 @@ private > void assertSearchCollapse( ); } - TopFieldCollector topFieldCollector = TopFieldCollector.create(sort, totalHits, Integer.MAX_VALUE); + TopFieldCollectorManager topFieldCollectorManager = new TopFieldCollectorManager(sort, totalHits, Integer.MAX_VALUE); Query query = new MatchAllDocsQuery(); searcher.search(query, collapsingCollector); - searcher.search(query, topFieldCollector); + TopFieldDocs topDocs = searcher.search(query, topFieldCollectorManager); TopFieldGroups collapseTopFieldDocs = collapsingCollector.getTopGroups(0); - TopFieldDocs topDocs = topFieldCollector.topDocs(); assertEquals(collapseField.getField(), collapseTopFieldDocs.field); assertEquals(expectedNumGroups, collapseTopFieldDocs.scoreDocs.length); assertEquals(totalHits, collapseTopFieldDocs.totalHits.value); diff --git a/server/src/test/java/org/elasticsearch/lucene/spatial/CentroidCalculatorTests.java b/server/src/test/java/org/elasticsearch/lucene/spatial/CentroidCalculatorTests.java index d15ea1ac2e469..fce58b07eb090 100644 --- a/server/src/test/java/org/elasticsearch/lucene/spatial/CentroidCalculatorTests.java +++ b/server/src/test/java/org/elasticsearch/lucene/spatial/CentroidCalculatorTests.java @@ -22,9 +22,9 @@ import org.elasticsearch.geometry.utils.GeographyValidator; import org.elasticsearch.geometry.utils.WellKnownText; import org.elasticsearch.test.ESTestCase; -import org.hamcrest.BaseMatcher; import org.hamcrest.Description; import org.hamcrest.Matcher; +import org.hamcrest.TypeSafeMatcher; import java.util.ArrayList; import java.util.Collections; @@ -409,7 +409,7 @@ private Matcher matchesCentroid(CentroidCalculator expectedC return new CentroidMatcher(expectedCentroid.getX(), 
expectedCentroid.getY(), expectedCentroid.sumWeight(), weightFactor); } - private static class CentroidMatcher extends BaseMatcher { + private static class CentroidMatcher extends TypeSafeMatcher { private final double weightFactor; private final Matcher xMatcher; private final Matcher yMatcher; @@ -423,34 +423,26 @@ private CentroidMatcher(double x, double y, double weight, double weightFactor) } private Matcher matchDouble(double value) { - if (value > 1e20 || value < 1e20) { - // Very large values have floating point errors, so instead of an absolute value, we use a relative one - return closeTo(value, Math.abs(value / 1e10)); - } else { - // Most data (notably geo data) has values within bounds, and an absolute delta makes more sense. - return closeTo(value, DELTA); - } + // Very large values have floating point errors, so instead of an absolute value, we use a relative one + // Most data (notably geo data) has values within bounds, and an absolute delta makes more sense. + double delta = (value > 1e28 || value < -1e28) ? Math.abs(value / 1e6) + : (value > 1e20 || value < -1e20) ? Math.abs(value / 1e10) + : DELTA; + return closeTo(value, delta); } @Override - public boolean matches(Object actual) { - if (actual instanceof CentroidCalculator actualCentroid) { - return xMatcher.matches(actualCentroid.getX()) - && yMatcher.matches(actualCentroid.getY()) - && wMatcher.matches(weightFactor * actualCentroid.sumWeight()); - } - return false; + public boolean matchesSafely(CentroidCalculator actualCentroid) { + return xMatcher.matches(actualCentroid.getX()) + && yMatcher.matches(actualCentroid.getY()) + && wMatcher.matches(weightFactor * actualCentroid.sumWeight()); } @Override - public void describeMismatch(Object item, Description description) { - if (item instanceof CentroidCalculator actualCentroid) { - describeSubMismatch(xMatcher, actualCentroid.getX(), "X value", description); - describeSubMismatch(yMatcher, actualCentroid.getY(), "Y value", description); - describeSubMismatch(wMatcher, weightFactor * actualCentroid.sumWeight(), "sumWeight", description); - } else { - super.describeMismatch(item, description); - } + public void describeMismatchSafely(CentroidCalculator actualCentroid, Description description) { + describeSubMismatch(xMatcher, actualCentroid.getX(), "X value", description); + describeSubMismatch(yMatcher, actualCentroid.getY(), "Y value", description); + describeSubMismatch(wMatcher, weightFactor * actualCentroid.sumWeight(), "sumWeight", description); } private void describeSubMismatch(Matcher matcher, double value, String name, Description description) { diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java index 4bc37ea380bfd..8178505470d9a 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.persistent.PersistentTasksCustomMetadata.Assignment; import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestParams; @@ -258,7 +259,12 @@ public void 
testTaskCancellation() { when(client.settings()).thenReturn(Settings.EMPTY); PersistentTasksService persistentTasksService = new PersistentTasksService(null, null, client) { @Override - void sendCancelRequest(final long taskId, final String reason, final ActionListener listener) { + void sendCancelRequest( + final long taskId, + final String reason, + final TimeValue timeout, + final ActionListener listener + ) { capturedTaskId.set(taskId); capturedListener.set(listener); } @@ -269,6 +275,7 @@ public void sendCompletionRequest( final long taskAllocationId, final Exception taskFailure, final String localAbortReason, + final TimeValue timeout, final ActionListener> listener ) { fail("Shouldn't be called during Cluster State cancellation"); @@ -348,7 +355,12 @@ public void testTaskLocalAbort() { when(client.settings()).thenReturn(Settings.EMPTY); PersistentTasksService persistentTasksService = new PersistentTasksService(null, null, client) { @Override - void sendCancelRequest(final long taskId, final String reason, final ActionListener listener) { + void sendCancelRequest( + final long taskId, + final String reason, + final TimeValue timeout, + final ActionListener listener + ) { fail("Shouldn't be called during local abort"); } @@ -358,6 +370,7 @@ public void sendCompletionRequest( final long taskAllocationId, final Exception taskFailure, final String localAbortReason, + final TimeValue timeout, final ActionListener> listener ) { assertThat(taskId, not(nullValue())); @@ -466,6 +479,7 @@ public void sendCompletionRequest( long taskAllocationId, Exception taskFailure, String localAbortReason, + TimeValue timeout, ActionListener> listener ) { assertThat(taskFailure, instanceOf(RuntimeException.class)); diff --git a/server/src/test/java/org/elasticsearch/plugins/IndexStorePluginTests.java b/server/src/test/java/org/elasticsearch/plugins/IndexStorePluginTests.java index 7d695a238f242..5a2d9480a95e9 100644 --- a/server/src/test/java/org/elasticsearch/plugins/IndexStorePluginTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/IndexStorePluginTests.java @@ -21,9 +21,9 @@ import java.util.Collections; import java.util.Map; -import static org.elasticsearch.test.hamcrest.RegexMatcher.matches; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasToString; +import static org.hamcrest.Matchers.matchesRegex; public class IndexStorePluginTests extends ESTestCase { @@ -112,7 +112,7 @@ public void testDuplicateIndexStoreFactories() { assertThat( e, hasToString( - matches( + matchesRegex( "java.lang.IllegalStateException: Duplicate key store \\(attempted merging values " + "org.elasticsearch.index.store.FsDirectoryFactory@[\\w\\d]+ " + "and org.elasticsearch.index.store.FsDirectoryFactory@[\\w\\d]+\\)" diff --git a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java index 4f7001f00e6a7..5a736b4e1e9dd 100644 --- a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import 
org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; @@ -39,6 +40,7 @@ import org.elasticsearch.repositories.blobstore.MeteredBlobStoreRepository; import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotId; +import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.Transport; @@ -51,6 +53,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.Executor; +import java.util.function.BooleanSupplier; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.isA; @@ -332,7 +335,13 @@ public RepositoryMetadata getMetadata() { } @Override - public void getSnapshotInfo(GetSnapshotInfoContext context) { + public void getSnapshotInfo( + Collection snapshotIds, + boolean abortOnFailure, + BooleanSupplier isCancelled, + CheckedConsumer consumer, + ActionListener listener + ) { throw new UnsupportedOperationException(); } @@ -482,8 +491,7 @@ private MeteredRepositoryTypeA(RepositoryMetadata metadata, ClusterService clust MockBigArrays.NON_RECYCLING_INSTANCE, mock(RecoverySettings.class), BlobPath.EMPTY, - Map.of("bucket", "bucket-a"), - RepositoriesMetrics.NOOP + Map.of("bucket", "bucket-a") ); } @@ -510,8 +518,7 @@ private MeteredRepositoryTypeB(RepositoryMetadata metadata, ClusterService clust MockBigArrays.NON_RECYCLING_INSTANCE, mock(RecoverySettings.class), BlobPath.EMPTY, - Map.of("bucket", "bucket-b"), - RepositoriesMetrics.NOOP + Map.of("bucket", "bucket-b") ); } diff --git a/server/src/test/java/org/elasticsearch/repositories/ResolvedRepositoriesTests.java b/server/src/test/java/org/elasticsearch/repositories/ResolvedRepositoriesTests.java new file mode 100644 index 0000000000000..04859d2847522 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/repositories/ResolvedRepositoriesTests.java @@ -0,0 +1,93 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.repositories; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.RepositoriesMetadata; +import org.elasticsearch.cluster.metadata.RepositoryMetadata; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matchers; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +public class ResolvedRepositoriesTests extends ESTestCase { + + public void testAll() { + runMatchAllTest(); + runMatchAllTest("*"); + runMatchAllTest("_all"); + } + + private static void runMatchAllTest(String... 
patterns) { + final var state = clusterStateWithRepositories(randomList(1, 4, ESTestCase::randomIdentifier).toArray(String[]::new)); + final var result = getRepositories(state, patterns); + assertEquals(RepositoriesMetadata.get(state).repositories(), result.repositoryMetadata()); + assertThat(result.missing(), Matchers.empty()); + assertFalse(result.hasMissingRepositories()); + } + + public void testMatchingName() { + final var state = clusterStateWithRepositories(randomList(1, 4, ESTestCase::randomIdentifier).toArray(String[]::new)); + final var name = randomFrom(RepositoriesMetadata.get(state).repositories()).name(); + final var result = getRepositories(state, name); + assertEquals(List.of(RepositoriesMetadata.get(state).repository(name)), result.repositoryMetadata()); + assertThat(result.missing(), Matchers.empty()); + assertFalse(result.hasMissingRepositories()); + } + + public void testMismatchingName() { + final var state = clusterStateWithRepositories(randomList(1, 4, ESTestCase::randomIdentifier).toArray(String[]::new)); + final var notAName = randomValueOtherThanMany( + n -> RepositoriesMetadata.get(state).repositories().stream().anyMatch(m -> n.equals(m.name())), + ESTestCase::randomIdentifier + ); + final var result = getRepositories(state, notAName); + assertEquals(List.of(), result.repositoryMetadata()); + assertEquals(List.of(notAName), result.missing()); + assertTrue(result.hasMissingRepositories()); + } + + public void testWildcards() { + final var state = clusterStateWithRepositories("test-match-1", "test-match-2", "test-exclude", "other-repo"); + + runWildcardTest(state, List.of("test-match-1", "test-match-2", "test-exclude"), "test-*"); + runWildcardTest(state, List.of("test-match-1", "test-match-2"), "test-*1", "test-*2"); + runWildcardTest(state, List.of("test-match-2", "test-match-1"), "test-*2", "test-*1"); + runWildcardTest(state, List.of("test-match-1", "test-match-2"), "test-*", "-*-exclude"); + runWildcardTest(state, List.of(), "no-*-repositories"); + runWildcardTest(state, List.of("test-match-1", "test-match-2", "other-repo"), "test-*", "-*-exclude", "other-repo"); + runWildcardTest(state, List.of("other-repo", "test-match-1", "test-match-2"), "other-repo", "test-*", "-*-exclude"); + } + + private static void runWildcardTest(ClusterState clusterState, List expectedNames, String... patterns) { + final var result = getRepositories(clusterState, patterns); + final var description = Strings.format("%s should yield %s", Arrays.toString(patterns), expectedNames); + assertFalse(description, result.hasMissingRepositories()); + assertEquals(description, expectedNames, result.repositoryMetadata().stream().map(RepositoryMetadata::name).toList()); + } + + private static ResolvedRepositories getRepositories(ClusterState clusterState, String... patterns) { + return ResolvedRepositories.resolve(clusterState, patterns); + } + + private static ClusterState clusterStateWithRepositories(String... 
repoNames) { + final var repositories = new ArrayList(repoNames.length); + for (final var repoName : repoNames) { + repositories.add(new RepositoryMetadata(repoName, "test", Settings.EMPTY)); + } + return ClusterState.EMPTY_STATE.copyAndUpdateMetadata( + b -> b.putCustom(RepositoriesMetadata.TYPE, new RepositoriesMetadata(repositories)) + ); + } + +} diff --git a/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java b/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java index 809bf528ba194..bb06dbe5d09aa 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java @@ -256,11 +256,7 @@ public void testMarkPathRestricted() { IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> request1.markPathRestricted("foo")); assertThat(exception.getMessage(), is("The parameter [" + PATH_RESTRICTED + "] is already defined.")); - RestRequest request2 = contentRestRequest("content", new HashMap<>() { - { - put(PATH_RESTRICTED, "foo"); - } - }); + RestRequest request2 = contentRestRequest("content", Map.of(PATH_RESTRICTED, "foo")); exception = expectThrows(IllegalArgumentException.class, () -> request2.markPathRestricted("bar")); assertThat(exception.getMessage(), is("The parameter [" + PATH_RESTRICTED + "] is already defined.")); } diff --git a/server/src/test/java/org/elasticsearch/rest/action/cat/RestNodesActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/cat/RestNodesActionTests.java index 2f5293d7a44a8..7ddd63db73986 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/cat/RestNodesActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/cat/RestNodesActionTests.java @@ -9,17 +9,24 @@ package org.elasticsearch.rest.action.cat; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.monitor.fs.FsInfo; +import org.elasticsearch.monitor.os.OsStats; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; import org.junit.Before; +import java.util.ArrayList; import java.util.Collections; +import java.util.List; +import java.util.Map; import static java.util.Collections.emptySet; import static org.mockito.Mockito.mock; @@ -48,4 +55,70 @@ public void testBuildTableDoesNotThrowGivenNullNodeInfoAndStats() { action.buildTable(false, new FakeRestRequest(), clusterStateResponse, nodesInfoResponse, nodesStatsResponse); } + + public void testFormattedNumericSort() { + final var clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("node-1")).add(DiscoveryNodeUtils.create("node-2"))) + .build(); + + final var nowMillis = System.currentTimeMillis(); + final var rowOrder = RestTable.getRowOrder( + action.buildTable( + false, + new FakeRestRequest(), + new ClusterStateResponse(clusterState.getClusterName(), clusterState, false), + new NodesInfoResponse(clusterState.getClusterName(), List.of(), List.of()), + 
new NodesStatsResponse( + clusterState.getClusterName(), + List.of( + // sorting 10 vs 9 in all relevant columns, since these sort incorrectly as strings + getTrickySortingNodeStats(nowMillis, clusterState.nodes().get("node-1"), 10), + getTrickySortingNodeStats(nowMillis, clusterState.nodes().get("node-2"), 9) + ), + Collections.emptyList() + ) + ), + new FakeRestRequest.Builder(xContentRegistry()).withParams( + Map.of("s", randomFrom("load_1m", "load_5m", "load_15m", "disk.used_percent")) + ).build() + ); + + final var nodesList = new ArrayList(); + for (final var node : clusterState.nodes()) { + nodesList.add(node); + } + + assertEquals("node-2", nodesList.get(rowOrder.get(0)).getId()); + assertEquals("node-1", nodesList.get(rowOrder.get(1)).getId()); + } + + private static NodeStats getTrickySortingNodeStats(long nowMillis, DiscoveryNode node, int sortValue) { + return new NodeStats( + node, + nowMillis, + null, + new OsStats( + nowMillis, + new OsStats.Cpu((short) sortValue, new double[] { sortValue, sortValue, sortValue }), + new OsStats.Mem(0, 0, 0), + new OsStats.Swap(0, 0), + null + ), + null, + null, + null, + new FsInfo(nowMillis, null, new FsInfo.Path[] { new FsInfo.Path("/foo", "/foo", 100, 100 - sortValue, 100 - sortValue) }), + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ); + } } diff --git a/server/src/test/java/org/elasticsearch/rest/action/cat/RestTableTests.java b/server/src/test/java/org/elasticsearch/rest/action/cat/RestTableTests.java index ff3b72463d86e..1ec180fdaad77 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/cat/RestTableTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/cat/RestTableTests.java @@ -259,6 +259,26 @@ public void testMultiSort() { assertEquals(Arrays.asList(1, 0, 2), rowOrder); } + public void testFormattedDouble() { + Table table = new Table(); + table.startHeaders(); + table.addCell("number"); + table.endHeaders(); + List comparisonList = Arrays.asList(10, 9, 11); + for (int i = 0; i < comparisonList.size(); i++) { + table.startRow(); + table.addCell(RestTable.FormattedDouble.format2DecimalPlaces(comparisonList.get(i))); + table.endRow(); + } + restRequest.params().put("s", "number"); + List rowOrder = RestTable.getRowOrder(table, restRequest); + assertEquals(Arrays.asList(1, 0, 2), rowOrder); + + restRequest.params().put("s", "number:desc"); + rowOrder = RestTable.getRowOrder(table, restRequest); + assertEquals(Arrays.asList(2, 0, 1), rowOrder); + } + public void testPlainTextChunking() throws Exception { final var cells = randomArray(8, 8, String[]::new, () -> randomAlphaOfLengthBetween(1, 5)); final var expectedRow = String.join(" ", cells) + "\n"; @@ -404,6 +424,11 @@ public void close() { } }; } + + @Override + public int pageSize() { + return pageSize; + } }; final var bodyChunks = new ArrayList(); diff --git a/server/src/test/java/org/elasticsearch/rest/action/info/RestClusterInfoActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/info/RestClusterInfoActionTests.java index f3ef110ad4ce8..f0473ae344a79 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/info/RestClusterInfoActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/info/RestClusterInfoActionTests.java @@ -121,6 +121,7 @@ private NodeStats randomNodeStatsWithOnlyHttpStats(int i) { null, null, null, + null, null ); } diff --git a/server/src/test/java/org/elasticsearch/search/SearchSortValuesTests.java 
b/server/src/test/java/org/elasticsearch/search/SearchSortValuesTests.java index ac9ae1da0fddd..e21ae8af04b77 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchSortValuesTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchSortValuesTests.java @@ -90,7 +90,7 @@ protected SearchSortValues createTestInstance() { @Override protected Writeable.Reader instanceReader() { - return SearchSortValues::new; + return SearchSortValues::readFrom; } @Override diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsTests.java index 4a9e086d72143..d431a3a156957 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsTests.java @@ -214,5 +214,11 @@ public boolean supportsParallelCollection(ToLongFunction fieldCardinalit assertTrue(terms.supportsParallelCollection(field -> randomIntBetween(1, 10))); assertFalse(terms.supportsParallelCollection(field -> randomIntBetween(11, 100))); } + { + TermsAggregationBuilder terms = new TermsAggregationBuilder("terms"); + terms.shardSize(randomIntBetween(1, 100)); + terms.minDocCount(0); + assertFalse(terms.supportsParallelCollection(field -> randomIntBetween(1, 100))); + } } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregationBuilderTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregationBuilderTests.java index 5514cb441b54c..18808f9b2aa87 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregationBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregationBuilderTests.java @@ -19,6 +19,9 @@ protected RandomSamplerAggregationBuilder createTestAggregatorBuilder() { if (randomBoolean()) { builder.setSeed(randomInt()); } + if (randomBoolean()) { + builder.setShardSeed(randomInt()); + } builder.setProbability(randomFrom(1.0, randomDoubleBetween(0.0, 0.5, false))); builder.subAggregation(AggregationBuilders.max(randomAlphaOfLength(10)).field(randomAlphaOfLength(10))); return builder; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregatorTests.java index 0916d2ad541e8..2b6a38b685303 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregatorTests.java @@ -58,7 +58,7 @@ public void testAggregationSampling() throws IOException { counts[integer.get()] = result.getDocCount(); if (result.getDocCount() > 0) { Avg agg = result.getAggregations().get("avg"); - assertThat(Strings.toString(result), agg.getValue(), allOf(not(notANumber()), IsFinite.isFinite())); + assertThat(Strings.toString(result), agg.getValue(), allOf(not(notANumber()), isFinite())); avgs[integer.get()] = agg.getValue(); } }, @@ -163,11 +163,11 @@ private static void writeTestDocs(RandomIndexWriter w) throws IOException { } } - private static class IsFinite extends TypeSafeMatcher { - public static Matcher isFinite() { - return new IsFinite(); - } + public 
static Matcher isFinite() { + return new IsFinite(); + } + private static class IsFinite extends TypeSafeMatcher { @Override protected boolean matchesSafely(Double item) { return Double.isFinite(item); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsTests.java index 61428fdeb335a..f0345a70294e0 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.metrics; -import org.elasticsearch.search.aggregations.AggregationInitializationException; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; import org.elasticsearch.xcontent.XContentParser; @@ -53,7 +52,7 @@ public void testFailWithSubAgg() throws Exception { """; XContentParser parser = createParser(JsonXContent.jsonXContent, source); assertSame(XContentParser.Token.START_OBJECT, parser.nextToken()); - Exception e = expectThrows(AggregationInitializationException.class, () -> AggregatorFactories.parseAggregators(parser)); + Exception e = expectThrows(IllegalArgumentException.class, () -> AggregatorFactories.parseAggregators(parser)); assertThat(e.toString(), containsString("Aggregator [viewport] of type [geo_bounds] cannot accept sub-aggregations")); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java index 39f583fd2c56b..e5c5dbbe64696 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java @@ -9,7 +9,6 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.search.aggregations.AggregationInitializationException; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; @@ -170,7 +169,7 @@ public void testFailWithSubAgg() throws Exception { }"""; XContentParser parser = createParser(JsonXContent.jsonXContent, source); assertSame(XContentParser.Token.START_OBJECT, parser.nextToken()); - Exception e = expectThrows(AggregationInitializationException.class, () -> AggregatorFactories.parseAggregators(parser)); + Exception e = expectThrows(IllegalArgumentException.class, () -> AggregatorFactories.parseAggregators(parser)); assertThat(e.toString(), containsString("Aggregator [top_tags_hits] of type [top_hits] cannot accept sub-aggregations")); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/support/SamplingContextTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/support/SamplingContextTests.java index d9e19cf60e481..ffb56f17c7f8f 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/support/SamplingContextTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/support/SamplingContextTests.java @@ -14,10 +14,9 @@ import static org.hamcrest.Matchers.equalTo; public class SamplingContextTests extends ESTestCase { - protected static final int NUMBER_OF_TEST_RUNS = 20; 
private static SamplingContext randomContext() { - return new SamplingContext(randomDoubleBetween(1e-6, 0.1, false), randomInt()); + return new SamplingContext(randomDoubleBetween(1e-6, 0.1, false), randomInt(), randomBoolean() ? null : randomInt()); } public void testScaling() { @@ -41,7 +40,7 @@ public void testScaling() { } public void testNoScaling() { - SamplingContext samplingContext = new SamplingContext(1.0, randomInt()); + SamplingContext samplingContext = new SamplingContext(1.0, randomInt(), randomBoolean() ? null : randomInt()); long randomLong = randomLong(); double randomDouble = randomDouble(); assertThat(randomLong, equalTo(samplingContext.scaleDown(randomLong))); diff --git a/server/src/test/java/org/elasticsearch/search/profile/query/ProfileCollectorManagerTests.java b/server/src/test/java/org/elasticsearch/search/profile/query/ProfileCollectorManagerTests.java index 5cfe368a9a392..fc8b9706d387a 100644 --- a/server/src/test/java/org/elasticsearch/search/profile/query/ProfileCollectorManagerTests.java +++ b/server/src/test/java/org/elasticsearch/search/profile/query/ProfileCollectorManagerTests.java @@ -19,6 +19,7 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopScoreDocCollector; +import org.apache.lucene.search.TopScoreDocCollectorManager; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.tests.search.DummyTotalHitCountCollector; @@ -121,12 +122,12 @@ public Integer reduce(Collection collectors) { */ public void testManagerWithSearcher() throws IOException { { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(10, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(10, null, 1000); TopDocs topDocs = searcher.search(new MatchAllDocsQuery(), topDocsManager); assertEquals(numDocs, topDocs.totalHits.value); } { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(10, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(10, null, 1000); String profileReason = "profiler_reason"; ProfileCollectorManager profileCollectorManager = new ProfileCollectorManager<>(topDocsManager, profileReason); TopDocs topDocs = searcher.search(new MatchAllDocsQuery(), profileCollectorManager); diff --git a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseCollectorTests.java b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseCollectorTests.java index b466101be07d8..f222e697488d2 100644 --- a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseCollectorTests.java +++ b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseCollectorTests.java @@ -34,6 +34,7 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopScoreDocCollector; +import org.apache.lucene.search.TopScoreDocCollectorManager; import org.apache.lucene.search.Weight; import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.store.Directory; @@ -108,7 +109,7 @@ public void testNegativeTerminateAfter() { public void testTopDocsOnly() throws IOException { { - CollectorManager topScoreDocManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topScoreDocManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager> manager = createCollectorManager( topScoreDocManager, null, @@ -121,7 +122,7 @@ public void 
testTopDocsOnly() throws IOException { assertEquals(numDocs, result.topDocs.totalHits.value); } { - CollectorManager topScoreDocManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topScoreDocManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager> manager = createCollectorManager( topScoreDocManager, null, @@ -137,7 +138,7 @@ public void testTopDocsOnly() throws IOException { public void testWithAggs() throws IOException { { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager aggsManager = DummyTotalHitCountCollector.createManager(); CollectorManager> manager = createCollectorManager( topDocsManager, @@ -152,7 +153,7 @@ public void testWithAggs() throws IOException { assertEquals(numDocs, result.aggs.intValue()); } { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager aggsManager = DummyTotalHitCountCollector.createManager(); CollectorManager> manager = createCollectorManager( topDocsManager, @@ -170,7 +171,7 @@ public void testWithAggs() throws IOException { public void testPostFilterTopDocsOnly() throws IOException { { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); TermQuery termQuery = new TermQuery(new Term("field2", "value")); Weight filterWeight = termQuery.createWeight(searcher, ScoreMode.TOP_DOCS, 1f); CollectorManager> manager = createCollectorManager( @@ -185,7 +186,7 @@ public void testPostFilterTopDocsOnly() throws IOException { assertEquals(numField2Docs, result.topDocs.totalHits.value); } { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); TermQuery termQuery = new TermQuery(new Term("field1", "value")); Weight filterWeight = termQuery.createWeight(searcher, ScoreMode.TOP_DOCS, 1f); CollectorManager> manager = createCollectorManager( @@ -203,7 +204,7 @@ public void testPostFilterTopDocsOnly() throws IOException { public void testPostFilterWithAggs() throws IOException { { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager aggsManager = DummyTotalHitCountCollector.createManager(); TermQuery termQuery = new TermQuery(new Term("field1", "value")); Weight filterWeight = termQuery.createWeight(searcher, ScoreMode.TOP_DOCS, 1f); @@ -220,7 +221,7 @@ public void testPostFilterWithAggs() throws IOException { assertEquals(numDocs, result.aggs.intValue()); } { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager aggsManager = DummyTotalHitCountCollector.createManager(); TermQuery termQuery = new TermQuery(new Term("field2", "value")); Weight filterWeight = termQuery.createWeight(searcher, ScoreMode.TOP_DOCS, 1f); @@ -247,18 +248,14 @@ public void testMinScoreTopDocsOnly() throws IOException { .add(new BoostQuery(new TermQuery(new Term("field2", "value")), 200f), BooleanClause.Occur.SHOULD) .build(); { - CollectorManager 
topDocsManager = TopScoreDocCollector.createSharedManager( - numField2Docs + 1, - null, - 1000 - ); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(numField2Docs + 1, null, 1000); TopDocs topDocs = searcher.search(booleanQuery, topDocsManager); assertEquals(numDocs, topDocs.totalHits.value); maxScore = topDocs.scoreDocs[0].score; thresholdScore = topDocs.scoreDocs[numField2Docs].score; } { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager> manager = createCollectorManager( topDocsManager, null, @@ -271,7 +268,7 @@ public void testMinScoreTopDocsOnly() throws IOException { assertEquals(numField2Docs, result.topDocs.totalHits.value); } { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager> manager = createCollectorManager( topDocsManager, null, @@ -284,7 +281,7 @@ public void testMinScoreTopDocsOnly() throws IOException { assertEquals(numDocs, result.topDocs.totalHits.value); } { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager> manager = createCollectorManager( topDocsManager, null, @@ -306,18 +303,14 @@ public void testMinScoreWithAggs() throws IOException { .add(new BoostQuery(new TermQuery(new Term("field2", "value")), 200f), BooleanClause.Occur.SHOULD) .build(); { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager( - numField2Docs + 1, - null, - 1000 - ); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(numField2Docs + 1, null, 1000); TopDocs topDocs = searcher.search(booleanQuery, topDocsManager); assertEquals(numDocs, topDocs.totalHits.value); maxScore = topDocs.scoreDocs[0].score; thresholdScore = topDocs.scoreDocs[numField2Docs].score; } { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager aggsManager = DummyTotalHitCountCollector.createManager(); CollectorManager> manager = createCollectorManager( topDocsManager, @@ -333,7 +326,7 @@ public void testMinScoreWithAggs() throws IOException { assertEquals(numField2Docs, result.aggs.intValue()); } { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager aggsManager = DummyTotalHitCountCollector.createManager(); CollectorManager> manager = createCollectorManager( topDocsManager, @@ -348,7 +341,7 @@ public void testMinScoreWithAggs() throws IOException { assertEquals(numDocs, result.aggs.intValue()); } { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager aggsManager = DummyTotalHitCountCollector.createManager(); CollectorManager> manager = createCollectorManager( topDocsManager, @@ -374,18 +367,14 @@ public void testPostFilterAndMinScoreTopDocsOnly() throws IOException { TermQuery termQuery = new TermQuery(new Term("field2", "value")); Weight filterWeight = termQuery.createWeight(searcher, ScoreMode.TOP_DOCS, 1f); { - CollectorManager 
topDocsManager = TopScoreDocCollector.createSharedManager( - numField3Docs + 1, - null, - 1000 - ); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(numField3Docs + 1, null, 1000); TopDocs topDocs = searcher.search(booleanQuery, topDocsManager); assertEquals(numDocs, topDocs.totalHits.value); maxScore = topDocs.scoreDocs[0].score; thresholdScore = topDocs.scoreDocs[numField3Docs].score; } { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager> manager = createCollectorManager( topDocsManager, filterWeight, @@ -398,7 +387,7 @@ public void testPostFilterAndMinScoreTopDocsOnly() throws IOException { assertEquals(numField2AndField3Docs, result.topDocs.totalHits.value); } { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager> manager = createCollectorManager( topDocsManager, filterWeight, @@ -411,7 +400,7 @@ public void testPostFilterAndMinScoreTopDocsOnly() throws IOException { assertEquals(numField2Docs, result.topDocs.totalHits.value); } { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager> manager = createCollectorManager( topDocsManager, filterWeight, @@ -435,18 +424,14 @@ public void testPostFilterAndMinScoreWithAggs() throws IOException { TermQuery termQuery = new TermQuery(new Term("field2", "value")); Weight filterWeight = termQuery.createWeight(searcher, ScoreMode.TOP_DOCS, 1f); { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager( - numField3Docs + 1, - null, - 1000 - ); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(numField3Docs + 1, null, 1000); TopDocs topDocs = searcher.search(booleanQuery, topDocsManager); assertEquals(numDocs, topDocs.totalHits.value); maxScore = topDocs.scoreDocs[0].score; thresholdScore = topDocs.scoreDocs[numField3Docs].score; } { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager aggsManager = DummyTotalHitCountCollector.createManager(); CollectorManager> manager = createCollectorManager( topDocsManager, @@ -461,7 +446,7 @@ public void testPostFilterAndMinScoreWithAggs() throws IOException { assertEquals(numField3Docs, result.aggs.intValue()); } { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager aggsManager = DummyTotalHitCountCollector.createManager(); CollectorManager> manager = createCollectorManager( topDocsManager, @@ -476,7 +461,7 @@ public void testPostFilterAndMinScoreWithAggs() throws IOException { assertEquals(numDocs, result.aggs.intValue()); } { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager aggsManager = DummyTotalHitCountCollector.createManager(); CollectorManager> manager = createCollectorManager( topDocsManager, @@ -635,18 +620,14 @@ public void testTerminateAfterTopDocsOnlyWithMinScore() throws IOException { .add(new 
BoostQuery(new TermQuery(new Term("field2", "value")), 200f), BooleanClause.Occur.SHOULD) .build(); { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager( - numField2Docs + 1, - null, - 1000 - ); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(numField2Docs + 1, null, 1000); TopDocs topDocs = searcher.search(booleanQuery, topDocsManager); assertEquals(numDocs, topDocs.totalHits.value); maxScore = topDocs.scoreDocs[0].score; } { int terminateAfter = randomIntBetween(1, numField2Docs - 1); - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager> manager = createCollectorManager( topDocsManager, null, @@ -667,18 +648,14 @@ public void testTerminateAfterWithAggsAndMinScore() throws IOException { .add(new BoostQuery(new TermQuery(new Term("field2", "value")), 200f), BooleanClause.Occur.SHOULD) .build(); { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager( - numField2Docs + 1, - null, - 1000 - ); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(numField2Docs + 1, null, 1000); TopDocs topDocs = searcher.search(booleanQuery, topDocsManager); assertEquals(numDocs, topDocs.totalHits.value); maxScore = topDocs.scoreDocs[0].score; } { int terminateAfter = randomIntBetween(1, numField2Docs - 1); - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager aggsManager = DummyTotalHitCountCollector.createManager(); CollectorManager> manager = createCollectorManager( topDocsManager, @@ -703,18 +680,14 @@ public void testTerminateAfterAndPostFilterAndMinScoreTopDocsOnly() throws IOExc TermQuery termQuery = new TermQuery(new Term("field2", "value")); Weight filterWeight = termQuery.createWeight(searcher, ScoreMode.TOP_DOCS, 1f); { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager( - numField3Docs + 1, - null, - 1000 - ); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(numField3Docs + 1, null, 1000); TopDocs topDocs = searcher.search(booleanQuery, topDocsManager); assertEquals(numDocs, topDocs.totalHits.value); maxScore = topDocs.scoreDocs[0].score; } { int terminateAfter = randomIntBetween(1, numField2AndField3Docs - 1); - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager> manager = createCollectorManager( topDocsManager, filterWeight, @@ -737,18 +710,14 @@ public void testTerminateAfterAndPostFilterAndMinScoreWithAggs() throws IOExcept TermQuery termQuery = new TermQuery(new Term("field2", "value")); Weight filterWeight = termQuery.createWeight(searcher, ScoreMode.TOP_DOCS, 1f); { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager( - numField3Docs + 1, - null, - 1000 - ); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(numField3Docs + 1, null, 1000); TopDocs topDocs = searcher.search(booleanQuery, topDocsManager); assertEquals(numDocs, topDocs.totalHits.value); maxScore = topDocs.scoreDocs[0].score; } { int terminateAfter = randomIntBetween(1, numField2AndField3Docs - 1); - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new 
TopScoreDocCollectorManager(1, null, 1000); CollectorManager aggsManager = DummyTotalHitCountCollector.createManager(); CollectorManager> manager = createCollectorManager( topDocsManager, diff --git a/server/src/test/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorServiceTests.java b/server/src/test/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorServiceTests.java index 0dfe27ee6dc50..572375d64d8b8 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorServiceTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorServiceTests.java @@ -18,8 +18,10 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.health.Diagnosis; import org.elasticsearch.health.Diagnosis.Resource.Type; +import org.elasticsearch.health.HealthFeatures; import org.elasticsearch.health.HealthIndicatorDetails; import org.elasticsearch.health.HealthIndicatorResult; import org.elasticsearch.health.SimpleHealthIndicatorDetails; @@ -27,12 +29,14 @@ import org.elasticsearch.health.node.RepositoriesHealthInfo; import org.elasticsearch.test.ESTestCase; import org.junit.Before; +import org.mockito.Mockito; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.stream.Stream; import static org.elasticsearch.cluster.node.DiscoveryNode.DISCOVERY_NODE_COMPARATOR; @@ -47,6 +51,7 @@ import static org.elasticsearch.snapshots.RepositoryIntegrityHealthIndicatorService.NAME; import static org.elasticsearch.snapshots.RepositoryIntegrityHealthIndicatorService.UNKNOWN_DEFINITION; import static org.hamcrest.Matchers.equalTo; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -55,6 +60,7 @@ public class RepositoryIntegrityHealthIndicatorServiceTests extends ESTestCase { private DiscoveryNode node1; private DiscoveryNode node2; private HealthInfo healthInfo; + private FeatureService featureService; @Before public void setUp() throws Exception { @@ -74,6 +80,9 @@ public void setUp() throws Exception { ) ) ); + + featureService = Mockito.mock(FeatureService.class); + Mockito.when(featureService.clusterHasFeature(any(), any())).thenReturn(true); } public void testIsGreenWhenAllRepositoriesAreHealthy() { @@ -349,11 +358,13 @@ public void testMappedFieldsForTelemetry() { } private ClusterState createClusterStateWith(RepositoriesMetadata metadata) { - var builder = ClusterState.builder(new ClusterName("test-cluster")); + var features = Set.of(HealthFeatures.SUPPORTS_EXTENDED_REPOSITORY_INDICATOR.id()); + var builder = ClusterState.builder(new ClusterName("test-cluster")) + .nodes(DiscoveryNodes.builder().add(node1).add(node2).build()) + .nodeFeatures(Map.of(node1.getId(), features, node2.getId(), features)); if (metadata != null) { builder.metadata(Metadata.builder().putCustom(RepositoriesMetadata.TYPE, metadata)); } - builder.nodes(DiscoveryNodes.builder().add(node1).add(node2).build()); return builder.build(); } @@ -361,10 +372,10 @@ private static RepositoryMetadata createRepositoryMetadata(String name, boolean return new RepositoryMetadata(name, "uuid", "s3", Settings.EMPTY, corrupted ? 
CORRUPTED_REPO_GEN : EMPTY_REPO_GEN, EMPTY_REPO_GEN); } - private static RepositoryIntegrityHealthIndicatorService createRepositoryIntegrityHealthIndicatorService(ClusterState clusterState) { + private RepositoryIntegrityHealthIndicatorService createRepositoryIntegrityHealthIndicatorService(ClusterState clusterState) { var clusterService = mock(ClusterService.class); when(clusterService.state()).thenReturn(clusterState); - return new RepositoryIntegrityHealthIndicatorService(clusterService); + return new RepositoryIntegrityHealthIndicatorService(clusterService, featureService); } private SimpleHealthIndicatorDetails createDetails(int total, int corruptedCount, List corrupted, int unknown, int invalid) { diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index edde9f0164a6e..0a53db94b9aaf 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -188,6 +188,7 @@ import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.BytesRefRecycler; import org.elasticsearch.transport.DisruptableMockTransport; @@ -1390,6 +1391,7 @@ public TransportRequestHandler interceptHandler( safeAwait(testListener); // shouldn't throw } + @TestLogging(reason = "testing logging at INFO level", value = "org.elasticsearch.snapshots.SnapshotsService:INFO") public void testFullSnapshotUnassignedShards() { setupTestCluster(1, 0); // no data nodes, we want unassigned shards @@ -1469,6 +1471,7 @@ public void onFailure(Exception e) { ); } + @TestLogging(reason = "testing logging at INFO level", value = "org.elasticsearch.snapshots.SnapshotsService:INFO") public void testSnapshotNameAlreadyInUseExceptionLogging() { setupTestCluster(1, 1); @@ -1519,6 +1522,7 @@ public void onFailure(Exception e) { ); } + @TestLogging(reason = "testing logging at INFO level", value = "org.elasticsearch.snapshots.SnapshotsService:INFO") public void testIndexNotFoundExceptionLogging() { setupTestCluster(1, 0); // no need for data nodes here @@ -1571,6 +1575,7 @@ public void onFailure(Exception e) { ); } + @TestLogging(reason = "testing logging at INFO level", value = "org.elasticsearch.snapshots.SnapshotsService:INFO") public void testIllegalArgumentExceptionLogging() { setupTestCluster(1, 0); // no need for data nodes here diff --git a/server/src/test/resources/META-INF/services/org.elasticsearch.index.SlowLogFieldProvider b/server/src/test/resources/META-INF/services/org.elasticsearch.index.SlowLogFieldProvider new file mode 100644 index 0000000000000..fcd1211eee0c5 --- /dev/null +++ b/server/src/test/resources/META-INF/services/org.elasticsearch.index.SlowLogFieldProvider @@ -0,0 +1,10 @@ +# +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License +# 2.0 and the Server Side Public License, v 1; you may not use this file except +# in compliance with, at your election, the Elastic License 2.0 or the Server +# Side Public License, v 1. 
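// A minimal sketch of the stubbing pattern used above (node1 and node2 are the test's existing
// DiscoveryNode fields; everything else appears in this change set): the indicator now consults a
// FeatureService, so the mock answers every feature check with true and the cluster state
// advertises the extended-repository-indicator feature on both nodes.
FeatureService featureService = Mockito.mock(FeatureService.class);
Mockito.when(featureService.clusterHasFeature(any(), any())).thenReturn(true);

var features = Set.of(HealthFeatures.SUPPORTS_EXTENDED_REPOSITORY_INDICATOR.id());
ClusterState clusterState = ClusterState.builder(new ClusterName("test-cluster"))
    .nodes(DiscoveryNodes.builder().add(node1).add(node2).build())
    .nodeFeatures(Map.of(node1.getId(), features, node2.getId(), features))
    .build();

var clusterService = mock(ClusterService.class);
when(clusterService.state()).thenReturn(clusterState);
// the service constructor now takes the feature service as well
var service = new RepositoryIntegrityHealthIndicatorService(clusterService, featureService);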
+# + +org.elasticsearch.indices.IndicesServiceTests$TestSlowLogFieldProvider +org.elasticsearch.indices.IndicesServiceTests$TestAnotherSlowLogFieldProvider diff --git a/settings.gradle b/settings.gradle index 5eefe21b360d6..c183971bc12ca 100644 --- a/settings.gradle +++ b/settings.gradle @@ -14,7 +14,7 @@ pluginManagement { } plugins { - id "com.gradle.enterprise" version "3.16.1" + id "com.gradle.enterprise" version "3.16.2" id 'elasticsearch.java-toolchain' } diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java b/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java index 1004ea5b50119..e07c27b22c926 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java @@ -89,7 +89,8 @@ List adjustNodesStats(List nodesStats) { nodeStats.getAdaptiveSelectionStats(), nodeStats.getScriptCacheStats(), nodeStats.getIndexingPressureStats(), - nodeStats.getRepositoriesStats() + nodeStats.getRepositoriesStats(), + nodeStats.getNodeAllocationStats() ); }).collect(Collectors.toList()); } diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java index 1d76c1e40910e..7f39120e83c07 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java @@ -2033,6 +2033,11 @@ public void close() { return trackedRef; } + @Override + public int pageSize() { + return delegate.pageSize(); + } + /** * Release all tracked refs as if the node rebooted. 
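// A hedged consumer-side sketch (assumed loading code, not part of this diff, imports elided) of
// why the new META-INF/services file above is enough to register the two providers:
// java.util.ServiceLoader discovers every class listed under the
// org.elasticsearch.index.SlowLogFieldProvider key on the test classpath.
static List<SlowLogFieldProvider> loadSlowLogFieldProviders() {
    List<SlowLogFieldProvider> providers = new ArrayList<>();
    ServiceLoader.load(SlowLogFieldProvider.class).forEach(providers::add);
    return providers;
}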
*/ diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java index 3a47e0885f2d2..8402b5756e915 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java @@ -74,6 +74,7 @@ import static org.elasticsearch.test.ESTestCase.randomAlphaOfLength; import static org.elasticsearch.test.ESTestCase.randomBoolean; import static org.elasticsearch.test.ESTestCase.randomFrom; +import static org.elasticsearch.test.ESTestCase.randomIntBetween; import static org.elasticsearch.test.ESTestCase.randomMap; import static org.elasticsearch.test.ESTestCase.randomMillisUpToYear9999; import static org.mockito.ArgumentMatchers.any; @@ -90,6 +91,10 @@ public static DataStream newInstance(String name, List indices) { return newInstance(name, indices, indices.size(), null); } + public static DataStream newInstance(String name, List indices, List failureIndices) { + return newInstance(name, indices, indices.size(), null, false, null, failureIndices); + } + public static DataStream newInstance(String name, List indices, long generation, Map metadata) { return newInstance(name, indices, generation, metadata, false); } @@ -136,7 +141,8 @@ public static DataStream newInstance( null, lifecycle, failureStores.size() > 0, - failureStores + failureStores, + null ); } @@ -257,8 +263,20 @@ public static String generateMapping(String timestampFieldName, String type) { + " }"; } + /** + * @return a list of random indices. NOTE: the list can be empty, if you do not want an empty list use + * {@link DataStreamTestHelper#randomNonEmptyIndexInstances()} + */ public static List randomIndexInstances() { - int numIndices = ESTestCase.randomIntBetween(0, 128); + return randomIndexInstances(0, 128); + } + + public static List randomNonEmptyIndexInstances() { + return randomIndexInstances(1, 128); + } + + public static List randomIndexInstances(int min, int max) { + int numIndices = ESTestCase.randomIntBetween(min, max); List indices = new ArrayList<>(numIndices); for (int i = 0; i < numIndices; i++) { indices.add(new Index(randomAlphaOfLength(10).toLowerCase(Locale.ROOT), UUIDs.randomBase64UUID(LuceneTestCase.random()))); @@ -290,7 +308,7 @@ public static DataStream randomInstance(String dataStreamName, LongSupplier time List failureIndices = List.of(); boolean failureStore = randomBoolean(); if (failureStore) { - failureIndices = randomIndexInstances(); + failureIndices = randomNonEmptyIndexInstances(); } return new DataStream( @@ -307,7 +325,14 @@ public static DataStream randomInstance(String dataStreamName, LongSupplier time randomBoolean() ? DataStreamLifecycle.newBuilder().dataRetention(randomMillisUpToYear9999()).build() : null, failureStore, failureIndices, + randomBoolean(), randomBoolean() + ? 
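// A minimal usage sketch for the helpers introduced above (variable names are illustrative):
List<Index> maybeEmpty = DataStreamTestHelper.randomIndexInstances();          // 0 to 128 indices, may be empty
List<Index> atLeastOne = DataStreamTestHelper.randomNonEmptyIndexInstances();  // 1 to 128, e.g. for failure stores
List<Index> bounded    = DataStreamTestHelper.randomIndexInstances(2, 5);      // caller-chosen range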
new DataStreamAutoShardingEvent( + indices.get(indices.size() - 1).getName(), + randomIntBetween(1, 10), + randomMillisUpToYear9999() + ) + : null ); } diff --git a/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java b/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java index 5f6e50a7c83e0..902e089679f49 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java +++ b/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java @@ -17,9 +17,11 @@ import org.apache.lucene.util.Accountable; import org.apache.lucene.util.Accountables; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefIterator; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.breaker.NoopCircuitBreaker; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; @@ -398,6 +400,16 @@ public void fill(long fromIndex, long toIndex, byte value) { in.fill(fromIndex, toIndex, value); } + @Override + public BytesRefIterator iterator() { + return in.iterator(); + } + + @Override + public void fillWith(StreamInput streamInput) throws IOException { + in.fillWith(streamInput); + } + @Override public boolean hasArray() { return in.hasArray(); diff --git a/test/framework/src/main/java/org/elasticsearch/common/util/MockPageCacheRecycler.java b/test/framework/src/main/java/org/elasticsearch/common/util/MockPageCacheRecycler.java index 56dea95b6a282..80f3db60e9432 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/util/MockPageCacheRecycler.java +++ b/test/framework/src/main/java/org/elasticsearch/common/util/MockPageCacheRecycler.java @@ -31,11 +31,11 @@ public MockPageCacheRecycler(Settings settings) { private V wrap(final V v) { return new V() { - private final LeakTracker.Leak> leak = LeakTracker.INSTANCE.track(v); + private final LeakTracker.Leak leak = LeakTracker.INSTANCE.track(v); @Override public void close() { - boolean leakReleased = leak.close(v); + boolean leakReleased = leak.close(); assert leakReleased : "leak should not have been released already"; final T ref = v(); if (ref instanceof Object[]) { diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java index d1a07cd0ee089..d4c6f8f3df873 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java @@ -146,7 +146,8 @@ public FieldInfo getFieldInfoWithName(String name) { 1, VectorEncoding.BYTE, VectorSimilarityFunction.COSINE, - randomBoolean() + randomBoolean(), + false ); } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MockFieldFilterPlugin.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MockFieldFilterPlugin.java index 21c6b50809ea9..16cb0b4656fcf 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MockFieldFilterPlugin.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MockFieldFilterPlugin.java @@ -8,6 +8,7 @@ package org.elasticsearch.index.mapper; +import org.elasticsearch.core.Predicates; import org.elasticsearch.plugins.MapperPlugin; import 
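// A hedged sketch (resource is an illustrative placeholder) of the simplified LeakTracker contract
// used above: the Leak handle is no longer generic and close() no longer takes the tracked object,
// returning false when the handle had already been released.
LeakTracker.Leak leak = LeakTracker.INSTANCE.track(resource);
try {
    // ... use resource ...
} finally {
    boolean firstRelease = leak.close();
    assert firstRelease : "leak should not have been released already";
}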
org.elasticsearch.plugins.Plugin; @@ -19,6 +20,6 @@ public class MockFieldFilterPlugin extends Plugin implements MapperPlugin { @Override public Function> getFieldFilter() { // this filter doesn't filter any field out, but it's used to exercise the code path executed when the filter is not no-op - return index -> field -> true; + return index -> Predicates.always(); } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java b/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java index 181b6c82379ed..26e887338158d 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java @@ -14,10 +14,10 @@ import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.repositories.FinalizeSnapshotContext; -import org.elasticsearch.repositories.GetSnapshotInfoContext; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.IndexMetaDataGenerations; import org.elasticsearch.repositories.Repository; @@ -29,10 +29,12 @@ import org.elasticsearch.repositories.SnapshotShardContext; import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotId; +import org.elasticsearch.snapshots.SnapshotInfo; import java.util.Collection; import java.util.Collections; import java.util.concurrent.Executor; +import java.util.function.BooleanSupplier; import static java.util.Collections.emptyList; import static org.elasticsearch.repositories.RepositoryData.EMPTY_REPO_GEN; @@ -61,8 +63,14 @@ public RepositoryMetadata getMetadata() { } @Override - public void getSnapshotInfo(GetSnapshotInfoContext context) { - throw new UnsupportedOperationException(); + public void getSnapshotInfo( + Collection snapshotIds, + boolean abortOnFailure, + BooleanSupplier isCancelled, + CheckedConsumer consumer, + ActionListener listener + ) { + listener.onFailure(new UnsupportedOperationException()); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreTestUtil.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreTestUtil.java index 79e4a8da713c5..d31bd16b07fcc 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreTestUtil.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreTestUtil.java @@ -35,7 +35,6 @@ import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshots; -import org.elasticsearch.repositories.GetSnapshotInfoContext; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.repositories.ShardGeneration; @@ -254,34 +253,26 @@ private static void assertSnapshotUUIDs( } // Assert that for each snapshot, the relevant metadata was written to index and shard folders final List snapshotInfos = Collections.synchronizedList(new ArrayList<>()); - repository.getSnapshotInfo( - new GetSnapshotInfoContext( - 
List.copyOf(snapshotIds), - true, - () -> false, - (ctx, sni) -> snapshotInfos.add(sni), - new ActionListener<>() { - @Override - public void onResponse(Void unused) { - try { - assertSnapshotInfosConsistency(repository, repositoryData, indices, snapshotInfos); - } catch (Exception e) { - listener.onResponse(new AssertionError(e)); - return; - } catch (AssertionError e) { - listener.onResponse(e); - return; - } - listener.onResponse(null); - } - - @Override - public void onFailure(Exception e) { - listener.onResponse(new AssertionError(e)); - } + repository.getSnapshotInfo(List.copyOf(snapshotIds), true, () -> false, snapshotInfos::add, new ActionListener<>() { + @Override + public void onResponse(Void unused) { + try { + assertSnapshotInfosConsistency(repository, repositoryData, indices, snapshotInfos); + } catch (Exception e) { + listener.onResponse(new AssertionError(e)); + return; + } catch (AssertionError e) { + listener.onResponse(e); + return; } - ) - ); + listener.onResponse(null); + } + + @Override + public void onFailure(Exception e) { + listener.onResponse(new AssertionError(e)); + } + }); } private static void assertSnapshotInfosConsistency( diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java index 99734e5e224aa..1787638f9fdf3 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java @@ -1114,7 +1114,11 @@ public void testSupportedFieldTypes() throws IOException { // We should make sure if the builder says it supports sampling, that the internal aggregations returned override // finalizeSampling if (aggregationBuilder.supportsSampling()) { - SamplingContext randomSamplingContext = new SamplingContext(randomDoubleBetween(1e-8, 0.1, false), randomInt()); + SamplingContext randomSamplingContext = new SamplingContext( + randomDoubleBetween(1e-8, 0.1, false), + randomInt(), + randomBoolean() ? 
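// A hedged restatement of the getSnapshotInfo call introduced above, with each argument labelled;
// the parameter roles are inferred from the signature change in RestoreOnlyRepository, and
// completionListener stands in for the test's anonymous ActionListener.
repository.getSnapshotInfo(
    List.copyOf(snapshotIds),  // snapshots whose SnapshotInfo should be loaded
    true,                      // abortOnFailure: stop on the first error
    () -> false,               // isCancelled: never, in this assertion helper
    snapshotInfos::add,        // consumer invoked once per loaded SnapshotInfo
    completionListener         // completed after all infos have been processed
);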
null : randomInt() + ); InternalAggregation sampledResult = internalAggregation.finalizeSampling(randomSamplingContext); assertThat(sampledResult.getClass(), equalTo(internalAggregation.getClass())); } diff --git a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index 3744011b5b9f6..2e7ce0400d78b 100644 --- a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -12,9 +12,8 @@ import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; -import org.elasticsearch.action.admin.cluster.repositories.get.TransportGetRepositoriesAction; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; -import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; +import org.elasticsearch.action.admin.cluster.snapshots.get.SnapshotSortKey; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.support.GroupedActionListener; import org.elasticsearch.action.support.PlainActionFuture; @@ -42,6 +41,7 @@ import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; +import org.elasticsearch.repositories.ResolvedRepositories; import org.elasticsearch.repositories.ShardGenerations; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil; @@ -734,11 +734,7 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IO }); } - public static void assertSnapshotListSorted( - List snapshotInfos, - @Nullable GetSnapshotsRequest.SortBy sort, - SortOrder sortOrder - ) { + public static void assertSnapshotListSorted(List snapshotInfos, @Nullable SnapshotSortKey sort, SortOrder sortOrder) { final BiConsumer assertion; if (sort == null) { assertion = (s1, s2) -> assertThat(s2, greaterThanOrEqualTo(s1)); @@ -799,7 +795,7 @@ public static Map randomUserMetadata() { } public static String[] matchAllPattern() { - return randomBoolean() ? new String[] { "*" } : new String[] { TransportGetRepositoriesAction.ALL_PATTERN }; + return randomBoolean() ? 
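// A minimal sketch of the widened SamplingContext construction exercised above: a third, nullable
// argument has been added (assumed here to be an optional per-shard seed) next to the sampling
// probability and the seed; internalAggregation is the aggregation under test.
SamplingContext context = new SamplingContext(
    0.05,   // sampling probability, between 1e-8 and 0.1 in the randomised tests
    42,     // seed
    null    // optional third value; the tests above randomise between null and randomInt()
);
InternalAggregation sampled = internalAggregation.finalizeSampling(context);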
new String[] { "*" } : new String[] { ResolvedRepositories.ALL_PATTERN }; } public RepositoryMetadata getRepositoryMetadata(String repo) { diff --git a/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingMeterRegistry.java b/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingMeterRegistry.java index 86bfd9bf38c26..33693c297f166 100644 --- a/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingMeterRegistry.java +++ b/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingMeterRegistry.java @@ -33,7 +33,7 @@ public class RecordingMeterRegistry implements MeterRegistry { protected final MetricRecorder recorder = new MetricRecorder<>(); - MetricRecorder getRecorder() { + public MetricRecorder getRecorder() { return recorder; } diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializationTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializationTestCase.java index 238f523872f83..922f2ba74dcf2 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializationTestCase.java @@ -10,6 +10,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Predicates; import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -126,7 +127,7 @@ protected boolean supportsUnknownFields() { * Returns a predicate that given the field name indicates whether the field has to be excluded from random fields insertion or not */ protected Predicate getRandomFieldsExcludeFilter() { - return field -> false; + return Predicates.never(); } /** diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractXContentTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractXContentTestCase.java index 4df1e745f3bf4..848ec3c2f1738 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractXContentTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractXContentTestCase.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.CheckedFunction; +import org.elasticsearch.core.Predicates; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -326,7 +327,7 @@ protected boolean assertToXContentEquivalence() { * Returns a predicate that given the field name indicates whether the field has to be excluded from random fields insertion or not */ protected Predicate getRandomFieldsExcludeFilter() { - return field -> false; + return Predicates.never(); } /** diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 7b4032cc56cef..052b9a7165a6c 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -169,6 +169,7 @@ import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; +import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import 
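// A minimal sketch of the substitution applied above and in the other test-framework classes in
// this diff: the shared constants in org.elasticsearch.core.Predicates replace freshly allocated
// lambdas with the same behaviour.
Predicate<String> acceptEverything = Predicates.always(); // previously: field -> true
Predicate<String> acceptNothing    = Predicates.never();  // previously: field -> false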
java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; @@ -499,6 +500,7 @@ public void removeHeaderWarningAppender() { @Before public final void before() { + LeakTracker.setContextHint(getTestName()); logger.info("{}before test", getTestParamsForLogging()); assertNull("Thread context initialized twice", threadContext); if (enableWarningsCheck()) { @@ -530,6 +532,7 @@ public final void after() throws Exception { ensureAllSearchContextsReleased(); ensureCheckIndexPassed(); logger.info("{}after test", getTestParamsForLogging()); + LeakTracker.setContextHint(""); } private String getTestParamsForLogging() { @@ -702,7 +705,6 @@ public void log(StatusData data) { // separate method so that this can be checked again after suite scoped cluster is shut down protected static void checkStaticState() throws Exception { - LeakTracker.INSTANCE.reportLeak(); MockBigArrays.ensureAllArraysAreReleased(); // ensure no one changed the status logger level on us @@ -2082,6 +2084,15 @@ public static void safeAwait(CountDownLatch countDownLatch) { } } + public static void safeAcquire(Semaphore semaphore) { + try { + assertTrue("safeAcquire: Semaphore did not acquire permit within the timeout", semaphore.tryAcquire(10, TimeUnit.SECONDS)); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + fail(e, "safeAcquire: interrupted waiting for Semaphore to acquire permit"); + } + } + public static T safeAwait(SubscribableListener listener) { final var future = new PlainActionFuture(); listener.addListener(future); diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java index f1b147eefe723..12c5085cbcd73 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java @@ -283,7 +283,11 @@ public void testReduceRandom() throws IOException { doAssertReducedMultiBucketConsumer(reduced, bucketConsumer); assertReduced(reduced, inputs.toReduce()); if (supportsSampling()) { - SamplingContext randomContext = new SamplingContext(randomDoubleBetween(1e-8, 0.1, false), randomInt()); + SamplingContext randomContext = new SamplingContext( + randomDoubleBetween(1e-8, 0.1, false), + randomInt(), + randomBoolean() ? 
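// A minimal usage sketch for the safeAcquire helper added above (the async call is an illustrative
// placeholder): tests can block on a Semaphore without an unbounded wait, failing with a clear
// message if no permit arrives within the helper's 10-second timeout.
Semaphore permits = new Semaphore(0);
runAsyncTaskThatEventuallyCalls(permits::release); // placeholder for the code under test
safeAcquire(permits);                              // asserts a permit was acquired in time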
null : randomInt() + ); @SuppressWarnings("unchecked") T sampled = (T) reduced.finalizeSampling(randomContext); assertSampled(sampled, reduced, randomContext); diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 38c38e719138e..16320b3b26301 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -64,6 +64,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Predicates; import org.elasticsearch.core.TimeValue; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; @@ -650,7 +651,7 @@ private NodeAndClient getOrBuildRandomNode() { } private NodeAndClient getRandomNodeAndClient() { - return getRandomNodeAndClient(nc -> true); + return getRandomNodeAndClient(Predicates.always()); } private synchronized NodeAndClient getRandomNodeAndClient(Predicate predicate) { @@ -1621,7 +1622,7 @@ private synchronized T getInstance(Class clazz, Predicate * Returns a reference to a random nodes instances of the given class >T< */ public T getInstance(Class clazz) { - return getInstance(clazz, nc -> true); + return getInstance(clazz, Predicates.always()); } private static T getInstanceFromNode(Class clazz, Node node) { @@ -1990,7 +1991,7 @@ public String getMasterName(@Nullable String viaNode) { * @return the name of a random node in a cluster */ public String getRandomNodeName() { - return getNodeNameThat(ignored -> true); + return getNodeNameThat(Predicates.always()); } /** diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/OptionalMatchers.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/OptionalMatchers.java index 1cd92296a4ec7..043fe40c91f79 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/OptionalMatchers.java +++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/OptionalMatchers.java @@ -72,7 +72,7 @@ public void describeMismatch(Object item, Description description) { } } - public static Matcher> isPresent() { + public static Matcher> isPresent() { return new IsPresentMatcher<>(anything()); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/TupleMatchers.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/TupleMatchers.java index ac21cf1410882..38774f0e5cfa2 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/TupleMatchers.java +++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/TupleMatchers.java @@ -45,7 +45,10 @@ public void describeTo(final Description description) { * For example: *
    assertThat(Tuple.tuple("myValue1", "myValue2"), isTuple(startsWith("my"), containsString("Val")))
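// A hedged follow-on to the javadoc example above: because isTuple now returns a Matcher rather
// than the concrete TupleMatcher (and OptionalMatchers.isPresent is adjusted in the same spirit),
// the matchers compose directly with assertThat without casts, e.g.:
assertThat(Tuple.tuple("myValue1", "myValue2"), isTuple(startsWith("my"), containsString("Val")));
assertThat(Optional.of("myValue"), isPresent());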
    */ - public static TupleMatcher isTuple(Matcher v1Matcher, Matcher v2Matcher) { + public static Matcher> isTuple( + Matcher v1Matcher, + Matcher v2Matcher + ) { return new TupleMatcher<>(v1Matcher, v2Matcher); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/readiness/ReadinessClientProbe.java b/test/framework/src/main/java/org/elasticsearch/test/readiness/ReadinessClientProbe.java index a479fa0ccbc9e..afefce7b3b2b9 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/readiness/ReadinessClientProbe.java +++ b/test/framework/src/main/java/org/elasticsearch/test/readiness/ReadinessClientProbe.java @@ -14,6 +14,7 @@ import org.elasticsearch.readiness.ReadinessService; import java.io.IOException; +import java.net.ConnectException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.StandardProtocolFamily; @@ -22,8 +23,6 @@ import java.security.PrivilegedAction; import static org.apache.lucene.tests.util.LuceneTestCase.expectThrows; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.containsString; import static org.junit.Assert.fail; /** @@ -70,11 +69,10 @@ default void tcpReadinessProbeFalse(Integer port) throws Exception { try (SocketChannel channel = SocketChannel.open(StandardProtocolFamily.INET)) { AccessController.doPrivileged((PrivilegedAction) () -> { - String message = expectThrows(IOException.class, () -> { + expectThrows(ConnectException.class, () -> { var result = channelConnect(channel, socketAddress); probeLogger.info("No exception on channel connect, connection success [{}]", result); - }).getMessage(); - assertThat(message, containsString("Connection refused")); + }); return null; }); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index 6520e3d0f68bd..307daddd17c37 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -355,16 +355,21 @@ public void initClient() throws IOException { assert nodesVersions != null; } - protected TestFeatureService createTestFeatureService( + protected List createAdditionalFeatureSpecifications() { + return List.of(); + } + + protected final TestFeatureService createTestFeatureService( Map> clusterStateFeatures, Set semanticNodeVersions ) { // Historical features information is unavailable when using legacy test plugins boolean hasHistoricalFeaturesInformation = System.getProperty("tests.features.metadata.path") != null; - final List featureSpecifications; + final List featureSpecifications = new ArrayList<>(createAdditionalFeatureSpecifications()); + featureSpecifications.add(new RestTestLegacyFeatures()); if (hasHistoricalFeaturesInformation) { - featureSpecifications = List.of(new RestTestLegacyFeatures(), new ESRestTestCaseHistoricalFeatures()); + featureSpecifications.add(new ESRestTestCaseHistoricalFeatures()); } else { logger.warn( "This test is running on the legacy test framework; historical features from production code will not be available. 
" @@ -372,7 +377,6 @@ protected TestFeatureService createTestFeatureService( + "If this is a legacy feature used only in tests, you can add it to a test-only FeatureSpecification such as {}.", RestTestLegacyFeatures.class.getCanonicalName() ); - featureSpecifications = List.of(new RestTestLegacyFeatures()); } return new ESRestTestFeatureService( @@ -1103,8 +1107,10 @@ protected static void wipeAllIndices() throws IOException { protected static void wipeAllIndices(boolean preserveSecurityIndices) throws IOException { boolean includeHidden = clusterHasFeature(RestTestLegacyFeatures.HIDDEN_INDICES_SUPPORTED); try { - // remove all indices except ilm and slm history which can pop up after deleting all data streams but shouldn't interfere - final List indexPatterns = new ArrayList<>(List.of("*", "-.ds-ilm-history-*", "-.ds-.slm-history-*")); + // remove all indices except some history indices which can pop up after deleting all data streams but shouldn't interfere + final List indexPatterns = new ArrayList<>( + List.of("*", "-.ds-ilm-history-*", "-.ds-.slm-history-*", "-.ds-.watcher-history-*") + ); if (preserveSecurityIndices) { indexPatterns.add("-.security-*"); } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/LogType.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/LogType.java index 96178e621e018..76468b9be9ed5 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/LogType.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/LogType.java @@ -11,7 +11,9 @@ public enum LogType { SERVER("%s.log"), SERVER_JSON("%s_server.json"), - AUDIT("%s_audit.json"); + AUDIT("%s_audit.json"), + SEARCH_SLOW("%s_index_search_slowlog.json"), + INDEXING_SLOW("%s_index_indexing_slowlog.json"); private final String filenameFormat; diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index 4be9481df58b1..804f4eae4042d 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -28,6 +28,8 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.UpdateForV9; +import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.test.ClasspathUtils; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.TestFeatureService; @@ -35,8 +37,10 @@ import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec; import org.elasticsearch.test.rest.yaml.section.ClientYamlTestSection; import org.elasticsearch.test.rest.yaml.section.ClientYamlTestSuite; +import org.elasticsearch.test.rest.yaml.section.DoSection; import org.elasticsearch.test.rest.yaml.section.ExecutableSection; import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.junit.AfterClass; @@ -61,6 +65,7 @@ import java.util.SortedSet; import java.util.TreeSet; import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; @@ -190,6 +195,11 @@ 
public void initAndResetContext() throws Exception { restTestExecutionContext.clear(); } + @Override + protected List createAdditionalFeatureSpecifications() { + return List.of(new YamlTestLegacyFeatures()); + } + /** * Create the test execution context. Can be overwritten in sub-implementations of the test if the context needs to be modified. */ @@ -230,6 +240,28 @@ public static void closeClient() throws IOException { } } + /** + * Create parameters for this parameterized test. + * Enables support for parsing the legacy version-based node_selector format. + */ + @Deprecated + @UpdateForV9 + public static Iterable createParametersWithLegacyNodeSelectorSupport() throws Exception { + var executableSectionRegistry = new NamedXContentRegistry( + Stream.concat( + ExecutableSection.DEFAULT_EXECUTABLE_CONTEXTS.stream().filter(entry -> entry.name.getPreferredName().equals("do") == false), + Stream.of( + new NamedXContentRegistry.Entry( + ExecutableSection.class, + new ParseField("do"), + DoSection::parseWithLegacyNodeSelectorSupport + ) + ) + ).toList() + ); + return createParameters(executableSectionRegistry, null); + } + /** * Create parameters for this parameterized test. Uses the * {@link ExecutableSection#XCONTENT_REGISTRY list} of executable sections diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/YamlTestLegacyFeatures.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/YamlTestLegacyFeatures.java new file mode 100644 index 0000000000000..0c27cea49f955 --- /dev/null +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/YamlTestLegacyFeatures.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.test.rest.yaml; + +import org.elasticsearch.Version; +import org.elasticsearch.features.FeatureSpecification; +import org.elasticsearch.features.NodeFeature; + +import java.util.Map; + +/** + * This class groups historical features that have been removed from the production codebase, but are still used by YAML test + * to support BwC. Rather than leaving them in the main src we group them here, so it's clear they are not used in production code anymore. 
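// A hedged sketch of how a concrete YAML suite would opt in to the deprecated version-based
// node_selector parsing via the entry point added above; the @ParametersFactory pattern is the
// usual randomizedtesting convention and is an assumption here, not part of this diff.
@ParametersFactory
public static Iterable<Object[]> parameters() throws Exception {
    return createParametersWithLegacyNodeSelectorSupport();
}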
+ */ +public class YamlTestLegacyFeatures implements FeatureSpecification { + + private static final NodeFeature CAT_ALIASES_SHOW_WRITE_INDEX = new NodeFeature("cat_aliases_show_write_index"); + + @Override + public Map getHistoricalFeatures() { + return Map.ofEntries(Map.entry(CAT_ALIASES_SHOW_WRITE_INDEX, Version.V_7_4_0)); + } +} diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java index 65a23bd376212..e5f46ff135171 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java @@ -177,7 +177,7 @@ private static Stream validateExecutableSections( .filter(section -> false == section.getExpectedWarningHeaders().isEmpty()) .filter(section -> false == hasYamlRunnerFeature("warnings", testSection, setupSection, teardownSection)) .map(section -> String.format(Locale.ROOT, """ - attempted to add a [do] with a [warnings] section without a corresponding ["skip": "features": "warnings"] \ + attempted to add a [do] with a [warnings] section without a corresponding ["requires": "test_runner_features": "warnings"] \ so runners that do not support the [warnings] section can skip the test at line [%d]\ """, section.getLocation().lineNumber())); @@ -190,7 +190,7 @@ private static Stream validateExecutableSections( .filter(section -> false == hasYamlRunnerFeature("warnings_regex", testSection, setupSection, teardownSection)) .map(section -> String.format(Locale.ROOT, """ attempted to add a [do] with a [warnings_regex] section without a corresponding \ - ["skip": "features": "warnings_regex"] so runners that do not support the [warnings_regex] \ + ["requires": "test_runner_features": "warnings_regex"] so runners that do not support the [warnings_regex] \ section can skip the test at line [%d]\ """, section.getLocation().lineNumber())) ); @@ -204,7 +204,7 @@ private static Stream validateExecutableSections( .filter(section -> false == hasYamlRunnerFeature("allowed_warnings", testSection, setupSection, teardownSection)) .map(section -> String.format(Locale.ROOT, """ attempted to add a [do] with a [allowed_warnings] section without a corresponding \ - ["skip": "features": "allowed_warnings"] so runners that do not support the [allowed_warnings] \ + ["requires": "test_runner_features": "allowed_warnings"] so runners that do not support the [allowed_warnings] \ section can skip the test at line [%d]\ """, section.getLocation().lineNumber())) ); @@ -218,8 +218,8 @@ private static Stream validateExecutableSections( .filter(section -> false == hasYamlRunnerFeature("allowed_warnings_regex", testSection, setupSection, teardownSection)) .map(section -> String.format(Locale.ROOT, """ attempted to add a [do] with a [allowed_warnings_regex] section without a corresponding \ - ["skip": "features": "allowed_warnings_regex"] so runners that do not support the [allowed_warnings_regex] \ - section can skip the test at line [%d]\ + ["requires": "test_runner_features": "allowed_warnings_regex"] so runners that do not support the \ + [allowed_warnings_regex] section can skip the test at line [%d]\ """, section.getLocation().lineNumber())) ); @@ -232,7 +232,7 @@ private static Stream validateExecutableSections( .filter(section -> false == hasYamlRunnerFeature("node_selector", testSection, setupSection, 
teardownSection)) .map(section -> String.format(Locale.ROOT, """ attempted to add a [do] with a [node_selector] section without a corresponding \ - ["skip": "features": "node_selector"] so runners that do not support the [node_selector] section \ + ["requires": "test_runner_features": "node_selector"] so runners that do not support the [node_selector] section \ can skip the test at line [%d]\ """, section.getLocation().lineNumber())) ); @@ -243,7 +243,7 @@ private static Stream validateExecutableSections( .filter(section -> section instanceof ContainsAssertion) .filter(section -> false == hasYamlRunnerFeature("contains", testSection, setupSection, teardownSection)) .map(section -> String.format(Locale.ROOT, """ - attempted to add a [contains] assertion without a corresponding ["skip": "features": "contains"] \ + attempted to add a [contains] assertion without a corresponding ["requires": "test_runner_features": "contains"] \ so runners that do not support the [contains] assertion can skip the test at line [%d]\ """, section.getLocation().lineNumber())) ); @@ -256,8 +256,9 @@ private static Stream validateExecutableSections( .filter(section -> false == section.getApiCallSection().getHeaders().isEmpty()) .filter(section -> false == hasYamlRunnerFeature("headers", testSection, setupSection, teardownSection)) .map(section -> String.format(Locale.ROOT, """ - attempted to add a [do] with a [headers] section without a corresponding ["skip": "features": "headers"] \ - so runners that do not support the [headers] section can skip the test at line [%d]\ + attempted to add a [do] with a [headers] section without a corresponding \ + ["requires": "test_runner_features": "headers"] so runners that do not support the [headers] section \ + can skip the test at line [%d]\ """, section.getLocation().lineNumber())) ); @@ -267,7 +268,7 @@ private static Stream validateExecutableSections( .filter(section -> section instanceof CloseToAssertion) .filter(section -> false == hasYamlRunnerFeature("close_to", testSection, setupSection, teardownSection)) .map(section -> String.format(Locale.ROOT, """ - attempted to add a [close_to] assertion without a corresponding ["skip": "features": "close_to"] \ + attempted to add a [close_to] assertion without a corresponding ["requires": "test_runner_features": "close_to"] \ so runners that do not support the [close_to] assertion can skip the test at line [%d]\ """, section.getLocation().lineNumber())) ); @@ -278,7 +279,7 @@ private static Stream validateExecutableSections( .filter(section -> section instanceof IsAfterAssertion) .filter(section -> false == hasYamlRunnerFeature("is_after", testSection, setupSection, teardownSection)) .map(section -> String.format(Locale.ROOT, """ - attempted to add an [is_after] assertion without a corresponding ["skip": "features": "is_after"] \ + attempted to add an [is_after] assertion without a corresponding ["requires": "test_runner_features": "is_after"] \ so runners that do not support the [is_after] assertion can skip the test at line [%d]\ """, section.getLocation().lineNumber())) ); diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java index 00b92eac40d7f..e850ade2bdf1d 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java @@ -10,6 +10,7 @@ import 
org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.Build; import org.elasticsearch.Version; import org.elasticsearch.client.HasAttributeNodeSelector; import org.elasticsearch.client.Node; @@ -50,7 +51,7 @@ import static java.util.Collections.unmodifiableList; import static java.util.stream.Collectors.toCollection; import static org.elasticsearch.core.Tuple.tuple; -import static org.elasticsearch.test.hamcrest.RegexMatcher.matches; +import static org.elasticsearch.test.rest.yaml.section.RegexMatcher.matches; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -86,6 +87,16 @@ */ public class DoSection implements ExecutableSection { public static DoSection parse(XContentParser parser) throws IOException { + return parse(parser, false); + } + + @UpdateForV9 + @Deprecated + public static DoSection parseWithLegacyNodeSelectorSupport(XContentParser parser) throws IOException { + return parse(parser, true); + } + + private static DoSection parse(XContentParser parser, boolean enableLegacyNodeSelectorSupport) throws IOException { String currentFieldName = null; XContentParser.Token token; @@ -175,7 +186,7 @@ public static DoSection parse(XContentParser parser) throws IOException { if (token == XContentParser.Token.FIELD_NAME) { selectorName = parser.currentName(); } else { - NodeSelector newSelector = buildNodeSelector(selectorName, parser); + NodeSelector newSelector = buildNodeSelector(selectorName, parser, enableLegacyNodeSelectorSupport); nodeSelector = nodeSelector == NodeSelector.ANY ? newSelector : new ComposeNodeSelector(nodeSelector, newSelector); @@ -610,10 +621,11 @@ private String formatStatusCodeMessage(ClientYamlTestResponse restTestResponse, ) ); - private static NodeSelector buildNodeSelector(String name, XContentParser parser) throws IOException { + private static NodeSelector buildNodeSelector(String name, XContentParser parser, boolean enableLegacyVersionSupport) + throws IOException { return switch (name) { case "attribute" -> parseAttributeValuesSelector(parser); - case "version" -> parseVersionSelector(parser); + case "version" -> parseVersionSelector(parser, enableLegacyVersionSupport); default -> throw new XContentParseException(parser.getTokenLocation(), "unknown node_selector [" + name + "]"); }; } @@ -678,14 +690,31 @@ private static boolean matchWithRange( } } - private static NodeSelector parseVersionSelector(XContentParser parser) throws IOException { + private static NodeSelector parseVersionSelector(XContentParser parser, boolean enableLegacyVersionSupport) throws IOException { if (false == parser.currentToken().isValue()) { throw new XContentParseException(parser.getTokenLocation(), "expected [version] to be a value"); } - var acceptedVersionRange = VersionRange.parseVersionRanges(parser.text()); - final Predicate nodeMatcher = nodeVersion -> matchWithRange(nodeVersion, acceptedVersionRange, parser.getTokenLocation()); - final String versionSelectorString = "version ranges " + acceptedVersionRange; + final Predicate nodeMatcher; + final String versionSelectorString; + if (parser.text().equals("current")) { + nodeMatcher = nodeVersion -> Build.current().version().equals(nodeVersion); + versionSelectorString = "version is " + Build.current().version() + " (current)"; + } else if (parser.text().equals("original")) { + nodeMatcher = nodeVersion -> Build.current().version().equals(nodeVersion) == false; + 
versionSelectorString = "version is not current (original)"; + } else { + if (enableLegacyVersionSupport) { + var acceptedVersionRange = VersionRange.parseVersionRanges(parser.text()); + nodeMatcher = nodeVersion -> matchWithRange(nodeVersion, acceptedVersionRange, parser.getTokenLocation()); + versionSelectorString = "version ranges " + acceptedVersionRange; + } else { + throw new XContentParseException( + parser.getTokenLocation(), + "unknown version selector [" + parser.text() + "]. Only [current] and [original] are allowed." + ); + } + } return new NodeSelector() { @Override diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/MatchAssertion.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/MatchAssertion.java index 4ecf86081574e..34fa178a1853f 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/MatchAssertion.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/MatchAssertion.java @@ -21,7 +21,7 @@ import static org.elasticsearch.test.ListMatcher.matchesList; import static org.elasticsearch.test.MapMatcher.assertMap; import static org.elasticsearch.test.MapMatcher.matchesMap; -import static org.elasticsearch.test.hamcrest.RegexMatcher.matches; +import static org.elasticsearch.test.rest.yaml.section.RegexMatcher.matches; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.junit.Assert.assertNotNull; diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSection.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSection.java index 7f65a29e510b6..f4c9aaa619911 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSection.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSection.java @@ -9,6 +9,7 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext; import org.elasticsearch.test.rest.yaml.Features; import org.elasticsearch.xcontent.XContentLocation; @@ -17,7 +18,9 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.HashSet; import java.util.List; +import java.util.Set; import java.util.function.Predicate; /** @@ -34,9 +37,13 @@ public class PrerequisiteSection { static class PrerequisiteSectionBuilder { String skipVersionRange = null; String skipReason = null; + String requiresReason = null; List requiredYamlRunnerFeatures = new ArrayList<>(); List skipOperatingSystems = new ArrayList<>(); + Set skipClusterFeatures = new HashSet<>(); + Set requiredClusterFeatures = new HashSet<>(); + enum XPackRequired { NOT_SPECIFIED, YES, @@ -56,6 +63,11 @@ public PrerequisiteSectionBuilder setSkipReason(String skipReason) { return this; } + public PrerequisiteSectionBuilder setRequiresReason(String requiresReason) { + this.requiresReason = requiresReason; + return this; + } + public PrerequisiteSectionBuilder requireYamlRunnerFeature(String featureName) { requiredYamlRunnerFeatures.add(featureName); return this; @@ -79,6 +91,16 @@ public PrerequisiteSectionBuilder skipIfXPack() { return this; } + public PrerequisiteSectionBuilder skipIfClusterFeature(String featureName) { + skipClusterFeatures.add(featureName); + return this; + } + + 
public PrerequisiteSectionBuilder requireClusterFeature(String featureName) { + requiredClusterFeatures.add(featureName); + return this; + } + public PrerequisiteSectionBuilder skipIfOs(String osName) { this.skipOperatingSystems.add(osName); return this; @@ -88,7 +110,9 @@ void validate(XContentLocation contentLocation) { if ((Strings.hasLength(skipVersionRange) == false) && requiredYamlRunnerFeatures.isEmpty() && skipOperatingSystems.isEmpty() - && xpackRequired == XPackRequired.NOT_SPECIFIED) { + && xpackRequired == XPackRequired.NOT_SPECIFIED + && requiredClusterFeatures.isEmpty() + && skipClusterFeatures.isEmpty()) { throw new ParsingException( contentLocation, "at least one criteria (version, cluster features, runner features, os) is mandatory within a skip section" @@ -100,6 +124,12 @@ void validate(XContentLocation contentLocation) { if (skipOperatingSystems.isEmpty() == false && Strings.hasLength(skipReason) == false) { throw new ParsingException(contentLocation, "reason is mandatory within skip os section"); } + if (skipClusterFeatures.isEmpty() == false && Strings.hasLength(skipReason) == false) { + throw new ParsingException(contentLocation, "reason is mandatory within skip cluster_features section"); + } + if (requiredClusterFeatures.isEmpty() == false && Strings.hasLength(requiresReason) == false) { + throw new ParsingException(contentLocation, "reason is mandatory within requires cluster_features section"); + } // make feature "skip_os" mandatory if os is given, this is a temporary solution until language client tests know about os if (skipOperatingSystems.isEmpty() == false && requiredYamlRunnerFeatures.contains("skip_os") == false) { throw new ParsingException(contentLocation, "if os is specified, test runner feature [skip_os] must be set"); @@ -107,6 +137,9 @@ void validate(XContentLocation contentLocation) { if (xpackRequired == XPackRequired.MISMATCHED) { throw new ParsingException(contentLocation, "either [xpack] or [no_xpack] can be present, not both"); } + if (Sets.haveNonEmptyIntersection(skipClusterFeatures, requiredClusterFeatures)) { + throw new ParsingException(contentLocation, "a cluster feature can be specified either in [requires] or [skip], not both"); + } } public PrerequisiteSection build() { @@ -131,8 +164,14 @@ public PrerequisiteSection build() { if (skipOperatingSystems.isEmpty() == false) { skipCriteriaList.add(Prerequisites.skipOnOsList(skipOperatingSystems)); } + if (requiredClusterFeatures.isEmpty() == false) { + requiresCriteriaList.add(Prerequisites.requireClusterFeatures(requiredClusterFeatures)); + } + if (skipClusterFeatures.isEmpty() == false) { + skipCriteriaList.add(Prerequisites.skipOnClusterFeatures(skipClusterFeatures)); + } } - return new PrerequisiteSection(skipCriteriaList, skipReason, requiresCriteriaList, null, requiredYamlRunnerFeatures); + return new PrerequisiteSection(skipCriteriaList, skipReason, requiresCriteriaList, requiresReason, requiredYamlRunnerFeatures); } } @@ -160,6 +199,10 @@ static PrerequisiteSectionBuilder parseInternal(XContentParser parser) throws IO parseSkipSection(parser, builder); hasPrerequisiteSection = true; maybeAdvanceToNextField(parser); + } else if ("requires".equals(parser.currentName())) { + parseRequiresSection(parser, builder); + hasPrerequisiteSection = true; + maybeAdvanceToNextField(parser); } else { unknownFieldName = true; } @@ -209,6 +252,8 @@ static void parseSkipSection(XContentParser parser, PrerequisiteSectionBuilder b parseFeatureField(parser.text(), builder); } else if 
("os".equals(currentFieldName)) { builder.skipIfOs(parser.text()); + } else if ("cluster_features".equals(currentFieldName)) { + builder.skipIfClusterFeature(parser.text()); } else { throw new ParsingException( parser.getTokenLocation(), @@ -224,6 +269,54 @@ static void parseSkipSection(XContentParser parser, PrerequisiteSectionBuilder b while (parser.nextToken() != XContentParser.Token.END_ARRAY) { builder.skipIfOs(parser.text()); } + } else if ("cluster_features".equals(currentFieldName)) { + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + builder.skipIfClusterFeature(parser.text()); + } + } + } + } + parser.nextToken(); + } + + static void parseRequiresSection(XContentParser parser, PrerequisiteSectionBuilder builder) throws IOException { + if (parser.nextToken() != XContentParser.Token.START_OBJECT) { + throw new IllegalArgumentException( + "Expected [" + + XContentParser.Token.START_OBJECT + + ", found [" + + parser.currentToken() + + "], the requires section is not properly indented" + ); + } + String currentFieldName = null; + XContentParser.Token token; + + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if ("reason".equals(currentFieldName)) { + builder.setRequiresReason(parser.text()); + } else if ("test_runner_features".equals(currentFieldName)) { + parseFeatureField(parser.text(), builder); + } else if ("cluster_features".equals(currentFieldName)) { + builder.requireClusterFeature(parser.text()); + } else { + throw new ParsingException( + parser.getTokenLocation(), + "field " + currentFieldName + " not supported within requires section" + ); + } + } else if (token == XContentParser.Token.START_ARRAY) { + if ("test_runner_features".equals(currentFieldName)) { + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + parseFeatureField(parser.text(), builder); + } + } else if ("cluster_features".equals(currentFieldName)) { + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + builder.requireClusterFeature(parser.text()); + } } } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/RegexMatcher.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/RegexMatcher.java similarity index 86% rename from test/framework/src/main/java/org/elasticsearch/test/hamcrest/RegexMatcher.java rename to test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/RegexMatcher.java index 295f817b96afa..b7b1946a82b9b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/RegexMatcher.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/RegexMatcher.java @@ -6,7 +6,7 @@ * Side Public License, v 1. 
*/ -package org.elasticsearch.test.hamcrest; +package org.elasticsearch.test.rest.yaml.section; import org.hamcrest.Description; import org.hamcrest.TypeSafeMatcher; @@ -16,17 +16,17 @@ /** * Matcher that supports regular expression and allows to provide optional flags */ -public class RegexMatcher extends TypeSafeMatcher { +class RegexMatcher extends TypeSafeMatcher { private final String regex; private final Pattern pattern; - public RegexMatcher(String regex) { + RegexMatcher(String regex) { this.regex = regex; this.pattern = Pattern.compile(regex); } - public RegexMatcher(String regex, int flag) { + RegexMatcher(String regex, int flag) { this.regex = regex; this.pattern = Pattern.compile(regex, flag); } diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/VersionRange.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/VersionRange.java index df4eba050dc27..ab5377532bbbc 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/VersionRange.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/VersionRange.java @@ -9,6 +9,7 @@ import org.elasticsearch.Build; import org.elasticsearch.Version; +import org.elasticsearch.core.Predicates; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.rest.ESRestTestCase; @@ -23,9 +24,9 @@ class VersionRange { private VersionRange() {} - static final Predicate> NEVER = v -> false; + static final Predicate> NEVER = Predicates.never(); - static final Predicate> ALWAYS = v -> true; + static final Predicate> ALWAYS = Predicates.always(); static final Predicate> CURRENT = versions -> versions.size() == 1 && versions.contains(Build.current().version()); @@ -57,6 +58,11 @@ public boolean test(Set nodesVersions) { .orElseThrow(() -> new IllegalArgumentException("Checks against a version range require semantic version format (x.y.z)")); return minimumNodeVersion.onOrAfter(lower) && minimumNodeVersion.onOrBefore(upper); } + + @Override + public String toString() { + return "MinimumContainedInVersionRange{lower=" + lower + ", upper=" + upper + '}'; + } } static List>> parseVersionRanges(String rawRanges) { diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java index edc043e15527d..1f5bdc71dde37 100644 --- a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java +++ b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java @@ -468,6 +468,41 @@ public void testParseSkipOs() throws Exception { assertThat(restTestSuite.getTestSections().get(0).getPrerequisiteSection().hasYamlRunnerFeature("skip_os"), equalTo(true)); } + public void testParseSkipAndRequireClusterFeatures() throws Exception { + parser = createParser(YamlXContent.yamlXContent, """ + "Broken on some os": + + - skip: + cluster_features: [unsupported-feature1, unsupported-feature2] + reason: "unsupported-features are not supported" + - requires: + cluster_features: required-feature1 + reason: "required-feature1 is required" + - do: + indices.get_mapping: + index: test_index + type: test_type + + - match: {test_type.properties.text.type: string} + - match: {test_type.properties.text.analyzer: whitespace} + """); + + ClientYamlTestSuite restTestSuite = 
ClientYamlTestSuite.parse(getTestClass().getName(), getTestName(), Optional.empty(), parser); + + assertThat(restTestSuite, notNullValue()); + assertThat(restTestSuite.getName(), equalTo(getTestName())); + assertThat(restTestSuite.getFile().isPresent(), equalTo(false)); + assertThat(restTestSuite.getTestSections().size(), equalTo(1)); + + assertThat(restTestSuite.getTestSections().get(0).getName(), equalTo("Broken on some os")); + assertThat(restTestSuite.getTestSections().get(0).getPrerequisiteSection().isEmpty(), equalTo(false)); + assertThat( + restTestSuite.getTestSections().get(0).getPrerequisiteSection().skipReason, + equalTo("unsupported-features are not supported") + ); + assertThat(restTestSuite.getTestSections().get(0).getPrerequisiteSection().requireReason, equalTo("required-feature1 is required")); + } + public void testParseFileWithSingleTestSection() throws Exception { final Path filePath = createTempFile("tyf", ".yml"); Files.writeString(filePath, """ @@ -541,7 +576,7 @@ public void testAddingDoWithWarningWithoutSkipWarnings() { Exception e = expectThrows(IllegalArgumentException.class, testSuite::validate); assertThat(e.getMessage(), containsString(Strings.format(""" api/name: - attempted to add a [do] with a [warnings] section without a corresponding ["skip": "features": "warnings"] \ + attempted to add a [do] with a [warnings] section without a corresponding ["requires": "test_runner_features": "warnings"] \ so runners that do not support the [warnings] section can skip the test at line [%d]\ """, lineNumber))); } @@ -555,7 +590,8 @@ public void testAddingDoWithWarningRegexWithoutSkipWarnings() { Exception e = expectThrows(IllegalArgumentException.class, testSuite::validate); assertThat(e.getMessage(), containsString(Strings.format(""" api/name: - attempted to add a [do] with a [warnings_regex] section without a corresponding ["skip": "features": "warnings_regex"] \ + attempted to add a [do] with a [warnings_regex] section without a corresponding \ + ["requires": "test_runner_features": "warnings_regex"] \ so runners that do not support the [warnings_regex] section can skip the test at line [%d]\ """, lineNumber))); } @@ -569,7 +605,7 @@ public void testAddingDoWithAllowedWarningWithoutSkipAllowedWarnings() { Exception e = expectThrows(IllegalArgumentException.class, testSuite::validate); assertThat(e.getMessage(), containsString(Strings.format(""" api/name: - attempted to add a [do] with a [allowed_warnings] section without a corresponding ["skip": "features": \ + attempted to add a [do] with a [allowed_warnings] section without a corresponding ["requires": "test_runner_features": \ "allowed_warnings"] so runners that do not support the [allowed_warnings] section can skip the test at \ line [%d]\ """, lineNumber))); @@ -584,7 +620,7 @@ public void testAddingDoWithAllowedWarningRegexWithoutSkipAllowedWarnings() { Exception e = expectThrows(IllegalArgumentException.class, testSuite::validate); assertThat(e.getMessage(), containsString(Strings.format(""" api/name: - attempted to add a [do] with a [allowed_warnings_regex] section without a corresponding ["skip": "features": \ + attempted to add a [do] with a [allowed_warnings_regex] section without a corresponding ["requires": "test_runner_features": \ "allowed_warnings_regex"] so runners that do not support the [allowed_warnings_regex] section can skip the test \ at line [%d]\ """, lineNumber))); @@ -600,7 +636,7 @@ public void testAddingDoWithHeaderWithoutSkipHeaders() { Exception e = 
expectThrows(IllegalArgumentException.class, testSuite::validate); assertThat(e.getMessage(), containsString(Strings.format(""" api/name: - attempted to add a [do] with a [headers] section without a corresponding ["skip": "features": "headers"] \ + attempted to add a [do] with a [headers] section without a corresponding ["requires": "test_runner_features": "headers"] \ so runners that do not support the [headers] section can skip the test at line [%d]\ """, lineNumber))); } @@ -615,7 +651,8 @@ public void testAddingDoWithNodeSelectorWithoutSkipNodeSelector() { Exception e = expectThrows(IllegalArgumentException.class, testSuite::validate); assertThat(e.getMessage(), containsString(Strings.format(""" api/name: - attempted to add a [do] with a [node_selector] section without a corresponding ["skip": "features": "node_selector"] \ + attempted to add a [do] with a [node_selector] section without a corresponding \ + ["requires": "test_runner_features": "node_selector"] \ so runners that do not support the [node_selector] section can skip the test at line [%d]\ """, lineNumber))); } @@ -631,7 +668,7 @@ public void testAddingContainsWithoutSkipContains() { Exception e = expectThrows(IllegalArgumentException.class, testSuite::validate); assertThat(e.getMessage(), containsString(Strings.format(""" api/name: - attempted to add a [contains] assertion without a corresponding ["skip": "features": "contains"] \ + attempted to add a [contains] assertion without a corresponding ["requires": "test_runner_features": "contains"] \ so runners that do not support the [contains] assertion can skip the test at line [%d]\ """, lineNumber))); } @@ -683,13 +720,15 @@ public void testMultipleValidationErrors() { Exception e = expectThrows(IllegalArgumentException.class, testSuite::validate); assertEquals(Strings.format(""" api/name: - attempted to add a [contains] assertion without a corresponding ["skip": "features": "contains"] so runners that \ - do not support the [contains] assertion can skip the test at line [%d], - attempted to add a [do] with a [warnings] section without a corresponding ["skip": "features": "warnings"] so runners \ - that do not support the [warnings] section can skip the test at line [%d], - attempted to add a [do] with a [node_selector] section without a corresponding ["skip": "features": "node_selector"] so \ - runners that do not support the [node_selector] section can skip the test \ - at line [%d]\ + attempted to add a [contains] assertion without a corresponding \ + ["requires": "test_runner_features": "contains"] \ + so runners that do not support the [contains] assertion can skip the test at line [%d], + attempted to add a [do] with a [warnings] section without a corresponding \ + ["requires": "test_runner_features": "warnings"] \ + so runners that do not support the [warnings] section can skip the test at line [%d], + attempted to add a [do] with a [node_selector] section without a corresponding \ + ["requires": "test_runner_features": "node_selector"] \ + so runners that do not support the [node_selector] section can skip the test at line [%d]\ """, firstLineNumber, secondLineNumber, thirdLineNumber), e.getMessage()); } diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java index 0cb9a3e29e63f..7d9557d29e568 100644 --- a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java +++ 
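
The reworded validation messages above point at the new requires form for runner features; inside a test body that declaration would look roughly like this sketch (the API call and warning text are placeholders):

- requires:
    test_runner_features: warnings
    reason: "the do section below expects a deprecation warning"
- do:
    warnings:
      - "this is a placeholder deprecation warning"
    indices.get_mapping:
      index: test_index
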
b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java @@ -10,12 +10,12 @@ import org.apache.http.HttpHost; import org.elasticsearch.Build; -import org.elasticsearch.Version; import org.elasticsearch.client.Node; import org.elasticsearch.client.NodeSelector; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.logging.HeaderWarning; import org.elasticsearch.core.Strings; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext; import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse; import org.elasticsearch.xcontent.XContentLocation; @@ -579,14 +579,15 @@ public void testParseDoSectionAllowedWarnings() throws Exception { assertThat(e.getMessage(), equalTo("the warning [foo] was both allowed and expected")); } - public void testNodeSelectorByVersionRange() throws IOException { + @UpdateForV9 // remove + public void testLegacyNodeSelectorByVersionRange() throws IOException { parser = createParser(YamlXContent.yamlXContent, """ node_selector: version: 5.2.0-6.0.0 indices.get_field_mapping: index: test_index"""); - DoSection doSection = DoSection.parse(parser); + DoSection doSection = DoSection.parseWithLegacyNodeSelectorSupport(parser); assertNotSame(NodeSelector.ANY, doSection.getApiCallSection().getNodeSelector()); Node v170 = nodeWithVersion("1.7.0"); Node v521 = nodeWithVersion("5.2.1"); @@ -629,26 +630,21 @@ public void testNodeSelectorByVersionRange() throws IOException { } } - public void testNodeSelectorByVersionRangeFailsWithNonSemanticVersion() throws IOException { + public void testNodeSelectorByVersionRangeFails() throws IOException { parser = createParser(YamlXContent.yamlXContent, """ node_selector: version: 5.2.0-6.0.0 indices.get_field_mapping: index: test_index"""); - DoSection doSection = DoSection.parse(parser); - assertNotSame(NodeSelector.ANY, doSection.getApiCallSection().getNodeSelector()); - Node nonSemantic = nodeWithVersion("abddef"); - List nodes = new ArrayList<>(); + var exception = expectThrows(XContentParseException.class, () -> DoSection.parse(parser)); + assertThat(exception.getMessage(), endsWith("unknown version selector [5.2.0-6.0.0]. Only [current] and [original] are allowed.")); - var exception = expectThrows( - XContentParseException.class, - () -> doSection.getApiCallSection().getNodeSelector().select(List.of(nonSemantic)) - ); - assertThat( - exception.getMessage(), - endsWith("[version] range node selector expects a semantic version format (x.y.z), but found abddef") - ); + // We are throwing an early exception - this means the parser content is not fully consumed. This is OK as it would make + // the tests fail pointing to the correct syntax error location, preventing any further use of parser. + // Explicitly close the parser to avoid AbstractClientYamlTestFragmentParserTestCase checks. 
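
With the stricter parsing exercised above, DoSection.parse accepts only the two symbolic version selectors, and version ranges remain reachable only through parseWithLegacyNodeSelectorSupport; in YAML the accepted forms are roughly:

- do:
    node_selector:
      version: current    # only nodes running the current build's version
    indices.get_field_mapping:
      index: test_index

- do:
    node_selector:
      version: original   # the other nodes, i.e. versions different from the current build
    indices.get_field_mapping:
      index: test_index
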
+ parser.close(); + parser = null; } public void testNodeSelectorCurrentVersion() throws IOException { @@ -663,16 +659,36 @@ public void testNodeSelectorCurrentVersion() throws IOException { Node v170 = nodeWithVersion("1.7.0"); Node v521 = nodeWithVersion("5.2.1"); Node v550 = nodeWithVersion("5.5.0"); - Node oldCurrent = nodeWithVersion(Version.CURRENT.toString()); - Node newCurrent = nodeWithVersion(Build.current().version()); + Node current = nodeWithVersion(Build.current().version()); + List nodes = new ArrayList<>(); + nodes.add(v170); + nodes.add(v521); + nodes.add(v550); + nodes.add(current); + doSection.getApiCallSection().getNodeSelector().select(nodes); + assertEquals(List.of(current), nodes); + } + + public void testNodeSelectorNonCurrentVersion() throws IOException { + parser = createParser(YamlXContent.yamlXContent, """ + node_selector: + version: original + indices.get_field_mapping: + index: test_index"""); + + DoSection doSection = DoSection.parse(parser); + assertNotSame(NodeSelector.ANY, doSection.getApiCallSection().getNodeSelector()); + Node v170 = nodeWithVersion("1.7.0"); + Node v521 = nodeWithVersion("5.2.1"); + Node v550 = nodeWithVersion("5.5.0"); + Node current = nodeWithVersion(Build.current().version()); List nodes = new ArrayList<>(); nodes.add(v170); nodes.add(v521); nodes.add(v550); - nodes.add(oldCurrent); - nodes.add(newCurrent); + nodes.add(current); doSection.getApiCallSection().getNodeSelector().select(nodes); - assertEquals(List.of(oldCurrent, newCurrent), nodes); + assertEquals(List.of(v170, v521, v550), nodes); } private static Node nodeWithVersion(String version) { @@ -741,7 +757,7 @@ private static Node nodeWithAttributes(Map> attributes) { public void testNodeSelectorByTwoThings() throws IOException { parser = createParser(YamlXContent.yamlXContent, """ node_selector: - version: 5.2.0-6.0.0 + version: current attribute: attr: val indices.get_field_mapping: @@ -749,9 +765,9 @@ public void testNodeSelectorByTwoThings() throws IOException { DoSection doSection = DoSection.parse(parser); assertNotSame(NodeSelector.ANY, doSection.getApiCallSection().getNodeSelector()); - Node both = nodeWithVersionAndAttributes("5.2.1", singletonMap("attr", singletonList("val"))); + Node both = nodeWithVersionAndAttributes(Build.current().version(), singletonMap("attr", singletonList("val"))); Node badVersion = nodeWithVersionAndAttributes("5.1.1", singletonMap("attr", singletonList("val"))); - Node badAttr = nodeWithVersionAndAttributes("5.2.1", singletonMap("notattr", singletonList("val"))); + Node badAttr = nodeWithVersionAndAttributes(Build.current().version(), singletonMap("notattr", singletonList("val"))); List nodes = new ArrayList<>(); nodes.add(both); nodes.add(badVersion); diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSectionTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSectionTests.java index b02658694d82f..181ec34fefb7e 100644 --- a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSectionTests.java +++ b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSectionTests.java @@ -363,8 +363,10 @@ public void testParseSkipSectionOsListNoVersion() throws Exception { public void testParseSkipSectionOsListTestFeaturesInRequires() throws Exception { parser = createParser(YamlXContent.yamlXContent, """ + - requires: + test_runner_features: skip_os + reason: skip_os is needed for 
skip based on os - skip: - features: [skip_os] os: [debian-9,windows-95,ms-dos] reason: see gh#xyz """); @@ -391,6 +393,95 @@ public void testParseSkipSectionOsNoFeatureNoVersion() throws Exception { assertThat(e.getMessage(), is("if os is specified, test runner feature [skip_os] must be set")); } + public void testParseRequireSectionClusterFeatures() throws Exception { + parser = createParser(YamlXContent.yamlXContent, """ + cluster_features: needed-feature + reason: test skipped when cluster lacks needed-feature + """); + + var skipSectionBuilder = new PrerequisiteSection.PrerequisiteSectionBuilder(); + PrerequisiteSection.parseRequiresSection(parser, skipSectionBuilder); + assertThat(skipSectionBuilder, notNullValue()); + assertThat(skipSectionBuilder.skipVersionRange, emptyOrNullString()); + assertThat(skipSectionBuilder.requiredClusterFeatures, contains("needed-feature")); + assertThat(skipSectionBuilder.requiresReason, is("test skipped when cluster lacks needed-feature")); + } + + public void testParseSkipSectionClusterFeatures() throws Exception { + parser = createParser(YamlXContent.yamlXContent, """ + cluster_features: undesired-feature + reason: test skipped when undesired-feature is present + """); + + var skipSectionBuilder = new PrerequisiteSection.PrerequisiteSectionBuilder(); + PrerequisiteSection.parseSkipSection(parser, skipSectionBuilder); + assertThat(skipSectionBuilder, notNullValue()); + assertThat(skipSectionBuilder.skipVersionRange, emptyOrNullString()); + assertThat(skipSectionBuilder.skipClusterFeatures, contains("undesired-feature")); + assertThat(skipSectionBuilder.skipReason, is("test skipped when undesired-feature is present")); + } + + public void testParseRequireAndSkipSectionsClusterFeatures() throws Exception { + parser = createParser(YamlXContent.yamlXContent, """ + - requires: + cluster_features: needed-feature + reason: test needs needed-feature to run + - skip: + cluster_features: undesired-feature + reason: test cannot run when undesired-feature are present + """); + + var skipSectionBuilder = PrerequisiteSection.parseInternal(parser); + assertThat(skipSectionBuilder, notNullValue()); + assertThat(skipSectionBuilder.skipVersionRange, emptyOrNullString()); + assertThat(skipSectionBuilder.skipClusterFeatures, contains("undesired-feature")); + assertThat(skipSectionBuilder.requiredClusterFeatures, contains("needed-feature")); + assertThat(skipSectionBuilder.skipReason, is("test cannot run when undesired-feature are present")); + assertThat(skipSectionBuilder.requiresReason, is("test needs needed-feature to run")); + + assertThat(parser.currentToken(), equalTo(XContentParser.Token.END_ARRAY)); + assertThat(parser.nextToken(), nullValue()); + } + + public void testParseRequireAndSkipSectionMultipleClusterFeatures() throws Exception { + parser = createParser(YamlXContent.yamlXContent, """ + - requires: + cluster_features: [needed-feature-1, needed-feature-2] + reason: test needs some to run + - skip: + cluster_features: [undesired-feature-1, undesired-feature-2] + reason: test cannot run when some are present + """); + + var skipSectionBuilder = PrerequisiteSection.parseInternal(parser); + assertThat(skipSectionBuilder, notNullValue()); + assertThat(skipSectionBuilder.skipVersionRange, emptyOrNullString()); + assertThat(skipSectionBuilder.skipClusterFeatures, containsInAnyOrder("undesired-feature-1", "undesired-feature-2")); + assertThat(skipSectionBuilder.requiredClusterFeatures, containsInAnyOrder("needed-feature-1", "needed-feature-2")); + 
assertThat(skipSectionBuilder.skipReason, is("test cannot run when some are present")); + assertThat(skipSectionBuilder.requiresReason, is("test needs some to run")); + + assertThat(parser.currentToken(), equalTo(XContentParser.Token.END_ARRAY)); + assertThat(parser.nextToken(), nullValue()); + } + + public void testParseSameRequireAndSkipClusterFeatures() throws Exception { + parser = createParser(YamlXContent.yamlXContent, """ + - requires: + cluster_features: some-feature + reason: test needs some-feature to run + - skip: + cluster_features: some-feature + reason: test cannot run with some-feature + """); + + var e = expectThrows(ParsingException.class, () -> PrerequisiteSection.parseInternal(parser)); + assertThat(e.getMessage(), is("a cluster feature can be specified either in [requires] or [skip], not both")); + + assertThat(parser.currentToken(), equalTo(XContentParser.Token.END_ARRAY)); + assertThat(parser.nextToken(), nullValue()); + } + public void testSkipClusterFeaturesAllRequiredMatch() { PrerequisiteSection section = new PrerequisiteSection( emptyList(), diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregationBuilder.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregationBuilder.java index ba9dc7ab7eed9..e7e06946a5289 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregationBuilder.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregationBuilder.java @@ -12,7 +12,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregationBuilder; -import org.elasticsearch.search.aggregations.AggregationInitializationException; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -173,9 +172,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { @Override public TopMetricsAggregationBuilder subAggregations(Builder subFactories) { - throw new AggregationInitializationException( - "Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations" - ); + throw new IllegalArgumentException("Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations"); } @Override diff --git a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregationBuilderTests.java b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregationBuilderTests.java index b2986c3f1f170..b8086e038a626 100644 --- a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregationBuilderTests.java +++ b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregationBuilderTests.java @@ -9,7 +9,6 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable.Reader; -import org.elasticsearch.search.aggregations.AggregationInitializationException; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.BaseAggregationBuilder; import 
org.elasticsearch.search.aggregations.support.MultiValuesSourceFieldConfig; @@ -88,7 +87,7 @@ protected TopMetricsAggregationBuilder mutateInstance(TopMetricsAggregationBuild } public void testValidation() { - AggregationInitializationException e = expectThrows(AggregationInitializationException.class, () -> { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { List> sortBuilders = singletonList( new FieldSortBuilder(randomAlphaOfLength(5)).order(randomFrom(SortOrder.values())) ); diff --git a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java index 3605d6365f867..646ba1465c7c2 100644 --- a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java +++ b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java @@ -566,7 +566,10 @@ public void testClusterDetailsAfterCCSWithFailuresOnOneShardOnly() throws Except assertThat(localClusterSearchInfo.getFailures().size(), equalTo(1)); assertThat(localClusterSearchInfo.getTook().millis(), greaterThan(0L)); ShardSearchFailure localShardSearchFailure = localClusterSearchInfo.getFailures().get(0); - assertTrue("should have 'index corrupted' in reason", localShardSearchFailure.reason().contains("index corrupted")); + assertTrue( + "should have 'index corrupted' in reason but was: " + localShardSearchFailure.reason(), + localShardSearchFailure.reason().contains("index corrupted") + ); SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); assertNotNull(remoteClusterSearchInfo); @@ -578,7 +581,10 @@ public void testClusterDetailsAfterCCSWithFailuresOnOneShardOnly() throws Except assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(1)); assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThan(0L)); ShardSearchFailure remoteShardSearchFailure = remoteClusterSearchInfo.getFailures().get(0); - assertTrue("should have 'index corrupted' in reason", remoteShardSearchFailure.reason().contains("index corrupted")); + assertTrue( + "should have 'index corrupted' in reason but was: " + remoteShardSearchFailure.reason(), + remoteShardSearchFailure.reason().contains("index corrupted") + ); } finally { finishedResponse.decRef(); } @@ -605,7 +611,10 @@ public void testClusterDetailsAfterCCSWithFailuresOnOneShardOnly() throws Except assertThat(localClusterSearchInfo.getFailures().size(), equalTo(1)); assertThat(localClusterSearchInfo.getTook().millis(), greaterThan(0L)); ShardSearchFailure localShardSearchFailure = localClusterSearchInfo.getFailures().get(0); - assertTrue("should have 'index corrupted' in reason", localShardSearchFailure.reason().contains("index corrupted")); + assertTrue( + "should have 'index corrupted' in reason but was: " + localShardSearchFailure.reason(), + localShardSearchFailure.reason().contains("index corrupted") + ); SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); assertNotNull(remoteClusterSearchInfo); @@ -617,7 +626,10 @@ public void testClusterDetailsAfterCCSWithFailuresOnOneShardOnly() throws Except assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(1)); assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThan(0L)); ShardSearchFailure remoteShardSearchFailure = 
remoteClusterSearchInfo.getFailures().get(0); - assertTrue("should have 'index corrupted' in reason", remoteShardSearchFailure.reason().contains("index corrupted")); + assertTrue( + "should have 'index corrupted' in reason but was: " + remoteShardSearchFailure.reason(), + remoteShardSearchFailure.reason().contains("index corrupted") + ); } } @@ -726,7 +738,10 @@ public void testClusterDetailsAfterCCSWithFailuresOnOneClusterOnly() throws Exce assertNull(remoteClusterSearchInfo.getTook()); assertFalse(remoteClusterSearchInfo.isTimedOut()); ShardSearchFailure remoteShardSearchFailure = remoteClusterSearchInfo.getFailures().get(0); - assertTrue("should have 'index corrupted' in reason", remoteShardSearchFailure.reason().contains("index corrupted")); + assertTrue( + "should have 'index corrupted' in reason but was: " + remoteShardSearchFailure.reason(), + remoteShardSearchFailure.reason().contains("index corrupted") + ); } finally { finishedResponse.decRef(); } @@ -776,7 +791,10 @@ public void testClusterDetailsAfterCCSWithFailuresOnOneClusterOnly() throws Exce assertNull(remoteClusterSearchInfo.getTook()); assertFalse(remoteClusterSearchInfo.isTimedOut()); ShardSearchFailure remoteShardSearchFailure = remoteClusterSearchInfo.getFailures().get(0); - assertTrue("should have 'index corrupted' in reason", remoteShardSearchFailure.reason().contains("index corrupted")); + assertTrue( + "should have 'index corrupted' in reason but was: " + remoteShardSearchFailure.reason(), + remoteShardSearchFailure.reason().contains("index corrupted") + ); } } @@ -1163,7 +1181,10 @@ public void testRemoteClusterOnlyCCSWithFailuresOnOneShardOnly() throws Exceptio assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(1)); assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThan(0L)); ShardSearchFailure remoteShardSearchFailure = remoteClusterSearchInfo.getFailures().get(0); - assertTrue("should have 'index corrupted' in reason", remoteShardSearchFailure.reason().contains("index corrupted")); + assertTrue( + "should have 'index corrupted' in reason but was: " + remoteShardSearchFailure.reason(), + remoteShardSearchFailure.reason().contains("index corrupted") + ); } finally { finishedResponse.decRef(); } @@ -1192,7 +1213,10 @@ public void testRemoteClusterOnlyCCSWithFailuresOnOneShardOnly() throws Exceptio assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(1)); assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThan(0L)); ShardSearchFailure remoteShardSearchFailure = remoteClusterSearchInfo.getFailures().get(0); - assertTrue("should have 'index corrupted' in reason", remoteShardSearchFailure.reason().contains("index corrupted")); + assertTrue( + "should have 'index corrupted' in reason but was: " + remoteShardSearchFailure.reason(), + remoteShardSearchFailure.reason().contains("index corrupted") + ); } } @@ -1665,7 +1689,10 @@ private static void assertAllShardsFailed(boolean minimizeRoundtrips, SearchResp assertNull(cluster.getTook()); assertFalse(cluster.isTimedOut()); ShardSearchFailure shardSearchFailure = cluster.getFailures().get(0); - assertTrue("should have 'index corrupted' in reason", shardSearchFailure.reason().contains("index corrupted")); + assertTrue( + "should have 'index corrupted' in reason but was: " + shardSearchFailure.reason(), + shardSearchFailure.reason().contains("index corrupted") + ); } protected AsyncSearchResponse submitAsyncSearch(SubmitAsyncSearchRequest request) throws ExecutionException, InterruptedException { diff --git 
a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/MutableSearchResponse.java b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/MutableSearchResponse.java index 7f3099917e9ec..2d0e2295eb859 100644 --- a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/MutableSearchResponse.java +++ b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/MutableSearchResponse.java @@ -306,12 +306,10 @@ synchronized AsyncSearchResponse toAsyncSearchResponse(AsyncSearchTask task, lon * (for local-only/CCS minimize_roundtrips=false) */ private SearchResponseMerger createSearchResponseMerger(AsyncSearchTask task) { - return null; - // TODO uncomment this code once Kibana moves to polling the _async_search/status endpoint to determine if a search is done - // if (task.getSearchResponseMergerSupplier() == null) { - // return null; // local search and CCS minimize_roundtrips=false - // } - // return task.getSearchResponseMergerSupplier().get(); + if (task.getSearchResponseMergerSupplier() == null) { + return null; // local search and CCS minimize_roundtrips=false + } + return task.getSearchResponseMergerSupplier().get(); } private SearchResponse getMergedResponse(SearchResponseMerger merger) { diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/existence/FrozenExistenceDeciderIT.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/existence/FrozenExistenceDeciderIT.java index 7e9e83e616f61..c72d5e83d2bd3 100644 --- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/existence/FrozenExistenceDeciderIT.java +++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/existence/FrozenExistenceDeciderIT.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.autoscaling.existence; +import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.blobcache.BlobCachePlugin; import org.elasticsearch.cluster.health.ClusterHealthStatus; @@ -138,6 +139,9 @@ public void testZeroToOne() throws Exception { // we've seen a case where bootstrapping a node took just over 60 seconds in the test environment, so using an (excessive) 90 // seconds max wait time to avoid flakiness assertBusy(() -> { + // cause a bit of cluster activity using an empty reroute call in case the `wait-for-index-colour` ILM step missed the + // notification that partial-index is now GREEN. 
+ client().admin().cluster().reroute(new ClusterRerouteRequest()).actionGet(); String[] indices = indices(); assertThat(indices, arrayContaining(PARTIAL_INDEX_NAME)); assertThat(indices, not(arrayContaining(INDEX_NAME))); diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java index ffa3a7308da90..2379e5f8e9380 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java @@ -48,6 +48,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.Predicates; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; @@ -199,11 +200,11 @@ static String message(long unassignedBytes, long assignedBytes) { } static boolean isDiskOnlyNoDecision(Decision decision) { - return singleNoDecision(decision, single -> true).map(DiskThresholdDecider.NAME::equals).orElse(false); + return singleNoDecision(decision, Predicates.always()).map(DiskThresholdDecider.NAME::equals).orElse(false); } static boolean isResizeOnlyNoDecision(Decision decision) { - return singleNoDecision(decision, single -> true).map(ResizeAllocationDecider.NAME::equals).orElse(false); + return singleNoDecision(decision, Predicates.always()).map(ResizeAllocationDecider.NAME::equals).orElse(false); } static boolean isFilterTierOnlyDecision(Decision decision, IndexMetadata indexMetadata) { diff --git a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java index 3f147c94c5ec2..9658db911f6df 100644 --- a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java +++ b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java @@ -443,6 +443,7 @@ private static NodeStats statsForNode(DiscoveryNode node, long memory) { null, null, null, + null, null ); } diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/SparseFileTracker.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/SparseFileTracker.java index 5eb146102cd76..e9be9577063cf 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/SparseFileTracker.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/SparseFileTracker.java @@ -525,7 +525,7 @@ private boolean invariant() { @Override public String toString() { - return "SparseFileTracker[" + description + ']'; + return "SparseFileTracker{description=" + description + ", length=" + length + ", complete=" + complete + '}'; } /** diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java index f2ebe61906258..d4c7c04c5b26e 100644 --- 
a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java @@ -398,23 +398,23 @@ public int getRecoveryRangeSize() { return recoveryRangeSize; } - private int getRegion(long position) { + protected int getRegion(long position) { return (int) (position / regionSize); } - private int getRegionRelativePosition(long position) { + protected int getRegionRelativePosition(long position) { return (int) (position % regionSize); } - private long getRegionStart(int region) { + protected long getRegionStart(int region) { return (long) region * regionSize; } - private long getRegionEnd(int region) { + protected long getRegionEnd(int region) { return (long) (region + 1) * regionSize; } - private int getEndingRegion(long position) { + protected int getEndingRegion(long position) { return getRegion(position - (position % regionSize == 0 ? 1 : 0)); } @@ -683,6 +683,23 @@ public final boolean isEvicted() { } } + protected boolean assertOffsetsWithinFileLength(long offset, long length, long fileLength) { + assert offset >= 0L; + assert length > 0L; + assert fileLength > 0L; + assert offset + length <= fileLength + : "accessing [" + + length + + "] bytes at offset [" + + offset + + "] in cache file [" + + this + + "] would be beyond file length [" + + fileLength + + ']'; + return true; + } + /** * While this class has incRef and tryIncRef methods, incRefEnsureOpen and tryIncrefEnsureOpen should * always be used, ensuring the right ordering between incRef/tryIncRef and ensureOpen @@ -698,6 +715,7 @@ class CacheFileRegion extends EvictableRefCounted { CacheFileRegion(RegionKey regionKey, int regionSize) { this.regionKey = regionKey; assert regionSize > 0; + // NOTE we use a constant string for description to avoid consuming extra heap space tracker = new SparseFileTracker("file", regionSize); } @@ -955,6 +973,7 @@ public KeyType getCacheKey() { } public boolean tryRead(ByteBuffer buf, long offset) throws IOException { + assert assertOffsetsWithinFileLength(offset, buf.remaining(), length); final int startRegion = getRegion(offset); final long end = offset + buf.remaining(); final int endRegion = getEndingRegion(end); @@ -984,6 +1003,8 @@ public int populateAndRead( final RangeAvailableHandler reader, final RangeMissingHandler writer ) throws Exception { + assert assertOffsetsWithinFileLength(rangeToWrite.start(), rangeToWrite.length(), length); + assert assertOffsetsWithinFileLength(rangeToRead.start(), rangeToRead.length(), length); // We are interested in the total time that the system spends when fetching a result (including time spent queuing), so we start // our measurement here.
final long startTime = threadPool.relativeTimeInNanos(); diff --git a/x-pack/plugin/build.gradle b/x-pack/plugin/build.gradle index eae3031512d4f..72e63b3255999 100644 --- a/x-pack/plugin/build.gradle +++ b/x-pack/plugin/build.gradle @@ -109,6 +109,8 @@ tasks.named("yamlRestTestV7CompatTransform").configure { task -> ) task.skipTest("ml/jobs_crud/Test update job", "Behaviour change #89824 - added limit filter to categorization analyzer") task.skipTest("ml/jobs_crud/Test create job with delimited format", "removing undocumented functionality") + task.skipTest("ml/jobs_crud/Test cannot create job with model snapshot id set", "Exception type has changed.") + task.skipTest("ml/validate/Test job config is invalid because model snapshot id set", "Exception type has changed.") task.skipTest("ml/datafeeds_crud/Test update datafeed to point to missing job", "behaviour change #44752 - not allowing to update datafeed job_id") task.skipTest( "ml/datafeeds_crud/Test update datafeed to point to different job", diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowInfoAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowInfoAction.java index 46c44c9b2392b..a66a79a0f7d76 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowInfoAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowInfoAction.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.Predicates; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -89,7 +90,7 @@ static List getFollowInfos(List concreteFollowerIndices, C if (ccrCustomData != null) { Optional result; if (persistentTasks != null) { - result = persistentTasks.findTasks(ShardFollowTask.NAME, task -> true) + result = persistentTasks.findTasks(ShardFollowTask.NAME, Predicates.always()) .stream() .map(task -> (ShardFollowTask) task.getParams()) .filter(shardFollowTask -> index.equals(shardFollowTask.getFollowShardId().getIndexName())) diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPauseFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPauseFollowAction.java index 6989abdf1de01..99c532f3b077f 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPauseFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPauseFollowAction.java @@ -98,7 +98,7 @@ protected void masterOperation( final ResponseHandler responseHandler = new ResponseHandler(shardFollowTaskIds.size(), listener); for (String taskId : shardFollowTaskIds) { final int taskSlot = i++; - persistentTasksService.sendRemoveRequest(taskId, responseHandler.getActionListener(taskSlot)); + persistentTasksService.sendRemoveRequest(taskId, null, responseHandler.getActionListener(taskSlot)); } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java index b06ff73e29960..c3dd30bd2f242 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java +++ 
b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java @@ -334,7 +334,8 @@ static DataStream updateLocalDataStream( remoteDataStream.getIndexMode(), remoteDataStream.getLifecycle(), remoteDataStream.isFailureStore(), - remoteDataStream.getFailureIndices() + remoteDataStream.getFailureIndices(), + remoteDataStream.getAutoShardingEvent() ); } else { if (localDataStream.isReplicated() == false) { @@ -387,7 +388,8 @@ static DataStream updateLocalDataStream( localDataStream.getIndexMode(), localDataStream.getLifecycle(), localDataStream.isFailureStore(), - localDataStream.getFailureIndices() + localDataStream.getFailureIndices(), + localDataStream.getAutoShardingEvent() ); } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java index 2e8ee39111ab7..cad1a37a3a17d 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java @@ -202,7 +202,13 @@ void start( followIndexMetadata, filteredHeaders ); - persistentTasksService.sendStartRequest(taskId, ShardFollowTask.NAME, shardFollowTask, handler.getActionListener(shardId)); + persistentTasksService.sendStartRequest( + taskId, + ShardFollowTask.NAME, + shardFollowTask, + null, + handler.getActionListener(shardId) + ); } } @@ -499,12 +505,14 @@ static String[] extractLeaderShardHistoryUUIDs(Map ccrIndexMetad SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING, SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING, SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING, + SearchSlowLog.INDEX_SEARCH_SLOWLOG_INCLUDE_USER_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_MAX_SOURCE_CHARS_TO_LOG_SETTING, + IndexingSlowLog.INDEX_INDEXING_SLOWLOG_INCLUDE_USER_SETTING, MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, MergePolicyConfig.INDEX_MERGE_POLICY_TYPE_SETTING, MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java index c13e513ef5164..2702a2e28546c 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java @@ -43,6 +43,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.TimeValue; @@ -64,7 +65,6 @@ import org.elasticsearch.indices.recovery.MultiFileWriter; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.repositories.FinalizeSnapshotContext; -import org.elasticsearch.repositories.GetSnapshotInfoContext; 
import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.IndexMetaDataGenerations; import org.elasticsearch.repositories.Repository; @@ -106,6 +106,7 @@ import java.util.Optional; import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; +import java.util.function.BooleanSupplier; import java.util.function.LongConsumer; import java.util.function.Supplier; import java.util.stream.Collectors; @@ -183,28 +184,36 @@ private RemoteClusterClient getRemoteClusterClient() { } @Override - public void getSnapshotInfo(GetSnapshotInfoContext context) { - final List snapshotIds = context.snapshotIds(); + public void getSnapshotInfo( + Collection snapshotIds, + boolean abortOnFailure, + BooleanSupplier isCancelled, + CheckedConsumer consumer, + ActionListener listener + ) { assert snapshotIds.size() == 1 && SNAPSHOT_ID.equals(snapshotIds.iterator().next()) : "RemoteClusterRepository only supports " + SNAPSHOT_ID + " as the SnapshotId but saw " + snapshotIds; try { csDeduplicator.execute( - new ThreadedActionListener<>(threadPool.executor(ThreadPool.Names.SNAPSHOT_META), context.map(response -> { + new ThreadedActionListener<>(threadPool.executor(ThreadPool.Names.SNAPSHOT_META), listener.map(response -> { Metadata responseMetadata = response.metadata(); Map indicesMap = responseMetadata.indices(); - return new SnapshotInfo( - new Snapshot(this.metadata.name(), SNAPSHOT_ID), - List.copyOf(indicesMap.keySet()), - List.copyOf(responseMetadata.dataStreams().keySet()), - List.of(), - response.getNodes().getMaxDataNodeCompatibleIndexVersion(), - SnapshotState.SUCCESS + consumer.accept( + new SnapshotInfo( + new Snapshot(this.metadata.name(), SNAPSHOT_ID), + List.copyOf(indicesMap.keySet()), + List.copyOf(responseMetadata.dataStreams().keySet()), + List.of(), + response.getNodes().getMaxDataNodeCompatibleIndexVersion(), + SnapshotState.SUCCESS + ) ); + return null; })) ); } catch (Exception e) { assert false : e; - context.onFailure(e); + listener.onFailure(e); } } diff --git a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportActionIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportActionIT.java index c102470628a00..bc97623c76970 100644 --- a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportActionIT.java +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportActionIT.java @@ -134,7 +134,8 @@ public void testAction() throws Exception { IndexMode.STANDARD, lifecycle, false, - List.of() + List.of(), + null ); dataStreamMap.put(dataStream.getName(), dataStream); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshot.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshot.java index c332694d93975..093ec031d0b30 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshot.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshot.java @@ -262,7 +262,8 @@ private SegmentCommitInfo syncSegment( 0, VectorEncoding.FLOAT32, VectorSimilarityFunction.EUCLIDEAN, - fieldInfo.isSoftDeletesField() + fieldInfo.isSoftDeletesField(), + fieldInfo.isParentField() ) ); } diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDecider.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDecider.java index 9231dfb744a36..eb4db6c24507d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDecider.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDecider.java @@ -95,21 +95,18 @@ public static Decision shouldFilter( ); if (tier.isPresent()) { String tierName = tier.get(); - if (allocationAllowed(tierName, node)) { - if (allocation.debugDecision()) { - return debugYesAllowed(allocation, tierPreference, tierName); - } - return Decision.YES; + assert Strings.hasText(tierName) : "tierName must be not null and non-empty, but was [" + tierName + "]"; + if (node.hasRole(DiscoveryNodeRole.DATA_ROLE.roleName())) { + return allocation.debugDecision() + ? debugYesAllowed(allocation, tierPreference, DiscoveryNodeRole.DATA_ROLE.roleName()) + : Decision.YES; } - if (allocation.debugDecision()) { - return debugNoRequirementsNotMet(allocation, tierPreference, tierName); + if (node.hasRole(tierName)) { + return allocation.debugDecision() ? debugYesAllowed(allocation, tierPreference, tierName) : Decision.YES; } - return Decision.NO; + return allocation.debugDecision() ? debugNoRequirementsNotMet(allocation, tierPreference, tierName) : Decision.NO; } - if (allocation.debugDecision()) { - return debugNoNoNodesAvailable(allocation, tierPreference); - } - return Decision.NO; + return allocation.debugDecision() ? debugNoNoNodesAvailable(allocation, tierPreference) : Decision.NO; } private static Decision debugNoNoNodesAvailable(RoutingAllocation allocation, List tierPreference) { @@ -278,11 +275,6 @@ static boolean tierNodesPresentConsideringRemovals(String singleTier, DiscoveryN return false; } - public static boolean allocationAllowed(String tierName, DiscoveryNode node) { - assert Strings.hasText(tierName) : "tierName must be not null and non-empty, but was [" + tierName + "]"; - return node.hasRole(DiscoveryNodeRole.DATA_ROLE.roleName()) || node.hasRole(tierName); - } - public static boolean allocationAllowed(String tierName, Set roles) { assert Strings.hasText(tierName) : "tierName must be not null and non-empty, but was [" + tierName + "]"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DeleteStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DeleteStep.java index 755e453790257..ba6b6f9366c61 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DeleteStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DeleteStep.java @@ -41,7 +41,10 @@ public void performDuringNoSnapshot(IndexMetadata indexMetadata, ClusterState cu if (dataStream != null) { assert dataStream.getWriteIndex() != null : dataStream.getName() + " has no write index"; - if (dataStream.getIndices().size() == 1 && dataStream.getIndices().get(0).equals(indexMetadata.getIndex())) { + + // using index name equality across this if/else branch as the UUID of the index might change via restoring a data stream + // with one index from snapshot + if (dataStream.getIndices().size() == 1 && dataStream.getWriteIndex().getName().equals(indexName)) { // This is the last index in the data stream, the entire stream // needs to be deleted, because we can't have an empty data stream 
DeleteDataStreamAction.Request deleteReq = new DeleteDataStreamAction.Request(new String[] { dataStream.getName() }); @@ -62,7 +65,8 @@ public void performDuringNoSnapshot(IndexMetadata indexMetadata, ClusterState cu policyName ); logger.debug(errorMessage); - throw new IllegalStateException(errorMessage); + listener.onFailure(new IllegalStateException(errorMessage)); + return; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java index 6209ead0cc6a1..6281f656954e5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java @@ -9,6 +9,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Predicates; import org.elasticsearch.core.TimeValue; import org.elasticsearch.persistent.PersistentTasksClusterService; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; @@ -308,7 +309,7 @@ public static Collection> openJo return Collections.emptyList(); } - return tasks.findTasks(JOB_TASK_NAME, task -> true); + return tasks.findTasks(JOB_TASK_NAME, Predicates.always()); } public static Collection> datafeedTasksOnNode( @@ -360,7 +361,7 @@ public static Collection> snapsh return Collections.emptyList(); } - return tasks.findTasks(JOB_SNAPSHOT_UPGRADE_TASK_NAME, task -> true); + return tasks.findTasks(JOB_SNAPSHOT_UPGRADE_TASK_NAME, Predicates.always()); } public static Collection> snapshotUpgradeTasksOnNode( @@ -439,7 +440,7 @@ public static Set startedDatafeedIds(@Nullable PersistentTasksCustomMeta return Collections.emptySet(); } - return tasks.findTasks(DATAFEED_TASK_NAME, task -> true) + return tasks.findTasks(DATAFEED_TASK_NAME, Predicates.always()) .stream() .map(t -> t.getId().substring(DATAFEED_TASK_ID_PREFIX.length())) .collect(Collectors.toSet()); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDatafeedAction.java index 8d4e9d25b94a3..d03a6d5c0c7c5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDatafeedAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDatafeedAction.java @@ -60,7 +60,7 @@ public static class Request extends ActionRequest implements ToXContentObject { private static final ObjectParser PARSER = new ObjectParser<>("preview_datafeed_action", Request.Builder::new); static { PARSER.declareObject(Builder::setDatafeedBuilder, DatafeedConfig.STRICT_PARSER, DATAFEED_CONFIG); - PARSER.declareObject(Builder::setJobBuilder, Job.STRICT_PARSER, JOB_CONFIG); + PARSER.declareObject(Builder::setJobBuilder, Job.REST_REQUEST_PARSER, JOB_CONFIG); PARSER.declareString(Builder::setStart, START_TIME); PARSER.declareString(Builder::setEnd, END_TIME); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java index 400bdaa3a27ea..efb4dacd83ba4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java @@ -20,7 +20,6 @@ import 
org.elasticsearch.xpack.core.ml.job.messages.Messages; import java.io.IOException; -import java.util.List; import java.util.Objects; public class PutJobAction extends ActionType { @@ -35,7 +34,7 @@ private PutJobAction() { public static class Request extends AcknowledgedRequest { public static Request parseRequest(String jobId, XContentParser parser, IndicesOptions indicesOptions) { - Job.Builder jobBuilder = Job.STRICT_PARSER.apply(parser, null); + Job.Builder jobBuilder = Job.REST_REQUEST_PARSER.apply(parser, null); if (jobBuilder.getId() == null) { jobBuilder.setId(jobId); } else if (Strings.isNullOrEmpty(jobId) == false && jobId.equals(jobBuilder.getId()) == false) { @@ -58,14 +57,6 @@ public Request(Job.Builder jobBuilder) { // would occur when parsing an old job config that already had duplicate detectors. jobBuilder.validateDetectorsAreUnique(); - // Some fields cannot be set at create time - List invalidJobCreationSettings = jobBuilder.invalidCreateTimeSettings(); - if (invalidJobCreationSettings.isEmpty() == false) { - throw new IllegalArgumentException( - Messages.getMessage(Messages.JOB_CONFIG_INVALID_CREATE_SETTINGS, String.join(",", invalidJobCreationSettings)) - ); - } - this.jobBuilder = jobBuilder; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java index 4e80fcab05e2f..15cd272d12b8b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java @@ -31,7 +31,7 @@ private UpdateJobAction() { public static class Request extends AcknowledgedRequest implements ToXContentObject { public static UpdateJobAction.Request parseRequest(String jobId, XContentParser parser) { - JobUpdate update = JobUpdate.EXTERNAL_PARSER.apply(parser, null).setJobId(jobId).build(); + JobUpdate update = JobUpdate.PARSER.apply(parser, null).setJobId(jobId).build(); return new UpdateJobAction.Request(jobId, update); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateJobConfigAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateJobConfigAction.java index 48549ae100e36..76cba60667c32 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateJobConfigAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateJobConfigAction.java @@ -14,11 +14,9 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.ml.job.config.Job; -import org.elasticsearch.xpack.core.ml.job.messages.Messages; import java.io.IOException; import java.util.Date; -import java.util.List; import java.util.Objects; public class ValidateJobConfigAction extends ActionType { @@ -32,10 +30,10 @@ protected ValidateJobConfigAction() { public static class Request extends ActionRequest { - private Job job; + private final Job job; public static Request parseRequest(XContentParser parser) { - Job.Builder jobBuilder = Job.STRICT_PARSER.apply(parser, null); + Job.Builder jobBuilder = Job.REST_REQUEST_PARSER.apply(parser, null); // When jobs are PUT their ID must be supplied in the URL - assume this will // be valid unless an invalid job ID is specified in the JSON to be validated jobBuilder.setId(jobBuilder.getId() != null ? 
jobBuilder.getId() : "ok"); @@ -45,14 +43,6 @@ public static Request parseRequest(XContentParser parser) { // would occur when parsing an old job config that already had duplicate detectors. jobBuilder.validateDetectorsAreUnique(); - // Some fields cannot be set at create time - List invalidJobCreationSettings = jobBuilder.invalidCreateTimeSettings(); - if (invalidJobCreationSettings.isEmpty() == false) { - throw new IllegalArgumentException( - Messages.getMessage(Messages.JOB_CONFIG_INVALID_CREATE_SETTINGS, String.join(",", invalidJobCreationSettings)) - ); - } - return new Request(jobBuilder.build(new Date())); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java index 1686cdea4340a..8da0209e10293 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java @@ -98,8 +98,9 @@ public class Job implements SimpleDiffable, Writeable, ToXContentObject { public static final ParseField RESULTS_FIELD = new ParseField("jobs"); // These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly - public static final ObjectParser LENIENT_PARSER = createParser(true); - public static final ObjectParser STRICT_PARSER = createParser(false); + public static final ObjectParser LENIENT_PARSER = createParser(true, true); + // Use the REST request parser to parse a job passed to the API, to disallow setting internal fields. + public static final ObjectParser REST_REQUEST_PARSER = createParser(false, false); public static final TimeValue MIN_BACKGROUND_PERSIST_INTERVAL = TimeValue.timeValueHours(1); @@ -114,26 +115,12 @@ public class Job implements SimpleDiffable, Writeable, ToXContentObject { public static final long DEFAULT_MODEL_SNAPSHOT_RETENTION_DAYS = 10; public static final long DEFAULT_DAILY_MODEL_SNAPSHOT_RETENTION_AFTER_DAYS = 1; - private static ObjectParser createParser(boolean ignoreUnknownFields) { + private static ObjectParser createParser(boolean allowInternalFields, boolean ignoreUnknownFields) { ObjectParser parser = new ObjectParser<>("job_details", ignoreUnknownFields, Builder::new); parser.declareString(Builder::setId, ID); - parser.declareString(Builder::setJobType, JOB_TYPE); - parser.declareString(Builder::setJobVersion, JOB_VERSION); parser.declareStringArray(Builder::setGroups, GROUPS); parser.declareStringOrNull(Builder::setDescription, DESCRIPTION); - parser.declareField( - Builder::setCreateTime, - p -> TimeUtils.parseTimeField(p, CREATE_TIME.getPreferredName()), - CREATE_TIME, - ValueType.VALUE - ); - parser.declareField( - Builder::setFinishedTime, - p -> TimeUtils.parseTimeField(p, FINISHED_TIME.getPreferredName()), - FINISHED_TIME, - ValueType.VALUE - ); parser.declareObject( Builder::setAnalysisConfig, ignoreUnknownFields ? 
AnalysisConfig.LENIENT_PARSER : AnalysisConfig.STRICT_PARSER, @@ -165,17 +152,35 @@ private static ObjectParser createParser(boolean ignoreUnknownFie parser.declareLong(Builder::setModelSnapshotRetentionDays, MODEL_SNAPSHOT_RETENTION_DAYS); parser.declareLong(Builder::setDailyModelSnapshotRetentionAfterDays, DAILY_MODEL_SNAPSHOT_RETENTION_AFTER_DAYS); parser.declareField(Builder::setCustomSettings, (p, c) -> p.mapOrdered(), CUSTOM_SETTINGS, ValueType.OBJECT); - parser.declareStringOrNull(Builder::setModelSnapshotId, MODEL_SNAPSHOT_ID); - parser.declareStringOrNull(Builder::setModelSnapshotMinVersion, MODEL_SNAPSHOT_MIN_VERSION); parser.declareString(Builder::setResultsIndexName, RESULTS_INDEX_NAME); - parser.declareBoolean(Builder::setDeleting, DELETING); parser.declareBoolean(Builder::setAllowLazyOpen, ALLOW_LAZY_OPEN); - parser.declareObject(Builder::setBlocked, ignoreUnknownFields ? Blocked.LENIENT_PARSER : Blocked.STRICT_PARSER, BLOCKED); parser.declareObject( Builder::setDatafeed, ignoreUnknownFields ? DatafeedConfig.LENIENT_PARSER : DatafeedConfig.STRICT_PARSER, DATAFEED_CONFIG ); + + if (allowInternalFields) { + parser.declareString(Builder::setJobType, JOB_TYPE); + parser.declareString(Builder::setJobVersion, JOB_VERSION); + parser.declareField( + Builder::setCreateTime, + p -> TimeUtils.parseTimeField(p, CREATE_TIME.getPreferredName()), + CREATE_TIME, + ValueType.VALUE + ); + parser.declareField( + Builder::setFinishedTime, + p -> TimeUtils.parseTimeField(p, FINISHED_TIME.getPreferredName()), + FINISHED_TIME, + ValueType.VALUE + ); + parser.declareStringOrNull(Builder::setModelSnapshotId, MODEL_SNAPSHOT_ID); + parser.declareStringOrNull(Builder::setModelSnapshotMinVersion, MODEL_SNAPSHOT_MIN_VERSION); + parser.declareBoolean(Builder::setDeleting, DELETING); + parser.declareObject(Builder::setBlocked, ignoreUnknownFields ? Blocked.LENIENT_PARSER : Blocked.STRICT_PARSER, BLOCKED); + } + return parser; } @@ -485,6 +490,10 @@ public boolean isDeleting() { return deleting; } + public boolean isResetting() { + return blocked != null && Blocked.Reason.RESET.equals(blocked.getReason()); + } + public boolean allowLazyOpen() { return allowLazyOpen; } @@ -1016,26 +1025,6 @@ public Builder setDatafeedIndicesOptionsIfRequired(IndicesOptions indicesOptions return this; } - /** - * Return the list of fields that have been set and are invalid to - * be set when the job is created e.g. model snapshot Id should not - * be set at job creation. - * @return List of fields set fields that should not be. 
- */ - public List invalidCreateTimeSettings() { - List invalidCreateValues = new ArrayList<>(); - if (modelSnapshotId != null) { - invalidCreateValues.add(MODEL_SNAPSHOT_ID.getPreferredName()); - } - if (finishedTime != null) { - invalidCreateValues.add(FINISHED_TIME.getPreferredName()); - } - if (createTime != null) { - invalidCreateValues.add(CREATE_TIME.getPreferredName()); - } - return invalidCreateValues; - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(id); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java index 3ba40c70d0701..4b11314b8bb43 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java @@ -34,57 +34,43 @@ public class JobUpdate implements Writeable, ToXContentObject { public static final ParseField DETECTORS = new ParseField("detectors"); public static final ParseField CLEAR_JOB_FINISH_TIME = new ParseField("clear_job_finish_time"); - // For internal updates - static final ConstructingObjectParser INTERNAL_PARSER = new ConstructingObjectParser<>( - "job_update", - args -> new Builder((String) args[0]) - ); - // For parsing REST requests - public static final ConstructingObjectParser EXTERNAL_PARSER = new ConstructingObjectParser<>( + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "job_update", args -> new Builder((String) args[0]) ); static { - for (ConstructingObjectParser parser : Arrays.asList(INTERNAL_PARSER, EXTERNAL_PARSER)) { - parser.declareString(ConstructingObjectParser.optionalConstructorArg(), Job.ID); - parser.declareStringArray(Builder::setGroups, Job.GROUPS); - parser.declareStringOrNull(Builder::setDescription, Job.DESCRIPTION); - parser.declareObjectArray(Builder::setDetectorUpdates, DetectorUpdate.PARSER, DETECTORS); - parser.declareObject(Builder::setModelPlotConfig, ModelPlotConfig.STRICT_PARSER, Job.MODEL_PLOT_CONFIG); - parser.declareObject(Builder::setAnalysisLimits, AnalysisLimits.STRICT_PARSER, Job.ANALYSIS_LIMITS); - parser.declareString( - (builder, val) -> builder.setBackgroundPersistInterval( - TimeValue.parseTimeValue(val, Job.BACKGROUND_PERSIST_INTERVAL.getPreferredName()) - ), - Job.BACKGROUND_PERSIST_INTERVAL - ); - parser.declareLong(Builder::setRenormalizationWindowDays, Job.RENORMALIZATION_WINDOW_DAYS); - parser.declareLong(Builder::setResultsRetentionDays, Job.RESULTS_RETENTION_DAYS); - parser.declareLong(Builder::setModelSnapshotRetentionDays, Job.MODEL_SNAPSHOT_RETENTION_DAYS); - parser.declareLong(Builder::setDailyModelSnapshotRetentionAfterDays, Job.DAILY_MODEL_SNAPSHOT_RETENTION_AFTER_DAYS); - parser.declareStringArray(Builder::setCategorizationFilters, AnalysisConfig.CATEGORIZATION_FILTERS); - parser.declareObject( - Builder::setPerPartitionCategorizationConfig, - PerPartitionCategorizationConfig.STRICT_PARSER, - AnalysisConfig.PER_PARTITION_CATEGORIZATION - ); - parser.declareField(Builder::setCustomSettings, (p, c) -> p.map(), Job.CUSTOM_SETTINGS, ObjectParser.ValueType.OBJECT); - parser.declareBoolean(Builder::setAllowLazyOpen, Job.ALLOW_LAZY_OPEN); - parser.declareString( - (builder, val) -> builder.setModelPruneWindow( - TimeValue.parseTimeValue(val, AnalysisConfig.MODEL_PRUNE_WINDOW.getPreferredName()) - ), - AnalysisConfig.MODEL_PRUNE_WINDOW - ); - } 
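To make the effect of the parser changes concrete, here is a minimal stand-alone sketch (class name and field sets are illustrative, not the Elasticsearch ObjectParser): a REST-facing parser simply never declares the internal-only fields, so a strict parse of a create or validate request that sets one of them fails as an unknown field, which is what lets the separate invalidCreateTimeSettings() post-check be removed.

import java.util.HashSet;
import java.util.Set;

// Illustrative sketch only: REST requests are parsed against the smaller field set,
// so setting e.g. "model_snapshot_id" is rejected at parse time rather than by a
// dedicated validation step after parsing.
final class JobParserFieldsSketch {
    private static final Set<String> REST_FIELDS = Set.of("job_id", "groups", "description", "analysis_config", "results_index_name");
    private static final Set<String> INTERNAL_ONLY_FIELDS = Set.of("job_type", "job_version", "create_time", "finished_time", "model_snapshot_id", "deleting", "blocked");

    static Set<String> declaredFields(boolean allowInternalFields) {
        Set<String> declared = new HashSet<>(REST_FIELDS);
        if (allowInternalFields) {
            declared.addAll(INTERNAL_ONLY_FIELDS);
        }
        return declared;
    }

    static void checkKnown(String field, boolean allowInternalFields) {
        if (declaredFields(allowInternalFields).contains(field) == false) {
            throw new IllegalArgumentException("unknown field [" + field + "]");
        }
    }

    public static void main(String[] args) {
        checkKnown("description", false);       // allowed in a REST request
        checkKnown("model_snapshot_id", true);  // allowed when parsing internal config
        try {
            checkKnown("model_snapshot_id", false);
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // unknown field [model_snapshot_id]
        }
    }
}

Continuing with the patch: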
- // These fields should not be set by a REST request - INTERNAL_PARSER.declareString(Builder::setModelSnapshotId, Job.MODEL_SNAPSHOT_ID); - INTERNAL_PARSER.declareString(Builder::setModelSnapshotMinVersion, Job.MODEL_SNAPSHOT_MIN_VERSION); - INTERNAL_PARSER.declareString(Builder::setJobVersion, Job.JOB_VERSION); - INTERNAL_PARSER.declareBoolean(Builder::setClearFinishTime, CLEAR_JOB_FINISH_TIME); - INTERNAL_PARSER.declareObject(Builder::setBlocked, Blocked.STRICT_PARSER, Job.BLOCKED); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), Job.ID); + PARSER.declareStringArray(Builder::setGroups, Job.GROUPS); + PARSER.declareStringOrNull(Builder::setDescription, Job.DESCRIPTION); + PARSER.declareObjectArray(Builder::setDetectorUpdates, DetectorUpdate.PARSER, DETECTORS); + PARSER.declareObject(Builder::setModelPlotConfig, ModelPlotConfig.STRICT_PARSER, Job.MODEL_PLOT_CONFIG); + PARSER.declareObject(Builder::setAnalysisLimits, AnalysisLimits.STRICT_PARSER, Job.ANALYSIS_LIMITS); + PARSER.declareString( + (builder, val) -> builder.setBackgroundPersistInterval( + TimeValue.parseTimeValue(val, Job.BACKGROUND_PERSIST_INTERVAL.getPreferredName()) + ), + Job.BACKGROUND_PERSIST_INTERVAL + ); + PARSER.declareLong(Builder::setRenormalizationWindowDays, Job.RENORMALIZATION_WINDOW_DAYS); + PARSER.declareLong(Builder::setResultsRetentionDays, Job.RESULTS_RETENTION_DAYS); + PARSER.declareLong(Builder::setModelSnapshotRetentionDays, Job.MODEL_SNAPSHOT_RETENTION_DAYS); + PARSER.declareLong(Builder::setDailyModelSnapshotRetentionAfterDays, Job.DAILY_MODEL_SNAPSHOT_RETENTION_AFTER_DAYS); + PARSER.declareStringArray(Builder::setCategorizationFilters, AnalysisConfig.CATEGORIZATION_FILTERS); + PARSER.declareObject( + Builder::setPerPartitionCategorizationConfig, + PerPartitionCategorizationConfig.STRICT_PARSER, + AnalysisConfig.PER_PARTITION_CATEGORIZATION + ); + PARSER.declareField(Builder::setCustomSettings, (p, c) -> p.map(), Job.CUSTOM_SETTINGS, ObjectParser.ValueType.OBJECT); + PARSER.declareBoolean(Builder::setAllowLazyOpen, Job.ALLOW_LAZY_OPEN); + PARSER.declareString( + (builder, val) -> builder.setModelPruneWindow( + TimeValue.parseTimeValue(val, AnalysisConfig.MODEL_PRUNE_WINDOW.getPreferredName()) + ), + AnalysisConfig.MODEL_PRUNE_WINDOW + ); } private final String jobId; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java index ad7a6b998fafd..52c97ece1b017 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java @@ -222,8 +222,6 @@ public final class Messages { public static final String JOB_CONFIG_FUNCTION_REQUIRES_OVERFIELD = "over_field_name must be set when the ''{0}'' function is used"; public static final String JOB_CONFIG_ID_ALREADY_TAKEN = "The job cannot be created with the Id ''{0}''. The Id is already used."; public static final String JOB_CONFIG_ID_TOO_LONG = "The job id cannot contain more than {0,number,integer} characters."; - public static final String JOB_CONFIG_INVALID_CREATE_SETTINGS = - "The job is configured with fields [{0}] that are illegal to set at job creation"; public static final String JOB_CONFIG_INVALID_FIELDNAME_CHARS = "Invalid field name ''{0}''. 
Field names including over, by and partition " + "fields cannot contain any of these characters: {1}"; public static final String JOB_CONFIG_INVALID_FIELDNAME = diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelState.java index de2f6d1fe7849..3c352b4b7dec7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelState.java @@ -21,7 +21,7 @@ public final class ModelState { */ public static final String TYPE = "model_state"; - private static final Pattern V_5_4_DOC_ID_REGEX = Pattern.compile("(.*)-\\d{10}#\\d+"); + private static final Pattern V_5_4_DOC_ID_SUFFIX_REGEX = Pattern.compile("^\\d{10}#\\d+$"); public static String documentId(String jobId, String snapshotId, int docNum) { return jobId + "_" + TYPE + "_" + snapshotId + "#" + docNum; @@ -43,9 +43,13 @@ public static String extractJobId(String docId) { * and ended with hash and an integer. */ private static String v54ExtractJobId(String docId) { - Matcher matcher = V_5_4_DOC_ID_REGEX.matcher(docId); + int potentialSuffixIndex = docId.lastIndexOf('-'); + if (potentialSuffixIndex <= 0 || potentialSuffixIndex >= docId.length() - 1) { + return null; + } + Matcher matcher = V_5_4_DOC_ID_SUFFIX_REGEX.matcher(docId.subSequence(potentialSuffixIndex + 1, docId.length())); if (matcher.matches()) { - return matcher.group(1); + return docId.substring(0, potentialSuffixIndex); } return null; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/EnrollmentToken.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/EnrollmentToken.java index 466caa11771a5..fb1d178250aa3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/EnrollmentToken.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/EnrollmentToken.java @@ -24,6 +24,10 @@ import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; public class EnrollmentToken { + // This was previously a version string, e.g. 8.12.0, but treated exclusively as a string everywhere, never parsed into a version. + // Arbitrarily set to 9 when decoupling this from node version. 
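As a side note on the V_5_4_DOC_ID_SUFFIX_REGEX change above, a stand-alone sketch of the reworked 5.4-era document-id parsing (class name illustrative): split on the last '-' and match only an anchored suffix, rather than running the old greedy "(.*)-\d{10}#\d+" pattern, and its backtracking, over the whole id.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Stand-alone sketch: the suffix after the last '-' must be exactly ten digits,
// then '#', then more digits; only then is the prefix treated as the job id.
final class V54DocIdSketch {
    private static final Pattern SUFFIX = Pattern.compile("^\\d{10}#\\d+$");

    static String extractJobId(String docId) {
        int dash = docId.lastIndexOf('-');
        if (dash <= 0 || dash >= docId.length() - 1) {
            return null;
        }
        Matcher matcher = SUFFIX.matcher(docId.substring(dash + 1));
        return matcher.matches() ? docId.substring(0, dash) : null;
    }

    public static void main(String[] args) {
        System.out.println(extractJobId("my_job-1234567890#42")); // my_job
        System.out.println(extractJobId("no-v54-suffix-here"));   // null
    }
}

Since a matching suffix can never itself contain a '-', checking only the text after the last '-' accepts exactly the same ids as the old pattern. Continuing with the patch: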
+ public static final String CURRENT_TOKEN_VERSION = "8.14.0"; + private final String apiKey; private final String fingerprint; private final String version; @@ -64,19 +68,22 @@ public List getBoundAddress() { PARSER.declareStringArray(constructorArg(), ADDRESS); } + EnrollmentToken(String apiKey, String fingerprint, String version, List boundAddress) { + this.apiKey = Objects.requireNonNull(apiKey); + this.fingerprint = Objects.requireNonNull(fingerprint); + this.version = Objects.requireNonNull(version); + this.boundAddress = Objects.requireNonNull(boundAddress); + } + /** * Create an EnrollmentToken * * @param apiKey API Key credential in the form apiKeyId:ApiKeySecret to be used for enroll calls * @param fingerprint hex encoded SHA256 fingerprint of the HTTP CA cert - * @param version node version number * @param boundAddress IP Addresses and port numbers for the interfaces where the Elasticsearch node is listening on */ - public EnrollmentToken(String apiKey, String fingerprint, String version, List boundAddress) { - this.apiKey = Objects.requireNonNull(apiKey); - this.fingerprint = Objects.requireNonNull(fingerprint); - this.version = Objects.requireNonNull(version); - this.boundAddress = Objects.requireNonNull(boundAddress); + public EnrollmentToken(String apiKey, String fingerprint, List boundAddress) { + this(apiKey, fingerprint, EnrollmentToken.CURRENT_TOKEN_VERSION, boundAddress); } public String getRaw() throws Exception { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequest.java index f915781c6211a..534c874438e3f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequest.java @@ -49,4 +49,5 @@ public BulkUpdateApiKeyRequest(StreamInput in) throws IOException { public ApiKey.Type getType() { return ApiKey.Type.REST; } + } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequestTranslator.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequestTranslator.java new file mode 100644 index 0000000000000..57a5848970b2e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequestTranslator.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.security.action.apikey; + +import org.elasticsearch.common.CheckedBiFunction; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; + +public interface BulkUpdateApiKeyRequestTranslator { + BulkUpdateApiKeyRequest translate(RestRequest request) throws IOException; + + class Default implements BulkUpdateApiKeyRequestTranslator { + private static final ConstructingObjectParser PARSER = createParser( + (n, p) -> RoleDescriptor.parse(n, p, false) + ); + + @SuppressWarnings("unchecked") + protected static ConstructingObjectParser createParser( + CheckedBiFunction roleDescriptorParser + ) { + final ConstructingObjectParser parser = new ConstructingObjectParser<>( + "bulk_update_api_key_request", + a -> new BulkUpdateApiKeyRequest( + (List) a[0], + (List) a[1], + (Map) a[2], + TimeValue.parseTimeValue((String) a[3], null, "expiration") + ) + ); + parser.declareStringArray(constructorArg(), new ParseField("ids")); + parser.declareNamedObjects(optionalConstructorArg(), (p, c, n) -> { + p.nextToken(); + return roleDescriptorParser.apply(n, p); + }, new ParseField("role_descriptors")); + parser.declareObject(optionalConstructorArg(), (p, c) -> p.map(), new ParseField("metadata")); + parser.declareString(optionalConstructorArg(), new ParseField("expiration")); + return parser; + } + + @Override + public BulkUpdateApiKeyRequest translate(RestRequest request) throws IOException { + try (XContentParser parser = request.contentParser()) { + return PARSER.parse(parser, null); + } + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestBuilder.java index 2747dc47058f8..5c156ab4e6166 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestBuilder.java @@ -9,6 +9,7 @@ import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.internal.ElasticsearchClient; +import org.elasticsearch.common.CheckedBiFunction; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentHelper; @@ -29,30 +30,34 @@ /** * Request builder for populating a {@link CreateApiKeyRequest} */ -public final class CreateApiKeyRequestBuilder extends ActionRequestBuilder { +public class CreateApiKeyRequestBuilder extends ActionRequestBuilder { + private static final ConstructingObjectParser PARSER = createParser( + (n, p) -> RoleDescriptor.parse(n, p, false) + ); @SuppressWarnings("unchecked") - static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "api_key_request", - false, - (args, v) -> { - return new 
CreateApiKeyRequest( + protected static ConstructingObjectParser createParser( + CheckedBiFunction roleDescriptorParser + ) { + ConstructingObjectParser parser = new ConstructingObjectParser<>( + "api_key_request", + false, + (args, v) -> new CreateApiKeyRequest( (String) args[0], (List) args[1], TimeValue.parseTimeValue((String) args[2], null, "expiration"), (Map) args[3] - ); - } - ); + ) + ); - static { - PARSER.declareString(constructorArg(), new ParseField("name")); - PARSER.declareNamedObjects(optionalConstructorArg(), (p, c, n) -> { + parser.declareString(constructorArg(), new ParseField("name")); + parser.declareNamedObjects(optionalConstructorArg(), (p, c, n) -> { p.nextToken(); - return RoleDescriptor.parse(n, p, false); + return roleDescriptorParser.apply(n, p); }, new ParseField("role_descriptors")); - PARSER.declareString(optionalConstructorArg(), new ParseField("expiration")); - PARSER.declareObject(optionalConstructorArg(), (p, c) -> p.map(), new ParseField("metadata")); + parser.declareString(optionalConstructorArg(), new ParseField("expiration")); + parser.declareObject(optionalConstructorArg(), (p, c) -> p.map(), new ParseField("metadata")); + return parser; } public CreateApiKeyRequestBuilder(ElasticsearchClient client) { @@ -85,6 +90,15 @@ public CreateApiKeyRequestBuilder setMetadata(Map metadata) { } public CreateApiKeyRequestBuilder source(BytesReference source, XContentType xContentType) throws IOException { + CreateApiKeyRequest createApiKeyRequest = parse(source, xContentType); + setName(createApiKeyRequest.getName()); + setRoleDescriptors(createApiKeyRequest.getRoleDescriptors()); + setExpiration(createApiKeyRequest.getExpiration()); + setMetadata(createApiKeyRequest.getMetadata()); + return this; + } + + protected CreateApiKeyRequest parse(BytesReference source, XContentType xContentType) throws IOException { try ( XContentParser parser = XContentHelper.createParserNotCompressed( LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, @@ -92,14 +106,8 @@ public CreateApiKeyRequestBuilder source(BytesReference source, XContentType xCo xContentType ) ) { - CreateApiKeyRequest createApiKeyRequest = parse(parser); - setName(createApiKeyRequest.getName()); - setRoleDescriptors(createApiKeyRequest.getRoleDescriptors()); - setExpiration(createApiKeyRequest.getExpiration()); - setMetadata(createApiKeyRequest.getMetadata()); - + return parse(parser); } - return this; } public static CreateApiKeyRequest parse(XContentParser parser) throws IOException { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestBuilderFactory.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestBuilderFactory.java new file mode 100644 index 0000000000000..ff5592e339634 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestBuilderFactory.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.security.action.apikey; + +import org.elasticsearch.client.internal.Client; + +public interface CreateApiKeyRequestBuilderFactory { + CreateApiKeyRequestBuilder create(Client client, boolean restrictRequest); + + class Default implements CreateApiKeyRequestBuilderFactory { + @Override + public CreateApiKeyRequestBuilder create(Client client, boolean restrictRequest) { + assert false == restrictRequest; + return new CreateApiKeyRequestBuilder(client); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequest.java index c5c8bcc4fc87a..9b1e9194d59fd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequest.java @@ -38,4 +38,5 @@ public UpdateApiKeyRequest(StreamInput in) throws IOException { public ApiKey.Type getType() { return ApiKey.Type.REST; } + } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequestTranslator.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequestTranslator.java new file mode 100644 index 0000000000000..f70732dd50990 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequestTranslator.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.security.action.apikey; + +import org.elasticsearch.common.CheckedBiFunction; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; + +public interface UpdateApiKeyRequestTranslator { + UpdateApiKeyRequest translate(RestRequest request) throws IOException; + + class Default implements UpdateApiKeyRequestTranslator { + private static final ConstructingObjectParser PARSER = createParser((n, p) -> RoleDescriptor.parse(n, p, false)); + + @SuppressWarnings("unchecked") + protected static ConstructingObjectParser createParser( + CheckedBiFunction roleDescriptorParser + ) { + final ConstructingObjectParser parser = new ConstructingObjectParser<>( + "update_api_key_request_payload", + a -> new Payload( + (List) a[0], + (Map) a[1], + TimeValue.parseTimeValue((String) a[2], null, "expiration") + ) + ); + parser.declareNamedObjects(optionalConstructorArg(), (p, c, n) -> { + p.nextToken(); + return roleDescriptorParser.apply(n, p); + }, new ParseField("role_descriptors")); + parser.declareObject(optionalConstructorArg(), (p, c) -> p.map(), new ParseField("metadata")); + parser.declareString(optionalConstructorArg(), new ParseField("expiration")); + return parser; + } + + @Override + public UpdateApiKeyRequest translate(RestRequest request) throws IOException { + // Note that we use `ids` here even though we only support a single ID. This is because the route where this translator is used + // shares a path prefix with `RestClearApiKeyCacheAction` and our current REST implementation requires that path params have the + // same wildcard if their paths share a prefix + final String apiKeyId = request.param("ids"); + if (false == request.hasContent()) { + return UpdateApiKeyRequest.usingApiKeyId(apiKeyId); + } + final Payload payload = PARSER.parse(request.contentParser(), null); + return new UpdateApiKeyRequest(apiKeyId, payload.roleDescriptors, payload.metadata, payload.expiration); + } + + protected record Payload(List roleDescriptors, Map metadata, TimeValue expiration) {} + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequestBuilder.java index d32a911a440d6..52946c05cf87b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequestBuilder.java @@ -24,17 +24,11 @@ public HasPrivilegesRequestBuilder(ElasticsearchClient client) { super(client, HasPrivilegesAction.INSTANCE, new HasPrivilegesRequest()); } - /** - * Set the username of the user that should enabled or disabled. 
Must not be {@code null} - */ public HasPrivilegesRequestBuilder username(String username) { request.username(username); return this; } - /** - * Set whether the user should be enabled or not - */ public HasPrivilegesRequestBuilder source(String username, BytesReference source, XContentType xContentType) throws IOException { final AuthorizationEngine.PrivilegesToCheck privilegesToCheck = RoleDescriptor.parsePrivilegesToCheck( username + "/has_privileges", diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequestBuilderFactory.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequestBuilderFactory.java new file mode 100644 index 0000000000000..e610e40333da8 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequestBuilderFactory.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.client.internal.Client; + +public interface HasPrivilegesRequestBuilderFactory { + HasPrivilegesRequestBuilder create(Client client, boolean restrictRequest); + + class Default implements HasPrivilegesRequestBuilderFactory { + + @Override + public HasPrivilegesRequestBuilder create(Client client, boolean restrictRequest) { + assert false == restrictRequest; + return new HasPrivilegesRequestBuilder(client); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/UserRoleMapper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/UserRoleMapper.java index 5addca91902cd..96fb7ff4e6f41 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/UserRoleMapper.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/UserRoleMapper.java @@ -15,6 +15,7 @@ import org.apache.lucene.util.automaton.CharacterRunAutomaton; import org.elasticsearch.action.ActionListener; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Predicates; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.ExpressionModel; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression; @@ -87,7 +88,7 @@ public ExpressionModel asModel() { groups, groups.stream().>map(g -> new DistinguishedNamePredicate(g, dnNormalizer)) .reduce(Predicate::or) - .orElse(fieldValue -> false) + .orElse(Predicates.never()) ); metadata.keySet().forEach(k -> model.defineField("metadata." 
+ k, metadata.get(k))); model.defineField("realm.name", realm.name()); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionModel.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionModel.java index 3251c54945335..9d25e6830bbbd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionModel.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionModel.java @@ -9,6 +9,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Numbers; +import org.elasticsearch.core.Predicates; import java.util.Collection; import java.util.Collections; @@ -100,7 +101,7 @@ static Predicate buildPredicate(Object object) { return ((Collection) object).stream() .map(element -> buildPredicate(element)) .reduce((a, b) -> a.or(b)) - .orElse(fieldValue -> false); + .orElse(Predicates.never()); } throw new IllegalArgumentException("Unsupported value type " + object.getClass()); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/restriction/WorkflowsRestriction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/restriction/WorkflowsRestriction.java index f1d9d694304e5..811c6b36d4f7e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/restriction/WorkflowsRestriction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/restriction/WorkflowsRestriction.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.security.authz.restriction; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Predicates; import java.util.Set; import java.util.function.Predicate; @@ -26,10 +27,10 @@ public WorkflowsRestriction(Set names) { this.names = names; if (names == null) { // No restriction, all workflows are allowed - this.predicate = name -> true; + this.predicate = Predicates.always(); } else if (names.isEmpty()) { // Empty restriction, no workflow is allowed - this.predicate = name -> false; + this.predicate = Predicates.never(); } else { this.predicate = name -> { if (name == null) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Automatons.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Automatons.java index 5d7a4b279298c..f601aa144aa00 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Automatons.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Automatons.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.core.Predicates; import org.elasticsearch.core.TimeValue; import java.util.ArrayList; @@ -312,6 +313,11 @@ static int getMaxDeterminizedStates() { } private static Predicate predicate(Automaton automaton, final String toString) { + if (automaton == MATCH_ALL) { + return Predicates.always(); + } else if (automaton == EMPTY) { + return Predicates.never(); + } CharacterRunAutomaton runAutomaton = new CharacterRunAutomaton(automaton, maxDeterminizedStates); return new Predicate() { @Override diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/StringMatcher.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/StringMatcher.java index 235fb3635bac6..ede11fe157487 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/StringMatcher.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/StringMatcher.java @@ -12,6 +12,7 @@ import org.apache.lucene.util.automaton.TooComplexToDeterminizeException; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.common.Strings; +import org.elasticsearch.core.Predicates; import java.util.ArrayList; import java.util.Collection; @@ -34,9 +35,7 @@ */ public class StringMatcher implements Predicate { - private static final StringMatcher MATCH_NOTHING = new StringMatcher("(empty)", s -> false); - - protected static final Predicate ALWAYS_TRUE_PREDICATE = s -> true; + private static final StringMatcher MATCH_NOTHING = new StringMatcher("(empty)", Predicates.never()); private final String description; private final Predicate predicate; @@ -70,7 +69,7 @@ public boolean test(String s) { } public boolean isTotal() { - return predicate == ALWAYS_TRUE_PREDICATE; + return predicate == Predicates.always(); } // For testing @@ -130,7 +129,7 @@ public StringMatcher build() { final String description = describe(allText); if (nonExactMatch.contains("*")) { - return new StringMatcher(description, ALWAYS_TRUE_PREDICATE); + return new StringMatcher(description, Predicates.always()); } if (exactMatch.isEmpty()) { return new StringMatcher(description, buildAutomataPredicate(nonExactMatch)); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java index db5746f5c1b47..e189116b0179c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java @@ -465,7 +465,7 @@ private void putLegacyTemplate(final IndexTemplateConfig config, final AtomicBoo final String templateName = config.getTemplateName(); PutIndexTemplateRequest request = new PutIndexTemplateRequest(templateName).source(config.loadBytes(), XContentType.JSON); - request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); + request.masterNodeTimeout(TimeValue.MAX_VALUE); executeAsyncWithOrigin( client.threadPool().getThreadContext(), getOrigin(), @@ -498,7 +498,7 @@ private void putComponentTemplate(final String templateName, final ComponentTemp final Executor executor = threadPool.generic(); executor.execute(() -> { PutComponentTemplateAction.Request request = new PutComponentTemplateAction.Request(templateName).componentTemplate(template); - request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); + request.masterNodeTimeout(TimeValue.MAX_VALUE); executeAsyncWithOrigin( client.threadPool().getThreadContext(), getOrigin(), @@ -538,7 +538,7 @@ private void putComposableTemplate( executor.execute(() -> { TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request(templateName) .indexTemplate(indexTemplate); - request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); + request.masterNodeTimeout(TimeValue.MAX_VALUE); executeAsyncWithOrigin( client.threadPool().getThreadContext(), getOrigin(), @@ -615,7 
+615,7 @@ private void putPolicy(final LifecyclePolicy policy, final AtomicBoolean creatio final Executor executor = threadPool.generic(); executor.execute(() -> { PutLifecycleRequest request = new PutLifecycleRequest(policy); - request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); + request.masterNodeTimeout(TimeValue.MAX_VALUE); executeAsyncWithOrigin( client.threadPool().getThreadContext(), getOrigin(), @@ -727,7 +727,7 @@ private void putIngestPipeline(final IngestPipelineConfig pipelineConfig, final pipelineConfig.loadConfig(), pipelineConfig.getXContentType() ); - request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); + request.masterNodeTimeout(TimeValue.MAX_VALUE); executeAsyncWithOrigin( client.threadPool().getThreadContext(), @@ -815,7 +815,7 @@ public void onFailure(Exception e) { ); RolloverRequest request = new RolloverRequest(rolloverTarget, null); request.lazy(true); - request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); + request.masterNodeTimeout(TimeValue.MAX_VALUE); executeAsyncWithOrigin( client.threadPool().getThreadContext(), getOrigin(), diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/AbstractFindStructureRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/AbstractFindStructureRequest.java new file mode 100644 index 0000000000000..e06ffd3b95a05 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/AbstractFindStructureRequest.java @@ -0,0 +1,377 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.textstructure.action; + +import org.elasticsearch.TransportVersions; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.grok.GrokBuiltinPatterns; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xpack.core.textstructure.structurefinder.TextStructure; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Locale; +import java.util.Objects; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +public abstract class AbstractFindStructureRequest extends ActionRequest { + + public static final int MIN_SAMPLE_LINE_COUNT = 2; + + public static final ParseField LINES_TO_SAMPLE = new ParseField("lines_to_sample"); + public static final ParseField DOCUMENTS_TO_SAMPLE = new ParseField("documents_to_sample"); + public static final ParseField LINE_MERGE_SIZE_LIMIT = new ParseField("line_merge_size_limit"); + public static final ParseField TIMEOUT = new ParseField("timeout"); + public static final ParseField CHARSET = TextStructure.CHARSET; + public static final ParseField FORMAT = TextStructure.FORMAT; + public static final ParseField COLUMN_NAMES = TextStructure.COLUMN_NAMES; + public static final ParseField HAS_HEADER_ROW = TextStructure.HAS_HEADER_ROW; + public static final ParseField DELIMITER = TextStructure.DELIMITER; + public static final ParseField QUOTE = TextStructure.QUOTE; + public static final ParseField SHOULD_TRIM_FIELDS = TextStructure.SHOULD_TRIM_FIELDS; + public static final ParseField GROK_PATTERN = TextStructure.GROK_PATTERN; + // This one is plural in FileStructure, but singular in FileStructureOverrides + public static final ParseField TIMESTAMP_FORMAT = new ParseField("timestamp_format"); + public static final ParseField TIMESTAMP_FIELD = TextStructure.TIMESTAMP_FIELD; + + public static final ParseField ECS_COMPATIBILITY = TextStructure.ECS_COMPATIBILITY; + + private static final String ARG_INCOMPATIBLE_WITH_FORMAT_TEMPLATE = "[%s] may only be specified if [" + + FORMAT.getPreferredName() + + "] is [%s]"; + + private Integer linesToSample; + private Integer lineMergeSizeLimit; + private TimeValue timeout; + private String charset; + private TextStructure.Format format; + private List columnNames; + private Boolean hasHeaderRow; + private Character delimiter; + private Character quote; + private Boolean shouldTrimFields; + private String grokPattern; + private String ecsCompatibility; + private String timestampFormat; + private String timestampField; + + AbstractFindStructureRequest() {} + + AbstractFindStructureRequest(StreamInput in) throws IOException { + super(in); + linesToSample = in.readOptionalVInt(); + lineMergeSizeLimit = in.readOptionalVInt(); + timeout = in.readOptionalTimeValue(); + charset = in.readOptionalString(); + format = in.readBoolean() ? in.readEnum(TextStructure.Format.class) : null; + columnNames = in.readBoolean() ? in.readStringCollectionAsList() : null; + hasHeaderRow = in.readOptionalBoolean(); + delimiter = in.readBoolean() ? (char) in.readVInt() : null; + quote = in.readBoolean() ? 
(char) in.readVInt() : null; + shouldTrimFields = in.readOptionalBoolean(); + grokPattern = in.readOptionalString(); + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_5_0)) { + ecsCompatibility = in.readOptionalString(); + } else { + ecsCompatibility = null; + } + timestampFormat = in.readOptionalString(); + timestampField = in.readOptionalString(); + } + + public Integer getLinesToSample() { + return linesToSample; + } + + public void setLinesToSample(Integer linesToSample) { + this.linesToSample = linesToSample; + } + + public Integer getLineMergeSizeLimit() { + return lineMergeSizeLimit; + } + + public void setLineMergeSizeLimit(Integer lineMergeSizeLimit) { + this.lineMergeSizeLimit = lineMergeSizeLimit; + } + + public TimeValue getTimeout() { + return timeout; + } + + public void setTimeout(TimeValue timeout) { + this.timeout = timeout; + } + + public String getCharset() { + return charset; + } + + public void setCharset(String charset) { + this.charset = (charset == null || charset.isEmpty()) ? null : charset; + } + + public TextStructure.Format getFormat() { + return format; + } + + public void setFormat(TextStructure.Format format) { + this.format = format; + } + + public void setFormat(String format) { + this.format = (format == null || format.isEmpty()) ? null : TextStructure.Format.fromString(format); + } + + public List getColumnNames() { + return columnNames; + } + + public void setColumnNames(List columnNames) { + this.columnNames = (columnNames == null || columnNames.isEmpty()) ? null : columnNames; + } + + public void setColumnNames(String[] columnNames) { + this.columnNames = (columnNames == null || columnNames.length == 0) ? null : Arrays.asList(columnNames); + } + + public Boolean getHasHeaderRow() { + return hasHeaderRow; + } + + public void setHasHeaderRow(Boolean hasHeaderRow) { + this.hasHeaderRow = hasHeaderRow; + } + + public Character getDelimiter() { + return delimiter; + } + + public void setDelimiter(Character delimiter) { + this.delimiter = delimiter; + } + + public void setDelimiter(String delimiter) { + if (delimiter == null || delimiter.isEmpty()) { + this.delimiter = null; + } else if (delimiter.length() == 1) { + this.delimiter = delimiter.charAt(0); + } else { + throw new IllegalArgumentException(DELIMITER.getPreferredName() + " must be a single character"); + } + } + + public Character getQuote() { + return quote; + } + + public void setQuote(Character quote) { + this.quote = quote; + } + + public void setQuote(String quote) { + if (quote == null || quote.isEmpty()) { + this.quote = null; + } else if (quote.length() == 1) { + this.quote = quote.charAt(0); + } else { + throw new IllegalArgumentException(QUOTE.getPreferredName() + " must be a single character"); + } + } + + public Boolean getShouldTrimFields() { + return shouldTrimFields; + } + + public void setShouldTrimFields(Boolean shouldTrimFields) { + this.shouldTrimFields = shouldTrimFields; + } + + public String getGrokPattern() { + return grokPattern; + } + + public void setGrokPattern(String grokPattern) { + this.grokPattern = (grokPattern == null || grokPattern.isEmpty()) ? null : grokPattern; + } + + public String getEcsCompatibility() { + return ecsCompatibility; + } + + public void setEcsCompatibility(String ecsCompatibility) { + this.ecsCompatibility = (ecsCompatibility == null || ecsCompatibility.isEmpty()) ? 
null : ecsCompatibility; + } + + public String getTimestampFormat() { + return timestampFormat; + } + + public void setTimestampFormat(String timestampFormat) { + this.timestampFormat = (timestampFormat == null || timestampFormat.isEmpty()) ? null : timestampFormat; + } + + public String getTimestampField() { + return timestampField; + } + + public void setTimestampField(String timestampField) { + this.timestampField = (timestampField == null || timestampField.isEmpty()) ? null : timestampField; + } + + private static ActionRequestValidationException addIncompatibleArgError( + ParseField arg, + TextStructure.Format format, + ActionRequestValidationException validationException + ) { + return addValidationError( + String.format(Locale.ROOT, ARG_INCOMPATIBLE_WITH_FORMAT_TEMPLATE, arg.getPreferredName(), format), + validationException + ); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (linesToSample != null && linesToSample < MIN_SAMPLE_LINE_COUNT) { + validationException = addValidationError( + "[" + LINES_TO_SAMPLE.getPreferredName() + "] must be at least [" + MIN_SAMPLE_LINE_COUNT + "] if specified", + validationException + ); + } + if (lineMergeSizeLimit != null && lineMergeSizeLimit <= 0) { + validationException = addValidationError( + "[" + LINE_MERGE_SIZE_LIMIT.getPreferredName() + "] must be positive if specified", + validationException + ); + } + if (format != TextStructure.Format.DELIMITED) { + if (columnNames != null) { + validationException = addIncompatibleArgError(COLUMN_NAMES, TextStructure.Format.DELIMITED, validationException); + } + if (hasHeaderRow != null) { + validationException = addIncompatibleArgError(HAS_HEADER_ROW, TextStructure.Format.DELIMITED, validationException); + } + if (delimiter != null) { + validationException = addIncompatibleArgError(DELIMITER, TextStructure.Format.DELIMITED, validationException); + } + if (quote != null) { + validationException = addIncompatibleArgError(QUOTE, TextStructure.Format.DELIMITED, validationException); + } + if (shouldTrimFields != null) { + validationException = addIncompatibleArgError(SHOULD_TRIM_FIELDS, TextStructure.Format.DELIMITED, validationException); + } + } + if (format != TextStructure.Format.SEMI_STRUCTURED_TEXT) { + if (grokPattern != null) { + validationException = addIncompatibleArgError(GROK_PATTERN, TextStructure.Format.SEMI_STRUCTURED_TEXT, validationException); + } + } + + if (ecsCompatibility != null && GrokBuiltinPatterns.isValidEcsCompatibilityMode(ecsCompatibility) == false) { + validationException = addValidationError( + "[" + + ECS_COMPATIBILITY.getPreferredName() + + "] must be one of [" + + String.join(", ", GrokBuiltinPatterns.ECS_COMPATIBILITY_MODES) + + "] if specified", + validationException + ); + } + + return validationException; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeOptionalVInt(linesToSample); + out.writeOptionalVInt(lineMergeSizeLimit); + out.writeOptionalTimeValue(timeout); + out.writeOptionalString(charset); + if (format == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeEnum(format); + } + if (columnNames == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeStringCollection(columnNames); + } + out.writeOptionalBoolean(hasHeaderRow); + if (delimiter == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeVInt(delimiter); + } + 
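For readers unfamiliar with the wire format used in writeTo above: each optional, non-Writeable field is encoded as a presence flag followed by the value, mirrored by the boolean checks in the stream constructor earlier in the class. A minimal stand-alone sketch with plain DataOutput/DataInput (names illustrative; the real code writes the char with StreamOutput's variable-length writeVInt):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Sketch of the presence-flag encoding for an optional single-character field.
final class OptionalCharWireSketch {
    static void writeOptionalChar(DataOutputStream out, Character c) throws IOException {
        if (c == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            out.writeInt(c.charValue());
        }
    }

    static Character readOptionalChar(DataInputStream in) throws IOException {
        return in.readBoolean() ? (char) in.readInt() : null;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        writeOptionalChar(new DataOutputStream(bytes), '|');
        System.out.println(readOptionalChar(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())))); // |
    }
}

Continuing with the patch: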
if (quote == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeVInt(quote); + } + out.writeOptionalBoolean(shouldTrimFields); + out.writeOptionalString(grokPattern); + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_5_0)) { + out.writeOptionalString(ecsCompatibility); + } + out.writeOptionalString(timestampFormat); + out.writeOptionalString(timestampField); + } + + @Override + public int hashCode() { + return Objects.hash( + linesToSample, + lineMergeSizeLimit, + timeout, + charset, + format, + columnNames, + hasHeaderRow, + delimiter, + grokPattern, + ecsCompatibility, + timestampFormat, + timestampField + ); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other == null || getClass() != other.getClass()) { + return false; + } + AbstractFindStructureRequest that = (AbstractFindStructureRequest) other; + return Objects.equals(this.linesToSample, that.linesToSample) + && Objects.equals(this.lineMergeSizeLimit, that.lineMergeSizeLimit) + && Objects.equals(this.timeout, that.timeout) + && Objects.equals(this.charset, that.charset) + && Objects.equals(this.format, that.format) + && Objects.equals(this.columnNames, that.columnNames) + && Objects.equals(this.hasHeaderRow, that.hasHeaderRow) + && Objects.equals(this.delimiter, that.delimiter) + && Objects.equals(this.grokPattern, that.grokPattern) + && Objects.equals(this.ecsCompatibility, that.ecsCompatibility) + && Objects.equals(this.timestampFormat, that.timestampFormat) + && Objects.equals(this.timestampField, that.timestampField); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindFieldStructureAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindFieldStructureAction.java new file mode 100644 index 0000000000000..2e6f3af312e2b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindFieldStructureAction.java @@ -0,0 +1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ +package org.elasticsearch.xpack.core.textstructure.action; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xcontent.ParseField; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +public class FindFieldStructureAction extends ActionType { + + public static final FindFieldStructureAction INSTANCE = new FindFieldStructureAction(); + public static final String NAME = "cluster:monitor/text_structure/find_field_structure"; + + private FindFieldStructureAction() { + super(NAME); + } + + public static class Request extends AbstractFindStructureRequest { + + public static final ParseField INDEX = new ParseField("index"); + public static final ParseField FIELD = new ParseField("field"); + + private String index; + private String field; + + public Request() {} + + public Request(StreamInput in) throws IOException { + super(in); + index = in.readString(); + field = in.readString(); + } + + public String getIndex() { + return index; + } + + public void setIndex(String index) { + this.index = index; + } + + public String getField() { + return field; + } + + public void setField(String field) { + this.field = field; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = super.validate(); + if (Strings.isNullOrEmpty(index)) { + validationException = addValidationError("index must be specified", validationException); + } + if (Strings.isNullOrEmpty(field)) { + validationException = addValidationError("field must be specified", validationException); + } + return validationException; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(index); + out.writeString(field); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), field, index); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other == null || getClass() != other.getClass()) { + return false; + } + Request that = (Request) other; + return super.equals(other) && Objects.equals(this.index, that.index) && Objects.equals(this.field, that.field); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindMessageStructureAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindMessageStructureAction.java new file mode 100644 index 0000000000000..49035b36ff42c --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindMessageStructureAction.java @@ -0,0 +1,97 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ +package org.elasticsearch.xpack.core.textstructure.action; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +public class FindMessageStructureAction extends ActionType<FindStructureResponse> { + + public static final FindMessageStructureAction INSTANCE = new FindMessageStructureAction(); + public static final String NAME = "cluster:monitor/text_structure/find_message_structure"; + + private FindMessageStructureAction() { + super(NAME); + } + + public static class Request extends AbstractFindStructureRequest { + + public static final ParseField MESSAGES = new ParseField("messages"); + + private List<String> messages; + + private static final ObjectParser<Request, Void> PARSER = createParser(); + + private static ObjectParser<Request, Void> createParser() { + ObjectParser<Request, Void> parser = new ObjectParser<>("text_structure/find_message_structure", false, Request::new); + parser.declareStringArray(Request::setMessages, MESSAGES); + return parser; + } + + public Request() {} + + public Request(StreamInput in) throws IOException { + super(in); + messages = in.readStringCollectionAsList(); + } + + public static Request parseRequest(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + public List<String> getMessages() { + return messages; + } + + public void setMessages(List<String> messages) { + this.messages = messages; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = super.validate(); + if (messages == null || messages.isEmpty()) { + validationException = addValidationError("messages must be specified", validationException); + } + return validationException; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringCollection(messages); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), messages); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other == null || getClass() != other.getClass()) { + return false; + } + Request that = (Request) other; + return super.equals(other) && Objects.equals(this.messages, that.messages); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindStructureAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindStructureAction.java index 98bdff8cbced7..15aa3be46a675 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindStructureAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindStructureAction.java @@ -6,290 +6,37 @@ */ package org.elasticsearch.xpack.core.textstructure.action; -import org.elasticsearch.TransportVersions; -import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; import org.elasticsearch.common.bytes.BytesReference; import
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.grok.GrokBuiltinPatterns; -import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.ToXContentObject; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.textstructure.structurefinder.TextStructure; import java.io.IOException; -import java.util.Arrays; -import java.util.List; -import java.util.Locale; import java.util.Objects; import static org.elasticsearch.action.ValidateActions.addValidationError; -public class FindStructureAction extends ActionType { +public class FindStructureAction extends ActionType { public static final FindStructureAction INSTANCE = new FindStructureAction(); public static final String NAME = "cluster:monitor/text_structure/findstructure"; - public static final int MIN_SAMPLE_LINE_COUNT = 2; - private FindStructureAction() { super(NAME); } - public static class Response extends ActionResponse implements ToXContentObject, Writeable { - - private final TextStructure textStructure; - - public Response(TextStructure textStructure) { - this.textStructure = textStructure; - } - - Response(StreamInput in) throws IOException { - super(in); - textStructure = new TextStructure(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - textStructure.writeTo(out); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - textStructure.toXContent(builder, params); - return builder; - } - - @Override - public int hashCode() { - return Objects.hash(textStructure); - } - - @Override - public boolean equals(Object other) { - - if (this == other) { - return true; - } - - if (other == null || getClass() != other.getClass()) { - return false; - } - - FindStructureAction.Response that = (FindStructureAction.Response) other; - return Objects.equals(textStructure, that.textStructure); - } - } - - public static class Request extends ActionRequest { - - public static final ParseField LINES_TO_SAMPLE = new ParseField("lines_to_sample"); - public static final ParseField LINE_MERGE_SIZE_LIMIT = new ParseField("line_merge_size_limit"); - public static final ParseField TIMEOUT = new ParseField("timeout"); - public static final ParseField CHARSET = TextStructure.CHARSET; - public static final ParseField FORMAT = TextStructure.FORMAT; - public static final ParseField COLUMN_NAMES = TextStructure.COLUMN_NAMES; - public static final ParseField HAS_HEADER_ROW = TextStructure.HAS_HEADER_ROW; - public static final ParseField DELIMITER = TextStructure.DELIMITER; - public static final ParseField QUOTE = TextStructure.QUOTE; - public static final ParseField SHOULD_TRIM_FIELDS = TextStructure.SHOULD_TRIM_FIELDS; - public static final ParseField GROK_PATTERN = TextStructure.GROK_PATTERN; - // This one is plural in FileStructure, but singular in FileStructureOverrides - public static final ParseField TIMESTAMP_FORMAT = new ParseField("timestamp_format"); - public static final ParseField TIMESTAMP_FIELD = TextStructure.TIMESTAMP_FIELD; + public static class Request extends AbstractFindStructureRequest { - public static final ParseField ECS_COMPATIBILITY = TextStructure.ECS_COMPATIBILITY; - - private static final String ARG_INCOMPATIBLE_WITH_FORMAT_TEMPLATE = "[%s] may only be specified if [" - + FORMAT.getPreferredName() - + "] is [%s]"; - - 
private Integer linesToSample; - private Integer lineMergeSizeLimit; - private TimeValue timeout; - private String charset; - private TextStructure.Format format; - private List columnNames; - private Boolean hasHeaderRow; - private Character delimiter; - private Character quote; - private Boolean shouldTrimFields; - private String grokPattern; - private String ecsCompatibility; - private String timestampFormat; - private String timestampField; private BytesReference sample; public Request() {} public Request(StreamInput in) throws IOException { super(in); - linesToSample = in.readOptionalVInt(); - lineMergeSizeLimit = in.readOptionalVInt(); - timeout = in.readOptionalTimeValue(); - charset = in.readOptionalString(); - format = in.readBoolean() ? in.readEnum(TextStructure.Format.class) : null; - columnNames = in.readBoolean() ? in.readStringCollectionAsList() : null; - hasHeaderRow = in.readOptionalBoolean(); - delimiter = in.readBoolean() ? (char) in.readVInt() : null; - quote = in.readBoolean() ? (char) in.readVInt() : null; - shouldTrimFields = in.readOptionalBoolean(); - grokPattern = in.readOptionalString(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_5_0)) { - ecsCompatibility = in.readOptionalString(); - } else { - ecsCompatibility = null; - } - timestampFormat = in.readOptionalString(); - timestampField = in.readOptionalString(); sample = in.readBytesReference(); } - public Integer getLinesToSample() { - return linesToSample; - } - - public void setLinesToSample(Integer linesToSample) { - this.linesToSample = linesToSample; - } - - public Integer getLineMergeSizeLimit() { - return lineMergeSizeLimit; - } - - public void setLineMergeSizeLimit(Integer lineMergeSizeLimit) { - this.lineMergeSizeLimit = lineMergeSizeLimit; - } - - public TimeValue getTimeout() { - return timeout; - } - - public void setTimeout(TimeValue timeout) { - this.timeout = timeout; - } - - public String getCharset() { - return charset; - } - - public void setCharset(String charset) { - this.charset = (charset == null || charset.isEmpty()) ? null : charset; - } - - public TextStructure.Format getFormat() { - return format; - } - - public void setFormat(TextStructure.Format format) { - this.format = format; - } - - public void setFormat(String format) { - this.format = (format == null || format.isEmpty()) ? null : TextStructure.Format.fromString(format); - } - - public List getColumnNames() { - return columnNames; - } - - public void setColumnNames(List columnNames) { - this.columnNames = (columnNames == null || columnNames.isEmpty()) ? null : columnNames; - } - - public void setColumnNames(String[] columnNames) { - this.columnNames = (columnNames == null || columnNames.length == 0) ? 
null : Arrays.asList(columnNames); - } - - public Boolean getHasHeaderRow() { - return hasHeaderRow; - } - - public void setHasHeaderRow(Boolean hasHeaderRow) { - this.hasHeaderRow = hasHeaderRow; - } - - public Character getDelimiter() { - return delimiter; - } - - public void setDelimiter(Character delimiter) { - this.delimiter = delimiter; - } - - public void setDelimiter(String delimiter) { - if (delimiter == null || delimiter.isEmpty()) { - this.delimiter = null; - } else if (delimiter.length() == 1) { - this.delimiter = delimiter.charAt(0); - } else { - throw new IllegalArgumentException(DELIMITER.getPreferredName() + " must be a single character"); - } - } - - public Character getQuote() { - return quote; - } - - public void setQuote(Character quote) { - this.quote = quote; - } - - public void setQuote(String quote) { - if (quote == null || quote.isEmpty()) { - this.quote = null; - } else if (quote.length() == 1) { - this.quote = quote.charAt(0); - } else { - throw new IllegalArgumentException(QUOTE.getPreferredName() + " must be a single character"); - } - } - - public Boolean getShouldTrimFields() { - return shouldTrimFields; - } - - public void setShouldTrimFields(Boolean shouldTrimFields) { - this.shouldTrimFields = shouldTrimFields; - } - - public String getGrokPattern() { - return grokPattern; - } - - public void setGrokPattern(String grokPattern) { - this.grokPattern = (grokPattern == null || grokPattern.isEmpty()) ? null : grokPattern; - } - - public String getEcsCompatibility() { - return ecsCompatibility; - } - - public void setEcsCompatibility(String ecsCompatibility) { - this.ecsCompatibility = (ecsCompatibility == null || ecsCompatibility.isEmpty()) ? null : ecsCompatibility; - } - - public String getTimestampFormat() { - return timestampFormat; - } - - public void setTimestampFormat(String timestampFormat) { - this.timestampFormat = (timestampFormat == null || timestampFormat.isEmpty()) ? null : timestampFormat; - } - - public String getTimestampField() { - return timestampField; - } - - public void setTimestampField(String timestampField) { - this.timestampField = (timestampField == null || timestampField.isEmpty()) ? 
null : timestampField; - } - public BytesReference getSample() { return sample; } @@ -298,70 +45,9 @@ public void setSample(BytesReference sample) { this.sample = sample; } - private static ActionRequestValidationException addIncompatibleArgError( - ParseField arg, - TextStructure.Format format, - ActionRequestValidationException validationException - ) { - return addValidationError( - String.format(Locale.ROOT, ARG_INCOMPATIBLE_WITH_FORMAT_TEMPLATE, arg.getPreferredName(), format), - validationException - ); - } - @Override public ActionRequestValidationException validate() { - ActionRequestValidationException validationException = null; - if (linesToSample != null && linesToSample < MIN_SAMPLE_LINE_COUNT) { - validationException = addValidationError( - "[" + LINES_TO_SAMPLE.getPreferredName() + "] must be at least [" + MIN_SAMPLE_LINE_COUNT + "] if specified", - validationException - ); - } - if (lineMergeSizeLimit != null && lineMergeSizeLimit <= 0) { - validationException = addValidationError( - "[" + LINE_MERGE_SIZE_LIMIT.getPreferredName() + "] must be positive if specified", - validationException - ); - } - if (format != TextStructure.Format.DELIMITED) { - if (columnNames != null) { - validationException = addIncompatibleArgError(COLUMN_NAMES, TextStructure.Format.DELIMITED, validationException); - } - if (hasHeaderRow != null) { - validationException = addIncompatibleArgError(HAS_HEADER_ROW, TextStructure.Format.DELIMITED, validationException); - } - if (delimiter != null) { - validationException = addIncompatibleArgError(DELIMITER, TextStructure.Format.DELIMITED, validationException); - } - if (quote != null) { - validationException = addIncompatibleArgError(QUOTE, TextStructure.Format.DELIMITED, validationException); - } - if (shouldTrimFields != null) { - validationException = addIncompatibleArgError(SHOULD_TRIM_FIELDS, TextStructure.Format.DELIMITED, validationException); - } - } - if (format != TextStructure.Format.SEMI_STRUCTURED_TEXT) { - if (grokPattern != null) { - validationException = addIncompatibleArgError( - GROK_PATTERN, - TextStructure.Format.SEMI_STRUCTURED_TEXT, - validationException - ); - } - } - - if (ecsCompatibility != null && GrokBuiltinPatterns.isValidEcsCompatibilityMode(ecsCompatibility) == false) { - validationException = addValidationError( - "[" - + ECS_COMPATIBILITY.getPreferredName() - + "] must be one of [" - + String.join(", ", GrokBuiltinPatterns.ECS_COMPATIBILITY_MODES) - + "] if specified", - validationException - ); - } - + ActionRequestValidationException validationException = super.validate(); if (sample == null || sample.length() == 0) { validationException = addValidationError("sample must be specified", validationException); } @@ -371,89 +57,24 @@ public ActionRequestValidationException validate() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeOptionalVInt(linesToSample); - out.writeOptionalVInt(lineMergeSizeLimit); - out.writeOptionalTimeValue(timeout); - out.writeOptionalString(charset); - if (format == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeEnum(format); - } - if (columnNames == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeStringCollection(columnNames); - } - out.writeOptionalBoolean(hasHeaderRow); - if (delimiter == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeVInt(delimiter); - } - if (quote == null) { - out.writeBoolean(false); - } else { - 
out.writeBoolean(true); - out.writeVInt(quote); - } - out.writeOptionalBoolean(shouldTrimFields); - out.writeOptionalString(grokPattern); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_5_0)) { - out.writeOptionalString(ecsCompatibility); - } - out.writeOptionalString(timestampFormat); - out.writeOptionalString(timestampField); out.writeBytesReference(sample); } @Override public int hashCode() { - return Objects.hash( - linesToSample, - lineMergeSizeLimit, - timeout, - charset, - format, - columnNames, - hasHeaderRow, - delimiter, - grokPattern, - ecsCompatibility, - timestampFormat, - timestampField, - sample - ); + return Objects.hash(super.hashCode(), sample); } @Override public boolean equals(Object other) { - if (this == other) { return true; } - if (other == null || getClass() != other.getClass()) { return false; } - Request that = (Request) other; - return Objects.equals(this.linesToSample, that.linesToSample) - && Objects.equals(this.lineMergeSizeLimit, that.lineMergeSizeLimit) - && Objects.equals(this.timeout, that.timeout) - && Objects.equals(this.charset, that.charset) - && Objects.equals(this.format, that.format) - && Objects.equals(this.columnNames, that.columnNames) - && Objects.equals(this.hasHeaderRow, that.hasHeaderRow) - && Objects.equals(this.delimiter, that.delimiter) - && Objects.equals(this.grokPattern, that.grokPattern) - && Objects.equals(this.ecsCompatibility, that.ecsCompatibility) - && Objects.equals(this.timestampFormat, that.timestampFormat) - && Objects.equals(this.timestampField, that.timestampField) - && Objects.equals(this.sample, that.sample); + return super.equals(other) && Objects.equals(this.sample, that.sample); } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindStructureResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindStructureResponse.java new file mode 100644 index 0000000000000..5848c2cbd0a1d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindStructureResponse.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.textstructure.action; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.textstructure.structurefinder.TextStructure; + +import java.io.IOException; +import java.util.Objects; + +public class FindStructureResponse extends ActionResponse implements ToXContentObject, Writeable { + + private final TextStructure textStructure; + + public FindStructureResponse(TextStructure textStructure) { + this.textStructure = textStructure; + } + + FindStructureResponse(StreamInput in) throws IOException { + super(in); + textStructure = new TextStructure(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + textStructure.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + textStructure.toXContent(builder, params); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(textStructure); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other == null || getClass() != other.getClass()) { + return false; + } + FindStructureResponse that = (FindStructureResponse) other; + return Objects.equals(textStructure, that.textStructure); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfig.java index 9b0fa3876819b..1557f2843b6af 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfig.java @@ -31,6 +31,9 @@ import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; public class SettingsConfig implements Writeable, ToXContentObject { + + public static final SettingsConfig EMPTY = new SettingsConfig(null, null, null, null, null, null, null, (Integer) null); + public static final ConstructingObjectParser STRICT_PARSER = createParser(false); public static final ConstructingObjectParser LENIENT_PARSER = createParser(true); @@ -110,10 +113,6 @@ private static ConstructingObjectParser createParser(boole private final Integer numFailureRetries; private final Integer unattended; - public SettingsConfig() { - this(null, null, (Integer) null, (Integer) null, (Integer) null, (Integer) null, (Integer) null, (Integer) null); - } - public SettingsConfig( Integer maxPageSearchSize, Float docsPerSecond, @@ -136,7 +135,7 @@ public SettingsConfig( ); } - SettingsConfig( + private SettingsConfig( Integer maxPageSearchSize, Float docsPerSecond, Integer datesAsEpochMillis, @@ -188,51 +187,51 @@ public Float getDocsPerSecond() { return docsPerSecond; } - public Boolean getDatesAsEpochMillis() { + Boolean getDatesAsEpochMillis() { return datesAsEpochMillis != null ? datesAsEpochMillis > 0 : null; } - public Integer getDatesAsEpochMillisForUpdate() { + Integer getDatesAsEpochMillisForUpdate() { return datesAsEpochMillis; } - public Boolean getAlignCheckpoints() { + Boolean getAlignCheckpoints() { return alignCheckpoints != null ? 
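// With the zero-argument constructor removed, callers that need "all defaults" are assumed to reuse the
// shared SettingsConfig.EMPTY constant introduced above; the TransformConfig change later in this diff
// shows the intended call-site pattern, e.g.:
//     this.settings = settings == null ? SettingsConfig.EMPTY : settings;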
(alignCheckpoints > 0) || (alignCheckpoints == DEFAULT_ALIGN_CHECKPOINTS) : null; } - public Integer getAlignCheckpointsForUpdate() { + Integer getAlignCheckpointsForUpdate() { return alignCheckpoints; } - public Boolean getUsePit() { + Boolean getUsePit() { return usePit != null ? (usePit > 0) || (usePit == DEFAULT_USE_PIT) : null; } - public Integer getUsePitForUpdate() { + Integer getUsePitForUpdate() { return usePit; } - public Boolean getDeduceMappings() { + Boolean getDeduceMappings() { return deduceMappings != null ? (deduceMappings > 0) || (deduceMappings == DEFAULT_DEDUCE_MAPPINGS) : null; } - public Integer getDeduceMappingsForUpdate() { + Integer getDeduceMappingsForUpdate() { return deduceMappings; } - public Integer getNumFailureRetries() { + Integer getNumFailureRetries() { return numFailureRetries != null ? (numFailureRetries == DEFAULT_NUM_FAILURE_RETRIES ? null : numFailureRetries) : null; } - public Integer getNumFailureRetriesForUpdate() { + Integer getNumFailureRetriesForUpdate() { return numFailureRetries; } - public Boolean getUnattended() { + Boolean getUnattended() { return unattended != null ? (unattended == DEFAULT_UNATTENDED) ? null : (unattended > 0) : null; } - public Integer getUnattendedForUpdate() { + Integer getUnattendedForUpdate() { return unattended; } @@ -495,7 +494,7 @@ public Builder setNumFailureRetries(Integer numFailureRetries) { * An explicit `null` resets to default. * * @param unattended true if this is a unattended transform. - * @return the {@link Builder} with usePit set. + * @return the {@link Builder} with unattended set. */ public Builder setUnattended(Boolean unattended) { this.unattended = unattended == null ? DEFAULT_UNATTENDED : unattended ? 1 : 0; @@ -545,7 +544,6 @@ public Builder update(SettingsConfig update) { if (update.getUnattendedForUpdate() != null) { this.unattended = update.getUnattendedForUpdate().equals(DEFAULT_UNATTENDED) ? null : update.getUnattendedForUpdate(); } - return this; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfig.java index d89eb9b397180..fb782bdae0068 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfig.java @@ -234,7 +234,7 @@ public TransformConfig( this.pivotConfig = pivotConfig; this.latestConfig = latestConfig; this.description = description; - this.settings = settings == null ? new SettingsConfig() : settings; + this.settings = settings == null ? SettingsConfig.EMPTY : settings; this.metadata = metadata; this.retentionPolicyConfig = retentionPolicyConfig; if (this.description != null && this.description.length() > MAX_DESCRIPTION_LENGTH) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformEffectiveSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformEffectiveSettings.java new file mode 100644 index 0000000000000..3d4b8ccc64d89 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformEffectiveSettings.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + 2.0; you may not use this file except in compliance with the Elastic License + 2.0. + */ + +package org.elasticsearch.xpack.core.transform.transforms; + +import org.elasticsearch.xpack.core.transform.TransformConfigVersion; + +public final class TransformEffectiveSettings { + + private TransformEffectiveSettings() {} + + /** + * Determines if the transform should write dates as epoch millis based on settings and version. + * + * @param settings transform's settings + * @return whether or not the transform should write dates as epoch millis + */ + public static boolean writeDatesAsEpochMillis(SettingsConfig settings, TransformConfigVersion version) { + // defines how dates are written, if not specified in settings + // < 7.11 as epoch millis + // >= 7.11 as string + // note: it depends on the version when the transform has been created, not the version of the code + return settings.getDatesAsEpochMillis() != null + ? settings.getDatesAsEpochMillis() + : version.before(TransformConfigVersion.V_7_11_0); + } + + /** + * Determines if aligning checkpoints is disabled for this transform based on settings. + * + * @param settings transform's settings + * @return whether or not aligning checkpoints is disabled for this transform + */ + public static boolean isAlignCheckpointsDisabled(SettingsConfig settings) { + return Boolean.FALSE.equals(settings.getAlignCheckpoints()); + } + + /** + * Determines if pit is disabled for this transform based on settings. + * + * @param settings transform's settings + * @return whether or not pit is disabled for this transform + */ + public static boolean isPitDisabled(SettingsConfig settings) { + return Boolean.FALSE.equals(settings.getUsePit()); + } + + /** + * Determines if mappings deduction is disabled for this transform based on settings. + * + * @param settings transform's settings + * @return whether or not mappings deduction is disabled for this transform + */ + public static boolean isDeduceMappingsDisabled(SettingsConfig settings) { + return Boolean.FALSE.equals(settings.getDeduceMappings()); + } + + /** + * Determines the appropriate number of retries. +

    + * The number of retries are read from the config or if not read from the context which is based on a cluster wide default. + * If the transform runs in unattended mode, the number of retries is always indefinite. + * + * @param settings transform's settings + * @return the number of retries or -1 if retries are indefinite + */ + public static int getNumFailureRetries(SettingsConfig settings, int defaultNumFailureRetries) { + return isUnattended(settings) ? -1 + : settings.getNumFailureRetries() != null ? settings.getNumFailureRetries() + : defaultNumFailureRetries; + } + + /** + * Determines if the transform is unattended based on settings. + * + * @param settings transform's settings + * @return whether or not the transform is unattended + */ + public static boolean isUnattended(SettingsConfig settings) { + return Boolean.TRUE.equals(settings.getUnattended()); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderTests.java index 7134ceba475fe..7ed57ca93adf0 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderTests.java @@ -202,7 +202,16 @@ public void testIndexPrefer() { ) ); } + } + { + final var state = clusterStateWithIndexAndNodes("data_warm", DiscoveryNodes.builder().add(DATA_NODE).build(), null); + assertAllocationDecision( + state, + DATA_NODE, + Decision.Type.YES, + "index has a preference for tiers [data_warm] and node has tier [data]" + ); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/DeleteStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/DeleteStepTests.java index 32e9148de067c..7445e82da3ecf 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/DeleteStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/DeleteStepTests.java @@ -20,8 +20,12 @@ import org.mockito.Mockito; import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; +import static org.elasticsearch.test.ActionListenerUtils.anyActionListener; import static org.hamcrest.Matchers.is; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doThrow; public class DeleteStepTests extends AbstractStepTestCase { @@ -76,7 +80,7 @@ public void testDeleted() throws Exception { assertEquals(indexMetadata.getIndex().getName(), request.indices()[0]); listener.onResponse(null); return null; - }).when(indicesClient).delete(Mockito.any(), Mockito.any()); + }).when(indicesClient).delete(any(), any()); DeleteStep step = createRandomInstance(); ClusterState clusterState = ClusterState.builder(emptyClusterState()) @@ -86,7 +90,7 @@ public void testDeleted() throws Exception { Mockito.verify(client, Mockito.only()).admin(); Mockito.verify(adminClient, Mockito.only()).indices(); - Mockito.verify(indicesClient, Mockito.only()).delete(Mockito.any(), Mockito.any()); + Mockito.verify(indicesClient, Mockito.only()).delete(any(), any()); } public void testExceptionThrown() { @@ -102,7 +106,7 @@ public void testExceptionThrown() { assertEquals(indexMetadata.getIndex().getName(), request.indices()[0]); listener.onFailure(exception); return null; - 
}).when(indicesClient).delete(Mockito.any(), Mockito.any()); + }).when(indicesClient).delete(any(), any()); DeleteStep step = createRandomInstance(); ClusterState clusterState = ClusterState.builder(emptyClusterState()) @@ -117,7 +121,13 @@ public void testExceptionThrown() { ); } - public void testPerformActionThrowsExceptionIfIndexIsTheDataStreamWriteIndex() { + public void testPerformActionCallsFailureListenerIfIndexIsTheDataStreamWriteIndex() { + doThrow( + new IllegalStateException( + "the client must not be called in this test as we should fail in the step validation phase before we call the delete API" + ) + ).when(indicesClient).delete(any(DeleteIndexRequest.class), anyActionListener()); + String policyName = "test-ilm-policy"; String dataStreamName = randomAlphaOfLength(10); @@ -149,31 +159,32 @@ public void testPerformActionThrowsExceptionIfIndexIsTheDataStreamWriteIndex() { .metadata(Metadata.builder().put(index1, false).put(sourceIndexMetadata, false).put(dataStream).build()) .build(); - IllegalStateException illegalStateException = expectThrows( - IllegalStateException.class, - () -> createRandomInstance().performDuringNoSnapshot(sourceIndexMetadata, clusterState, new ActionListener<>() { - @Override - public void onResponse(Void complete) { - fail("unexpected listener callback"); - } - - @Override - public void onFailure(Exception e) { - fail("unexpected listener callback"); - } - }) - ); - assertThat( - illegalStateException.getMessage(), - is( - "index [" - + sourceIndexMetadata.getIndex().getName() - + "] is the write index for data stream [" - + dataStreamName - + "]. stopping execution of lifecycle [test-ilm-policy] as a data stream's write index cannot be deleted. " - + "manually rolling over the index will resume the execution of the policy as the index will not be the " - + "data stream's write index anymore" - ) - ); + AtomicBoolean listenerCalled = new AtomicBoolean(false); + createRandomInstance().performDuringNoSnapshot(sourceIndexMetadata, clusterState, new ActionListener<>() { + @Override + public void onResponse(Void complete) { + listenerCalled.set(true); + fail("unexpected listener callback"); + } + + @Override + public void onFailure(Exception e) { + listenerCalled.set(true); + assertThat( + e.getMessage(), + is( + "index [" + + sourceIndexMetadata.getIndex().getName() + + "] is the write index for data stream [" + + dataStreamName + + "]. stopping execution of lifecycle [test-ilm-policy] as a data stream's write index cannot be deleted. 
" + + "manually rolling over the index will resume the execution of the policy as the index will not be the " + + "data stream's write index anymore" + ) + ); + } + }); + + assertThat(listenerCalled.get(), is(true)); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/AggProviderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/AggProviderTests.java index 40c6a74f4aaa0..19caa6a96d515 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/AggProviderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/AggProviderTests.java @@ -67,7 +67,7 @@ public static AggProvider createRandomValidAggProvider() { } public static AggProvider createRandomValidAggProvider(String name, String field) { - Map agg = Collections.singletonMap(name, Collections.singletonMap("avg", Collections.singletonMap("field", field))); + Map agg = Map.of(name, Map.of("avg", Map.of("field", field))); try { SearchModule searchModule = new SearchModule(Settings.EMPTY, Collections.emptyList()); AggregatorFactories.Builder aggs = XContentObjectTransformer.aggregatorTransformer( @@ -87,95 +87,65 @@ public void testEmptyAggMap() throws IOException { assertThat(e.getMessage(), equalTo("Datafeed aggregations are not parsable")); } + private static HashMap hashMapOf(K key, V value) { + HashMap map = new HashMap<>(); + map.put(key, value); + return map; + } + + private static HashMap hashMapOf(K k1, V v1, K k2, V v2, K k3, V v3) { + HashMap map = new HashMap<>(); + map.put(k1, v1); + map.put(k2, v2); + map.put(k3, v3); + return map; + } + public void testRewriteBadNumericInterval() { long numericInterval = randomNonNegativeLong(); - Map maxTime = Collections.singletonMap("max", Collections.singletonMap("field", "time")); - Map numericDeprecated = new HashMap<>() { - { - put("interval", numericInterval); - put("field", "foo"); - put("aggs", Collections.singletonMap("time", maxTime)); - } - }; - Map expected = new HashMap<>() { - { - put("fixed_interval", numericInterval + "ms"); - put("field", "foo"); - put("aggs", Collections.singletonMap("time", maxTime)); - } - }; - Map deprecated = Collections.singletonMap("buckets", Collections.singletonMap("date_histogram", numericDeprecated)); + Map maxTime = Map.of("max", Map.of("field", "time")); + Map numericDeprecated = hashMapOf("interval", numericInterval, "field", "foo", "aggs", Map.of("time", maxTime)); + Map expected = Map.of("fixed_interval", numericInterval + "ms", "field", "foo", "aggs", Map.of("time", maxTime)); + Map deprecated = hashMapOf("buckets", hashMapOf("date_histogram", numericDeprecated)); assertTrue(AggProvider.rewriteDateHistogramInterval(deprecated, false)); - assertThat(deprecated, equalTo(Collections.singletonMap("buckets", Collections.singletonMap("date_histogram", expected)))); - - numericDeprecated = new HashMap<>() { - { - put("interval", numericInterval + "ms"); - put("field", "foo"); - put("aggs", Collections.singletonMap("time", maxTime)); - } - }; - deprecated = Collections.singletonMap("date_histogram", Collections.singletonMap("date_histogram", numericDeprecated)); + assertThat(deprecated, equalTo(Map.of("buckets", Map.of("date_histogram", expected)))); + + numericDeprecated = hashMapOf("interval", numericInterval + "ms", "field", "foo", "aggs", Map.of("time", maxTime)); + deprecated = hashMapOf("date_histogram", hashMapOf("date_histogram", numericDeprecated)); 
assertTrue(AggProvider.rewriteDateHistogramInterval(deprecated, false)); - assertThat(deprecated, equalTo(Collections.singletonMap("date_histogram", Collections.singletonMap("date_histogram", expected)))); + assertThat(deprecated, equalTo(Map.of("date_histogram", Map.of("date_histogram", expected)))); } public void testRewriteBadCalendarInterval() { String calendarInterval = "1w"; - Map maxTime = Collections.singletonMap("max", Collections.singletonMap("field", "time")); - Map calendarDeprecated = new HashMap<>() { - { - put("interval", calendarInterval); - put("field", "foo"); - put("aggs", Collections.singletonMap("time", maxTime)); - } - }; - Map expected = new HashMap<>() { - { - put("calendar_interval", calendarInterval); - put("field", "foo"); - put("aggs", Collections.singletonMap("time", maxTime)); - } - }; - Map deprecated = Collections.singletonMap( - "buckets", - Collections.singletonMap("date_histogram", calendarDeprecated) - ); + Map maxTime = Map.of("max", Map.of("field", "time")); + Map calendarDeprecated = hashMapOf("interval", calendarInterval, "field", "foo", "aggs", Map.of("time", maxTime)); + Map expected = Map.of("calendar_interval", calendarInterval, "field", "foo", "aggs", Map.of("time", maxTime)); + Map deprecated = hashMapOf("buckets", hashMapOf("date_histogram", calendarDeprecated)); assertTrue(AggProvider.rewriteDateHistogramInterval(deprecated, false)); - assertThat(deprecated, equalTo(Collections.singletonMap("buckets", Collections.singletonMap("date_histogram", expected)))); - - calendarDeprecated = new HashMap<>() { - { - put("interval", calendarInterval); - put("field", "foo"); - put("aggs", Collections.singletonMap("time", maxTime)); - } - }; - deprecated = Collections.singletonMap("date_histogram", Collections.singletonMap("date_histogram", calendarDeprecated)); + assertThat(deprecated, equalTo(Map.of("buckets", Map.of("date_histogram", expected)))); + + calendarDeprecated = hashMapOf("interval", calendarInterval, "field", "foo", "aggs", Map.of("time", maxTime)); + deprecated = hashMapOf("date_histogram", hashMapOf("date_histogram", calendarDeprecated)); assertTrue(AggProvider.rewriteDateHistogramInterval(deprecated, false)); - assertThat(deprecated, equalTo(Collections.singletonMap("date_histogram", Collections.singletonMap("date_histogram", expected)))); + assertThat(deprecated, equalTo(Map.of("date_histogram", Map.of("date_histogram", expected)))); } public void testRewriteWhenNoneMustOccur() { String calendarInterval = "1w"; - Map maxTime = Collections.singletonMap("max", Collections.singletonMap("field", "time")); - Map calendarDeprecated = new HashMap<>() { - { - put("calendar_interval", calendarInterval); - put("field", "foo"); - put("aggs", Collections.singletonMap("time", maxTime)); - } - }; - Map expected = new HashMap<>() { - { - put("calendar_interval", calendarInterval); - put("field", "foo"); - put("aggs", Collections.singletonMap("time", maxTime)); - } - }; - Map current = Collections.singletonMap("buckets", Collections.singletonMap("date_histogram", calendarDeprecated)); + Map maxTime = Map.of("max", Map.of("field", "time")); + Map calendarDeprecated = Map.of( + "calendar_interval", + calendarInterval, + "field", + "foo", + "aggs", + Map.of("time", maxTime) + ); + Map expected = Map.of("calendar_interval", calendarInterval, "field", "foo", "aggs", Map.of("time", maxTime)); + Map current = Map.of("buckets", Map.of("date_histogram", calendarDeprecated)); assertFalse(AggProvider.rewriteDateHistogramInterval(current, false)); - 
assertThat(current, equalTo(Collections.singletonMap("buckets", Collections.singletonMap("date_histogram", expected)))); + assertThat(current, equalTo(Map.of("buckets", Map.of("date_histogram", expected)))); } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/ClassificationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/ClassificationTests.java index afb5cfc4fb1bc..62f73b0f2bccd 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/ClassificationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/ClassificationTests.java @@ -41,7 +41,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -446,18 +445,19 @@ public void testGetResultMappings_DependentVariableMappingHasNoTypes() { } public void testGetResultMappings_DependentVariableMappingIsPresent() { - Map expectedTopClassesMapping = new HashMap<>() { - { - put("type", "nested"); - put("properties", new HashMap<>() { - { - put("class_name", singletonMap("type", "dummy")); - put("class_probability", singletonMap("type", "double")); - put("class_score", singletonMap("type", "double")); - } - }); - } - }; + Map expectedTopClassesMapping = Map.of( + "type", + "nested", + "properties", + Map.of( + "class_name", + Map.of("type", "dummy"), + "class_probability", + Map.of("type", "double"), + "class_score", + Map.of("type", "double") + ) + ); FieldCapabilitiesResponse fieldCapabilitiesResponse = new FieldCapabilitiesResponse( new String[0], Collections.singletonMap("foo", Collections.singletonMap("dummy", createFieldCapabilities("foo", "dummy"))) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/inference/InferenceDefinitionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/inference/InferenceDefinitionTests.java index 14ec7a2053a1c..9f2326d022eab 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/inference/InferenceDefinitionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/inference/InferenceDefinitionTests.java @@ -94,41 +94,20 @@ public void testMultiClassIrisInference() throws IOException, ParseException { xContentRegistry() ); - Map fields = new HashMap<>() { - { - put("sepal_length", 5.1); - put("sepal_width", 3.5); - put("petal_length", 1.4); - put("petal_width", 0.2); - } - }; + Map fields = Map.of("sepal_length", 5.1, "sepal_width", 3.5, "petal_length", 1.4, "petal_width", 0.2); assertThat( ((ClassificationInferenceResults) definition.infer(fields, ClassificationConfig.EMPTY_PARAMS)).getClassificationLabel(), equalTo("Iris-setosa") ); - fields = new HashMap<>() { - { - put("sepal_length", 7.0); - put("sepal_width", 3.2); - put("petal_length", 4.7); - put("petal_width", 1.4); - } - }; + fields = Map.of("sepal_length", 7.0, "sepal_width", 3.2, "petal_length", 4.7, "petal_width", 1.4); assertThat( ((ClassificationInferenceResults) definition.infer(fields, ClassificationConfig.EMPTY_PARAMS)).getClassificationLabel(), equalTo("Iris-versicolor") ); - fields = new HashMap<>() { - { - put("sepal_length", 6.5); - put("sepal_width", 3.0); - put("petal_length", 5.2); - put("petal_width", 2.0); - } - }; + fields = 
Map.of("sepal_length", 6.5, "sepal_width", 3.0, "petal_length", 5.2, "petal_width", 2.0); assertThat( ((ClassificationInferenceResults) definition.infer(fields, ClassificationConfig.EMPTY_PARAMS)).getClassificationLabel(), equalTo("Iris-virginica") diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java index 4fff2804f9350..047f3a418c36b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java @@ -43,7 +43,6 @@ import java.util.Collections; import java.util.Date; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -101,7 +100,7 @@ protected Writeable.Reader instanceReader() { @Override protected Job doParseInstance(XContentParser parser) { - return Job.STRICT_PARSER.apply(parser, null).build(); + return Job.LENIENT_PARSER.apply(parser, null).build(); } public void testToXContentForInternalStorage() throws IOException { @@ -119,10 +118,10 @@ public void testToXContentForInternalStorage() throws IOException { } } - public void testFutureConfigParse() throws IOException { + public void testRestRequestParser_DoesntAllowInternalFields() throws IOException { XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(XContentParserConfiguration.EMPTY, FUTURE_JOB); - XContentParseException e = expectThrows(XContentParseException.class, () -> Job.STRICT_PARSER.apply(parser, null).build()); - assertEquals("[4:5] [job_details] unknown field [tomorrows_technology_today]", e.getMessage()); + XContentParseException e = expectThrows(XContentParseException.class, () -> Job.REST_REQUEST_PARSER.apply(parser, null).build()); + assertEquals("[3:5] [job_details] unknown field [create_time]", e.getMessage()); } public void testFutureMetadataParse() throws IOException { @@ -554,22 +553,6 @@ public void testBuilder_givenTimeFieldInAnalysisConfig() { assertThat(e.getMessage(), equalTo(Messages.getMessage(Messages.JOB_CONFIG_TIME_FIELD_NOT_ALLOWED_IN_ANALYSIS_CONFIG))); } - public void testInvalidCreateTimeSettings() { - Job.Builder builder = new Job.Builder("invalid-settings"); - builder.setModelSnapshotId("snapshot-foo"); - assertEquals(Collections.singletonList(Job.MODEL_SNAPSHOT_ID.getPreferredName()), builder.invalidCreateTimeSettings()); - - builder.setCreateTime(new Date()); - builder.setFinishedTime(new Date()); - - Set expected = new HashSet<>(); - expected.add(Job.CREATE_TIME.getPreferredName()); - expected.add(Job.FINISHED_TIME.getPreferredName()); - expected.add(Job.MODEL_SNAPSHOT_ID.getPreferredName()); - - assertEquals(expected, new HashSet<>(builder.invalidCreateTimeSettings())); - } - public void testEmptyGroup() { Job.Builder builder = buildJobBuilder("foo"); builder.setGroups(Arrays.asList("foo-group", "")); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java index 09ff29f768dce..24a3a097e9e2d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.core.TimeValue; import 
org.elasticsearch.test.AbstractXContentSerializingTestCase; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xpack.core.ml.MlConfigVersion; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; import org.elasticsearch.xpack.core.ml.utils.MlConfigVersionUtils; @@ -35,8 +34,6 @@ public class JobUpdateTests extends AbstractXContentSerializingTestCase { - private boolean useInternalParser = randomBoolean(); - @Override protected JobUpdate createTestInstance() { return createRandom(randomAlphaOfLength(4), null); @@ -49,7 +46,7 @@ protected JobUpdate mutateInstance(JobUpdate instance) { /** * Creates a completely random update when the job is null - * or a random update that is is valid for the given job + * or a random update that is valid for the given job */ public JobUpdate createRandom(String jobId, @Nullable Job job) { JobUpdate.Builder update = new JobUpdate.Builder(jobId); @@ -126,24 +123,9 @@ public JobUpdate createRandom(String jobId, @Nullable Job job) { if (randomBoolean()) { update.setCustomSettings(Collections.singletonMap(randomAlphaOfLength(10), randomAlphaOfLength(10))); } - if (useInternalParser && randomBoolean()) { - update.setModelSnapshotId(randomAlphaOfLength(10)); - } - if (useInternalParser && randomBoolean()) { - update.setModelSnapshotMinVersion(MlConfigVersion.CURRENT); - } - if (useInternalParser && randomBoolean()) { - update.setJobVersion(MlConfigVersionUtils.randomCompatibleVersion(random())); - } - if (useInternalParser) { - update.setClearFinishTime(randomBoolean()); - } if (randomBoolean()) { update.setAllowLazyOpen(randomBoolean()); } - if (useInternalParser && randomBoolean() && (job == null || job.isDeleting() == false)) { - update.setBlocked(BlockedTests.createRandom()); - } if (randomBoolean() && job != null) { update.setModelPruneWindow( TimeValue.timeValueSeconds( @@ -251,11 +233,7 @@ protected Writeable.Reader instanceReader() { @Override protected JobUpdate doParseInstance(XContentParser parser) { - if (useInternalParser) { - return JobUpdate.INTERNAL_PARSER.apply(parser, null).build(); - } else { - return JobUpdate.EXTERNAL_PARSER.apply(parser, null).build(); - } + return JobUpdate.PARSER.apply(parser, null).build(); } public void testMergeWithJob() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/support/StringMatcherTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/support/StringMatcherTests.java index 1582cf3404bdc..2e31f760f6db2 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/support/StringMatcherTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/support/StringMatcherTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.common.Strings; +import org.elasticsearch.core.Predicates; import org.elasticsearch.test.ESTestCase; import java.util.List; @@ -49,7 +50,7 @@ public void testMatchAllWildcard() throws Exception { assertMatch(matcher, randomAlphaOfLengthBetween(i, 20)); } - assertThat(matcher.getPredicate(), sameInstance(StringMatcher.ALWAYS_TRUE_PREDICATE)); + assertThat(matcher.getPredicate(), sameInstance(Predicates.always())); } public void testSingleWildcard() throws Exception { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/textstructure/action/FindTextStructureActionResponseTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/textstructure/action/FindTextStructureActionResponseTests.java deleted file mode 100644 index 31dbfc7dccff3..0000000000000 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/textstructure/action/FindTextStructureActionResponseTests.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.core.textstructure.action; - -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.test.AbstractWireSerializingTestCase; -import org.elasticsearch.xpack.core.textstructure.structurefinder.TextStructureTests; - -public class FindTextStructureActionResponseTests extends AbstractWireSerializingTestCase { - - @Override - protected FindStructureAction.Response createTestInstance() { - return new FindStructureAction.Response(TextStructureTests.createTestFileStructure()); - } - - @Override - protected FindStructureAction.Response mutateInstance(FindStructureAction.Response instance) { - return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 - } - - @Override - protected Writeable.Reader instanceReader() { - return FindStructureAction.Response::new; - } -} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/textstructure/action/FindTextStructureResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/textstructure/action/FindTextStructureResponseTests.java new file mode 100644 index 0000000000000..887d75e3751c5 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/textstructure/action/FindTextStructureResponseTests.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ +package org.elasticsearch.xpack.core.textstructure.action; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.core.textstructure.structurefinder.TextStructureTests; + +public class FindTextStructureResponseTests extends AbstractWireSerializingTestCase { + + @Override + protected FindStructureResponse createTestInstance() { + return new FindStructureResponse(TextStructureTests.createTestFileStructure()); + } + + @Override + protected FindStructureResponse mutateInstance(FindStructureResponse response) { + FindStructureResponse newResponse; + do { + newResponse = createTestInstance(); + } while (response.equals(newResponse)); + return newResponse; + } + + @Override + protected Writeable.Reader instanceReader() { + return FindStructureResponse::new; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfigTests.java index 62b9e2e48a907..6bedd60d582dd 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfigTests.java @@ -33,32 +33,30 @@ public class SettingsConfigTests extends AbstractSerializingTransformTestCase instanceReader() { } public void testExplicitNullParsing() throws IOException { - // explicit null assertThat(fromString("{\"max_page_search_size\" : null}").getMaxPageSearchSize(), equalTo(-1)); // not set @@ -119,6 +116,11 @@ public void testExplicitNullParsing() throws IOException { assertThat(fromString("{\"num_failure_retries\" : null}").getNumFailureRetriesForUpdate(), equalTo(-2)); assertNull(fromString("{}").getNumFailureRetries()); assertNull(fromString("{}").getNumFailureRetriesForUpdate()); + + assertNull(fromString("{\"unattended\" : null}").getUnattended()); + assertThat(fromString("{\"unattended\" : null}").getUnattendedForUpdate(), equalTo(-1)); + assertNull(fromString("{}").getUnattended()); + assertNull(fromString("{}").getUnattendedForUpdate()); } public void testUpdateMaxPageSearchSizeUsingBuilder() throws IOException { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/SourceConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/SourceConfigTests.java index a88530904b3d2..048f6b8b12090 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/SourceConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/SourceConfigTests.java @@ -13,7 +13,6 @@ import org.junit.Before; import java.io.IOException; -import java.util.HashMap; import java.util.Map; import java.util.function.Predicate; @@ -146,19 +145,20 @@ public void testGetRuntimeMappings_EmptyRuntimeMappings() { } public void testGetRuntimeMappings_NonEmptyRuntimeMappings() { - Map runtimeMappings = new HashMap<>() { - { - put("field-A", singletonMap("type", "keyword")); - put("field-B", singletonMap("script", "some script")); - put("field-C", singletonMap("script", "some other script")); - } - }; - Map scriptBasedRuntimeMappings = new HashMap<>() { - { - put("field-B", singletonMap("script", "some script")); - put("field-C", singletonMap("script", "some other script")); - } - }; + Map runtimeMappings = 
Map.of( + "field-A", + Map.of("type", "keyword"), + "field-B", + Map.of("script", "some script"), + "field-C", + Map.of("script", "some other script") + ); + Map scriptBasedRuntimeMappings = Map.of( + "field-B", + Map.of("script", "some script"), + "field-C", + Map.of("script", "some other script") + ); SourceConfig sourceConfig = new SourceConfig(generateRandomStringArray(10, 10, false, false), randomQueryConfig(), runtimeMappings); assertThat(sourceConfig.getRuntimeMappings(), is(equalTo(runtimeMappings))); assertThat(sourceConfig.getScriptBasedRuntimeMappings(), is(equalTo(scriptBasedRuntimeMappings))); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformEffectiveSettingsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformEffectiveSettingsTests.java new file mode 100644 index 0000000000000..98726d8dbf272 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformEffectiveSettingsTests.java @@ -0,0 +1,135 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.transform.transforms; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.transform.TransformConfigVersion; + +public class TransformEffectiveSettingsTests extends ESTestCase { + + public void testWriteDatesAsEpochMillis() { + SettingsConfig settingsConfig = new SettingsConfig.Builder().build(); + assertFalse(TransformEffectiveSettings.writeDatesAsEpochMillis(settingsConfig, TransformConfigVersion.V_7_11_0)); + assertTrue(TransformEffectiveSettings.writeDatesAsEpochMillis(settingsConfig, TransformConfigVersion.V_7_10_1)); + + settingsConfig = new SettingsConfig.Builder().setDatesAsEpochMillis(null).build(); + assertFalse(TransformEffectiveSettings.writeDatesAsEpochMillis(settingsConfig, TransformConfigVersion.V_7_11_0)); + // Note that the result is not the same as if we just left "setDatesAsEpochMillis" unset in the builder! 
+ assertFalse(TransformEffectiveSettings.writeDatesAsEpochMillis(settingsConfig, TransformConfigVersion.V_7_10_1)); + + settingsConfig = new SettingsConfig.Builder().setDatesAsEpochMillis(false).build(); + assertFalse(TransformEffectiveSettings.writeDatesAsEpochMillis(settingsConfig, TransformConfigVersion.V_7_11_0)); + assertFalse(TransformEffectiveSettings.writeDatesAsEpochMillis(settingsConfig, TransformConfigVersion.V_7_10_1)); + + settingsConfig = new SettingsConfig.Builder().setDatesAsEpochMillis(true).build(); + assertTrue(TransformEffectiveSettings.writeDatesAsEpochMillis(settingsConfig, TransformConfigVersion.V_7_11_0)); + assertTrue(TransformEffectiveSettings.writeDatesAsEpochMillis(settingsConfig, TransformConfigVersion.V_7_10_1)); + } + + public void testIsAlignCheckpointsDisabled() { + SettingsConfig settingsConfig = new SettingsConfig.Builder().build(); + assertFalse(TransformEffectiveSettings.isAlignCheckpointsDisabled(settingsConfig)); + + settingsConfig = new SettingsConfig.Builder().setAlignCheckpoints(null).build(); + assertFalse(TransformEffectiveSettings.isAlignCheckpointsDisabled(settingsConfig)); + + settingsConfig = new SettingsConfig.Builder().setAlignCheckpoints(false).build(); + assertTrue(TransformEffectiveSettings.isAlignCheckpointsDisabled(settingsConfig)); + + settingsConfig = new SettingsConfig.Builder().setAlignCheckpoints(true).build(); + assertFalse(TransformEffectiveSettings.isAlignCheckpointsDisabled(settingsConfig)); + } + + public void testIsPitDisabled() { + SettingsConfig settingsConfig = new SettingsConfig.Builder().build(); + assertFalse(TransformEffectiveSettings.isPitDisabled(settingsConfig)); + + settingsConfig = new SettingsConfig.Builder().setUsePit(null).build(); + assertFalse(TransformEffectiveSettings.isPitDisabled(settingsConfig)); + + settingsConfig = new SettingsConfig.Builder().setUsePit(false).build(); + assertTrue(TransformEffectiveSettings.isPitDisabled(settingsConfig)); + + settingsConfig = new SettingsConfig.Builder().setUsePit(true).build(); + assertFalse(TransformEffectiveSettings.isPitDisabled(settingsConfig)); + } + + public void testIsDeduceMappingsDisabled() { + SettingsConfig settingsConfig = new SettingsConfig.Builder().build(); + assertFalse(TransformEffectiveSettings.isDeduceMappingsDisabled(settingsConfig)); + + settingsConfig = new SettingsConfig.Builder().setDeduceMappings(null).build(); + assertFalse(TransformEffectiveSettings.isDeduceMappingsDisabled(settingsConfig)); + + settingsConfig = new SettingsConfig.Builder().setDeduceMappings(false).build(); + assertTrue(TransformEffectiveSettings.isDeduceMappingsDisabled(settingsConfig)); + + settingsConfig = new SettingsConfig.Builder().setDeduceMappings(true).build(); + assertFalse(TransformEffectiveSettings.isDeduceMappingsDisabled(settingsConfig)); + } + + public void testGetNumFailureRetries() { + SettingsConfig settingsConfig = new SettingsConfig.Builder().build(); + assertEquals(10, TransformEffectiveSettings.getNumFailureRetries(settingsConfig, 10)); + + settingsConfig = new SettingsConfig.Builder().setNumFailureRetries(null).build(); + assertEquals(10, TransformEffectiveSettings.getNumFailureRetries(settingsConfig, 10)); + + settingsConfig = new SettingsConfig.Builder().setNumFailureRetries(-1).build(); + assertEquals(-1, TransformEffectiveSettings.getNumFailureRetries(settingsConfig, 10)); + + settingsConfig = new SettingsConfig.Builder().setNumFailureRetries(0).build(); + assertEquals(0, TransformEffectiveSettings.getNumFailureRetries(settingsConfig, 10)); + + 
settingsConfig = new SettingsConfig.Builder().setNumFailureRetries(1).build(); + assertEquals(1, TransformEffectiveSettings.getNumFailureRetries(settingsConfig, 10)); + + settingsConfig = new SettingsConfig.Builder().setNumFailureRetries(10).build(); + assertEquals(10, TransformEffectiveSettings.getNumFailureRetries(settingsConfig, 10)); + + settingsConfig = new SettingsConfig.Builder().setNumFailureRetries(100).build(); + assertEquals(100, TransformEffectiveSettings.getNumFailureRetries(settingsConfig, 10)); + } + + public void testGetNumFailureRetries_Unattended() { + SettingsConfig settingsConfig = new SettingsConfig.Builder().setUnattended(true).build(); + assertEquals(-1, TransformEffectiveSettings.getNumFailureRetries(settingsConfig, 10)); + + settingsConfig = new SettingsConfig.Builder().setUnattended(true).setNumFailureRetries(null).build(); + assertEquals(-1, TransformEffectiveSettings.getNumFailureRetries(settingsConfig, 10)); + + settingsConfig = new SettingsConfig.Builder().setUnattended(true).setNumFailureRetries(-1).build(); + assertEquals(-1, TransformEffectiveSettings.getNumFailureRetries(settingsConfig, 10)); + + settingsConfig = new SettingsConfig.Builder().setUnattended(true).setNumFailureRetries(0).build(); + assertEquals(-1, TransformEffectiveSettings.getNumFailureRetries(settingsConfig, 10)); + + settingsConfig = new SettingsConfig.Builder().setUnattended(true).setNumFailureRetries(1).build(); + assertEquals(-1, TransformEffectiveSettings.getNumFailureRetries(settingsConfig, 10)); + + settingsConfig = new SettingsConfig.Builder().setUnattended(true).setNumFailureRetries(10).build(); + assertEquals(-1, TransformEffectiveSettings.getNumFailureRetries(settingsConfig, 10)); + + settingsConfig = new SettingsConfig.Builder().setUnattended(true).setNumFailureRetries(100).build(); + assertEquals(-1, TransformEffectiveSettings.getNumFailureRetries(settingsConfig, 10)); + } + + public void testIsUnattended() { + SettingsConfig settingsConfig = new SettingsConfig.Builder().build(); + assertFalse(TransformEffectiveSettings.isUnattended(settingsConfig)); + + settingsConfig = new SettingsConfig.Builder().setUnattended(null).build(); + assertFalse(TransformEffectiveSettings.isUnattended(settingsConfig)); + + settingsConfig = new SettingsConfig.Builder().setUnattended(false).build(); + assertFalse(TransformEffectiveSettings.isUnattended(settingsConfig)); + + settingsConfig = new SettingsConfig.Builder().setUnattended(true).build(); + assertTrue(TransformEffectiveSettings.isUnattended(settingsConfig)); + } +} diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-events.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-events.json index e2d17c8327704..3f2e0ca21bdbd 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-events.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-events.json @@ -7,10 +7,14 @@ }, "composed_of": [ "profiling-events", + "profiling-events@custom", "profiling-ilm", "profiling-ilm@custom" ], - "ignore_missing_component_templates": ["profiling-ilm@custom"], + "ignore_missing_component_templates": [ + "profiling-events@custom", + "profiling-ilm@custom" + ], "priority": 100, "_meta": { "description": "Index template for profiling-events", diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-executables.json 
b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-executables.json index 57fd114c57e27..088589f7df769 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-executables.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-executables.json @@ -4,11 +4,15 @@ ], "composed_of": [ "profiling-executables", + "profiling-executables@custom", "profiling-ilm", "profiling-hot-tier", "profiling-ilm@custom" ], - "ignore_missing_component_templates": ["profiling-ilm@custom"], + "ignore_missing_component_templates": [ + "profiling-executables@custom", + "profiling-ilm@custom" + ], "priority": 100, "_meta": { "description": "Index template for .profiling-executables", diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-hosts.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-hosts.json index 526d8090b0ac6..4d750726b8028 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-hosts.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-hosts.json @@ -5,10 +5,14 @@ "data_stream": {}, "composed_of": [ "profiling-hosts", + "profiling-hosts@custom", "profiling-ilm", "profiling-ilm@custom" ], - "ignore_missing_component_templates": ["profiling-ilm@custom"], + "ignore_missing_component_templates": [ + "profiling-hosts@custom", + "profiling-ilm@custom" + ], "priority": 100, "_meta": { "description": "Index template for profiling-hosts", diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-metrics.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-metrics.json index d09de006d025d..74516d7cb826c 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-metrics.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-metrics.json @@ -5,10 +5,14 @@ "data_stream": {}, "composed_of": [ "profiling-metrics", + "profiling-metrics@custom", "profiling-ilm", "profiling-ilm@custom" ], - "ignore_missing_component_templates": ["profiling-ilm@custom"], + "ignore_missing_component_templates": [ + "profiling-metrics@custom", + "profiling-ilm@custom" + ], "priority": 100, "_meta": { "description": "Index template for profiling-metrics", diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stackframes.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stackframes.json index 694ae6ba92a57..0cbd868c2eade 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stackframes.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stackframes.json @@ -4,11 +4,15 @@ ], "composed_of": [ "profiling-stackframes", + "profiling-stackframes@custom", "profiling-ilm", "profiling-hot-tier", "profiling-ilm@custom" ], - "ignore_missing_component_templates": ["profiling-ilm@custom"], + "ignore_missing_component_templates": [ + "profiling-stackframes@custom", + "profiling-ilm@custom" + ], "priority": 100, "_meta": { "description": "Index template for .profiling-stackframes", diff --git 
a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stacktraces.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stacktraces.json index c4c920a76c375..d280906873ffa 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stacktraces.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stacktraces.json @@ -4,11 +4,15 @@ ], "composed_of": [ "profiling-stacktraces", + "profiling-stacktraces@custom", "profiling-ilm", "profiling-hot-tier", "profiling-ilm@custom" ], - "ignore_missing_component_templates": ["profiling-ilm@custom"], + "ignore_missing_component_templates": [ + "profiling-stacktraces@custom", + "profiling-ilm@custom" + ], "priority": 100, "_meta": { "description": "Index template for .profiling-stacktraces", diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-global.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-global.json index a7bae1adbb548..dd5eca49b9daa 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-global.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-global.json @@ -4,11 +4,15 @@ ], "composed_of": [ "profiling-symbols", + "profiling-symbols@custom", "profiling-ilm", "profiling-hot-tier", "profiling-ilm@custom" ], - "ignore_missing_component_templates": ["profiling-ilm@custom"], + "ignore_missing_component_templates": [ + "profiling-symbols@custom", + "profiling-ilm@custom" + ], "template": { "settings": { "index": { diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-private.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-private.json index 999bf7721b897..04c382e558591 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-private.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-private.json @@ -3,7 +3,11 @@ ".profiling-symbols-private*" ], "composed_of": [ - "profiling-symbols" + "profiling-symbols", + "profiling-symbols@custom" + ], + "ignore_missing_component_templates": [ + "profiling-symbols@custom" ], "priority": 100, "_meta": { diff --git a/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/DeprecationHttpIT.java b/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/DeprecationHttpIT.java index 99ffb4d11660e..73c2fb607eb17 100644 --- a/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/DeprecationHttpIT.java +++ b/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/DeprecationHttpIT.java @@ -46,7 +46,6 @@ import static org.elasticsearch.common.logging.DeprecatedMessage.KEY_FIELD_NAME; import static org.elasticsearch.common.logging.DeprecatedMessage.X_OPAQUE_ID_FIELD_NAME; -import static org.elasticsearch.test.hamcrest.RegexMatcher.matches; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; @@ -56,6 +55,7 @@ import static 
org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.matchesRegex; /** * Tests that deprecation message are returned via response headers, and can be indexed into a data stream. @@ -152,7 +152,7 @@ public void testDeprecatedSettingsReturnWarnings() throws Exception { final Response response = client().performRequest(request); final List deprecatedWarnings = getWarningHeaders(response.getHeaders()); - assertThat(deprecatedWarnings, everyItem(matches(HeaderWarning.WARNING_HEADER_PATTERN.pattern()))); + assertThat(deprecatedWarnings, everyItem(matchesRegex(HeaderWarning.WARNING_HEADER_PATTERN))); final List actualWarningValues = deprecatedWarnings.stream() .map(s -> HeaderWarning.extractWarningValueFromWarningHeader(s, true)) @@ -295,7 +295,7 @@ private void doTestDeprecationWarningsAppearInHeaders() throws Exception { headerMatchers.add(equalTo(TestDeprecationHeaderRestAction.DEPRECATED_USAGE)); } - assertThat(deprecatedWarnings, everyItem(matches(HeaderWarning.WARNING_HEADER_PATTERN.pattern()))); + assertThat(deprecatedWarnings, everyItem(matchesRegex(HeaderWarning.WARNING_HEADER_PATTERN))); final List actualWarningValues = deprecatedWarnings.stream() .map(s -> HeaderWarning.extractWarningValueFromWarningHeader(s, true)) .collect(Collectors.toList()); diff --git a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleDisruptionIT.java b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleDisruptionIT.java index 76cc8308a4703..65a4d84e921a2 100644 --- a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleDisruptionIT.java +++ b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleDisruptionIT.java @@ -57,6 +57,7 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { return settings.build(); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105577") @TestLogging(value = "org.elasticsearch.datastreams.lifecycle:TRACE", reason = "debugging") public void testDataStreamLifecycleDownsampleRollingRestart() throws Exception { final InternalTestCluster cluster = internalCluster(); diff --git a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleClusterDisruptionIT.java b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleClusterDisruptionIT.java deleted file mode 100644 index dc915738f6d13..0000000000000 --- a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleClusterDisruptionIT.java +++ /dev/null @@ -1,443 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.downsample; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.DocWriteRequest; -import org.elasticsearch.action.admin.indices.get.GetIndexRequest; -import org.elasticsearch.action.admin.indices.get.GetIndexResponse; -import org.elasticsearch.action.bulk.BulkItemResponse; -import org.elasticsearch.action.bulk.BulkRequestBuilder; -import org.elasticsearch.action.bulk.BulkResponse; -import org.elasticsearch.action.downsample.DownsampleAction; -import org.elasticsearch.action.downsample.DownsampleConfig; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.support.WriteRequest; -import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.time.DateFormatter; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.index.IndexMode; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.engine.VersionConflictEngineException; -import org.elasticsearch.index.mapper.DateFieldMapper; -import org.elasticsearch.index.query.MatchAllQueryBuilder; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.InternalTestCluster; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentFactory; -import org.elasticsearch.xpack.aggregatemetric.AggregateMetricMapperPlugin; -import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; - -import java.io.IOException; -import java.time.Instant; -import java.time.LocalDateTime; -import java.time.ZoneId; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Locale; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import java.util.function.Consumer; - -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; -import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.xpack.core.rollup.ConfigTestHelpers.randomInterval; - -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 4) -public class DownsampleClusterDisruptionIT extends ESIntegTestCase { - private static final Logger logger = LogManager.getLogger(DownsampleClusterDisruptionIT.class); - private static final DateFormatter DATE_FORMATTER = DateFormatter.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSXXX"); - private static final TimeValue TIMEOUT = new TimeValue(1, TimeUnit.MINUTES); - public static final String FIELD_TIMESTAMP = "@timestamp"; - public static final String FIELD_DIMENSION_1 = "dimension_kw"; - public static final String FIELD_DIMENSION_2 = "dimension_long"; - public static final String FIELD_METRIC_COUNTER = "counter"; - public static final int DOC_COUNT = 10_000; - - @Override - protected Collection> nodePlugins() { - return List.of(LocalStateCompositeXPackPlugin.class, Downsample.class, AggregateMetricMapperPlugin.class); - } - - interface DisruptionListener { - void disruptionStart(); - - void disruptionEnd(); - } - - private class Disruptor implements Runnable { - final InternalTestCluster cluster; - private final String sourceIndex; - private final DisruptionListener listener; 
- private final String clientNode; - private final Consumer disruption; - - private Disruptor( - final InternalTestCluster cluster, - final String sourceIndex, - final DisruptionListener listener, - final String clientNode, - final Consumer disruption - ) { - this.cluster = cluster; - this.sourceIndex = sourceIndex; - this.listener = listener; - this.clientNode = clientNode; - this.disruption = disruption; - } - - @Override - public void run() { - listener.disruptionStart(); - try { - final String candidateNode = cluster.client(clientNode) - .admin() - .cluster() - .prepareSearchShards(sourceIndex) - .get() - .getNodes()[0].getName(); - logger.info("Candidate node [" + candidateNode + "]"); - disruption.accept(candidateNode); - ensureGreen(TimeValue.timeValueSeconds(60), sourceIndex); - ensureStableCluster(cluster.numDataAndMasterNodes(), clientNode); - - } catch (Exception e) { - logger.error("Ignoring Error while injecting disruption [" + e.getMessage() + "]"); - } finally { - listener.disruptionEnd(); - } - } - } - - public void setup(final String sourceIndex, int numOfShards, int numOfReplicas, long startTime) throws IOException { - final Settings.Builder settings = indexSettings(numOfShards, numOfReplicas).put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) - .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), List.of(FIELD_DIMENSION_1)) - .put( - IndexSettings.TIME_SERIES_START_TIME.getKey(), - DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(Instant.ofEpochMilli(startTime).toEpochMilli()) - ) - .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), "2106-01-08T23:40:53.384Z"); - - if (randomBoolean()) { - settings.put(IndexMetadata.SETTING_INDEX_HIDDEN, randomBoolean()); - } - - final XContentBuilder mapping = jsonBuilder().startObject().startObject("_doc").startObject("properties"); - mapping.startObject(FIELD_TIMESTAMP).field("type", "date").endObject(); - - mapping.startObject(FIELD_DIMENSION_1).field("type", "keyword").field("time_series_dimension", true).endObject(); - mapping.startObject(FIELD_DIMENSION_2).field("type", "long").field("time_series_dimension", true).endObject(); - - mapping.startObject(FIELD_METRIC_COUNTER) - .field("type", "double") /* numeric label indexed as a metric */ - .field("time_series_metric", "counter") - .endObject(); - - mapping.endObject().endObject().endObject(); - assertAcked(indicesAdmin().prepareCreate(sourceIndex).setSettings(settings.build()).setMapping(mapping).get()); - } - - public void testDownsampleIndexWithDataNodeRestart() throws Exception { - final InternalTestCluster cluster = internalCluster(); - final List masterNodes = cluster.startMasterOnlyNodes(1); - cluster.startDataOnlyNodes(3); - ensureStableCluster(cluster.size()); - ensureGreen(); - - final String sourceIndex = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); - final String targetIndex = randomAlphaOfLength(11).toLowerCase(Locale.ROOT); - long startTime = LocalDateTime.parse("2020-09-09T18:00:00").atZone(ZoneId.of("UTC")).toInstant().toEpochMilli(); - setup(sourceIndex, 1, 0, startTime); - final DownsampleConfig config = new DownsampleConfig(randomInterval()); - final DownsampleActionSingleNodeTests.SourceSupplier sourceSupplier = () -> { - final String ts = randomDateForInterval(config.getInterval(), startTime); - double counterValue = DATE_FORMATTER.parseMillis(ts); - final List dimensionValues = new ArrayList<>(5); - for (int j = 0; j < randomIntBetween(1, 5); j++) { - dimensionValues.add(randomAlphaOfLength(6)); - } - return XContentFactory.jsonBuilder() - 
.startObject() - .field(FIELD_TIMESTAMP, ts) - .field(FIELD_DIMENSION_1, randomFrom(dimensionValues)) - .field(FIELD_DIMENSION_2, randomIntBetween(1, 10)) - .field(FIELD_METRIC_COUNTER, counterValue) - .endObject(); - }; - int indexedDocs = bulkIndex(sourceIndex, sourceSupplier, DOC_COUNT); - prepareSourceIndex(sourceIndex); - final CountDownLatch disruptionStart = new CountDownLatch(1); - final CountDownLatch disruptionEnd = new CountDownLatch(1); - - new Thread(new Disruptor(cluster, sourceIndex, new DisruptionListener() { - @Override - public void disruptionStart() { - disruptionStart.countDown(); - } - - @Override - public void disruptionEnd() { - disruptionEnd.countDown(); - } - }, masterNodes.get(0), (node) -> { - try { - cluster.restartNode(node, new InternalTestCluster.RestartCallback() { - @Override - public boolean validateClusterForming() { - return true; - } - }); - } catch (Exception e) { - throw new RuntimeException(e); - } - })).start(); - startDownsampleTaskDuringDisruption(sourceIndex, targetIndex, config, disruptionStart, disruptionEnd); - waitUntil(() -> getClusterPendingTasks(cluster.client()).pendingTasks().isEmpty()); - ensureStableCluster(cluster.numDataAndMasterNodes()); - assertTargetIndex(cluster, sourceIndex, targetIndex, indexedDocs); - } - - public void testDownsampleIndexWithRollingRestart() throws Exception { - final InternalTestCluster cluster = internalCluster(); - final List masterNodes = cluster.startMasterOnlyNodes(1); - cluster.startDataOnlyNodes(3); - ensureStableCluster(cluster.size()); - ensureGreen(); - - final String sourceIndex = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); - final String targetIndex = randomAlphaOfLength(11).toLowerCase(Locale.ROOT); - long startTime = LocalDateTime.parse("2020-09-09T18:00:00").atZone(ZoneId.of("UTC")).toInstant().toEpochMilli(); - setup(sourceIndex, 1, 0, startTime); - final DownsampleConfig config = new DownsampleConfig(randomInterval()); - final DownsampleActionSingleNodeTests.SourceSupplier sourceSupplier = () -> { - final String ts = randomDateForInterval(config.getInterval(), startTime); - double counterValue = DATE_FORMATTER.parseMillis(ts); - final List dimensionValues = new ArrayList<>(5); - for (int j = 0; j < randomIntBetween(1, 5); j++) { - dimensionValues.add(randomAlphaOfLength(6)); - } - return XContentFactory.jsonBuilder() - .startObject() - .field(FIELD_TIMESTAMP, ts) - .field(FIELD_DIMENSION_1, randomFrom(dimensionValues)) - .field(FIELD_DIMENSION_2, randomIntBetween(1, 10)) - .field(FIELD_METRIC_COUNTER, counterValue) - .endObject(); - }; - int indexedDocs = bulkIndex(sourceIndex, sourceSupplier, DOC_COUNT); - prepareSourceIndex(sourceIndex); - final CountDownLatch disruptionStart = new CountDownLatch(1); - final CountDownLatch disruptionEnd = new CountDownLatch(1); - - new Thread(new Disruptor(cluster, sourceIndex, new DisruptionListener() { - @Override - public void disruptionStart() { - disruptionStart.countDown(); - } - - @Override - public void disruptionEnd() { - disruptionEnd.countDown(); - } - }, masterNodes.get(0), (ignored) -> { - try { - cluster.rollingRestart(new InternalTestCluster.RestartCallback() { - @Override - public boolean validateClusterForming() { - return true; - } - }); - } catch (Exception e) { - throw new RuntimeException(e); - } - })).start(); - - startDownsampleTaskDuringDisruption(sourceIndex, targetIndex, config, disruptionStart, disruptionEnd); - waitUntil(() -> getClusterPendingTasks(cluster.client()).pendingTasks().isEmpty()); - 
ensureStableCluster(cluster.numDataAndMasterNodes()); - assertTargetIndex(cluster, sourceIndex, targetIndex, indexedDocs); - } - - /** - * Starts a downsample operation. - * - * @param sourceIndex the idex to read data from - * @param targetIndex the idnex to write downsampled data to - * @param config the downsample configuration including the downsample granularity - * @param disruptionStart a latch to synchronize on the disruption starting - * @param disruptionEnd a latch to synchronize on the disruption ending - * @throws InterruptedException if the thread is interrupted while waiting - */ - private void startDownsampleTaskDuringDisruption( - final String sourceIndex, - final String targetIndex, - final DownsampleConfig config, - final CountDownLatch disruptionStart, - final CountDownLatch disruptionEnd - ) throws Exception { - disruptionStart.await(); - assertBusy(() -> { - try { - downsample(sourceIndex, targetIndex, config); - } catch (Exception e) { - throw new AssertionError(e); - } - }, 120, TimeUnit.SECONDS); - disruptionEnd.await(); - } - - public void testDownsampleIndexWithFullClusterRestart() throws Exception { - final InternalTestCluster cluster = internalCluster(); - final List masterNodes = cluster.startMasterOnlyNodes(1); - cluster.startDataOnlyNodes(3); - ensureStableCluster(cluster.size()); - ensureGreen(); - - final String sourceIndex = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); - final String downsampleIndex = randomAlphaOfLength(11).toLowerCase(Locale.ROOT); - long startTime = LocalDateTime.parse("2020-09-09T18:00:00").atZone(ZoneId.of("UTC")).toInstant().toEpochMilli(); - setup(sourceIndex, 1, 0, startTime); - final DownsampleConfig config = new DownsampleConfig(randomInterval()); - final DownsampleActionSingleNodeTests.SourceSupplier sourceSupplier = () -> { - final String ts = randomDateForInterval(config.getInterval(), startTime); - double counterValue = DATE_FORMATTER.parseMillis(ts); - final List dimensionValues = new ArrayList<>(5); - for (int j = 0; j < randomIntBetween(1, 5); j++) { - dimensionValues.add(randomAlphaOfLength(6)); - } - return XContentFactory.jsonBuilder() - .startObject() - .field(FIELD_TIMESTAMP, ts) - .field(FIELD_DIMENSION_1, randomFrom(dimensionValues)) - .field(FIELD_DIMENSION_2, randomIntBetween(1, 10)) - .field(FIELD_METRIC_COUNTER, counterValue) - .endObject(); - }; - int indexedDocs = bulkIndex(sourceIndex, sourceSupplier, DOC_COUNT); - prepareSourceIndex(sourceIndex); - final CountDownLatch disruptionStart = new CountDownLatch(1); - final CountDownLatch disruptionEnd = new CountDownLatch(1); - - new Thread(new Disruptor(cluster, sourceIndex, new DisruptionListener() { - @Override - public void disruptionStart() { - disruptionStart.countDown(); - } - - @Override - public void disruptionEnd() { - disruptionEnd.countDown(); - } - }, masterNodes.get(0), (ignored) -> { - try { - cluster.fullRestart(new InternalTestCluster.RestartCallback() { - @Override - public boolean validateClusterForming() { - return true; - } - }); - } catch (Exception e) { - throw new RuntimeException(e); - } - })).start(); - - startDownsampleTaskDuringDisruption(sourceIndex, downsampleIndex, config, disruptionStart, disruptionEnd); - waitUntil(() -> getClusterPendingTasks(cluster.client()).pendingTasks().isEmpty()); - ensureStableCluster(cluster.numDataAndMasterNodes()); - assertTargetIndex(cluster, sourceIndex, downsampleIndex, indexedDocs); - } - - private void assertTargetIndex(final InternalTestCluster cluster, final String sourceIndex, final String 
targetIndex, int indexedDocs) { - final GetIndexResponse getIndexResponse = cluster.client() - .admin() - .indices() - .getIndex(new GetIndexRequest().indices(targetIndex)) - .actionGet(); - assertEquals(1, getIndexResponse.indices().length); - assertResponse( - cluster.client() - .prepareSearch(sourceIndex) - .setQuery(new MatchAllQueryBuilder()) - .setSize(Math.min(DOC_COUNT, indexedDocs)) - .setTrackTotalHitsUpTo(Integer.MAX_VALUE), - sourceIndexSearch -> { - assertEquals(indexedDocs, sourceIndexSearch.getHits().getHits().length); - } - ); - assertResponse( - cluster.client() - .prepareSearch(targetIndex) - .setQuery(new MatchAllQueryBuilder()) - .setSize(Math.min(DOC_COUNT, indexedDocs)) - .setTrackTotalHitsUpTo(Integer.MAX_VALUE), - targetIndexSearch -> { - assertTrue(targetIndexSearch.getHits().getHits().length > 0); - } - ); - } - - private int bulkIndex(final String indexName, final DownsampleActionSingleNodeTests.SourceSupplier sourceSupplier, int docCount) - throws IOException { - BulkRequestBuilder bulkRequestBuilder = internalCluster().client().prepareBulk(); - bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); - for (int i = 0; i < docCount; i++) { - IndexRequest indexRequest = new IndexRequest(indexName).opType(DocWriteRequest.OpType.CREATE); - XContentBuilder source = sourceSupplier.get(); - indexRequest.source(source); - bulkRequestBuilder.add(indexRequest); - } - BulkResponse bulkResponse = bulkRequestBuilder.get(); - int duplicates = 0; - for (BulkItemResponse response : bulkResponse.getItems()) { - if (response.isFailed()) { - if (response.getFailure().getCause() instanceof VersionConflictEngineException) { - // A duplicate event was created by random generator. We should not fail for this - // reason. - logger.debug("We tried to insert a duplicate: [{}]", response.getFailureMessage()); - duplicates++; - } else { - fail("Failed to index data: " + bulkResponse.buildFailureMessage()); - } - } - } - int docsIndexed = docCount - duplicates; - logger.info("Indexed [{}] documents. 
Dropped [{}] duplicates.", docsIndexed, duplicates); - return docsIndexed; - } - - private void prepareSourceIndex(String sourceIndex) { - // Set the source index to read-only state - assertAcked( - indicesAdmin().prepareUpdateSettings(sourceIndex) - .setSettings(Settings.builder().put(IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.getKey(), true).build()) - ); - } - - private void downsample(final String sourceIndex, final String downsampleIndex, final DownsampleConfig config) { - assertAcked( - internalCluster().client() - .execute(DownsampleAction.INSTANCE, new DownsampleAction.Request(sourceIndex, downsampleIndex, TIMEOUT, config)) - .actionGet(TIMEOUT) - ); - } - - private String randomDateForInterval(final DateHistogramInterval interval, final long startTime) { - long endTime = startTime + 10 * interval.estimateMillis(); - return randomDateForRange(startTime, endTime); - } - - private String randomDateForRange(long start, long end) { - return DATE_FORMATTER.formatMillis(randomLongBetween(start, end)); - } -} diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java index 74e77c2896588..5debe5d2edfc9 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java @@ -448,6 +448,7 @@ public void onFailure(Exception e) { persistentTaskId, DownsampleShardTask.TASK_NAME, params, + null, ActionListener.wrap( startedTask -> persistentTasksService.waitForPersistentTaskCondition( startedTask.getId(), diff --git a/x-pack/plugin/ent-search/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/application/FullClusterRestartIT.java b/x-pack/plugin/ent-search/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/application/FullClusterRestartIT.java index 6b1b6fc886825..95b3b576eb46c 100644 --- a/x-pack/plugin/ent-search/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/application/FullClusterRestartIT.java +++ b/x-pack/plugin/ent-search/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/application/FullClusterRestartIT.java @@ -11,6 +11,7 @@ import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.rest.ObjectPath; @@ -22,12 +23,19 @@ import java.util.List; import static org.elasticsearch.Version.V_8_12_0; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCase { - + // DSL was introduced with version 8.12.0 of ES. private static final Version DSL_DEFAULT_RETENTION_VERSION = V_8_12_0; + // DSL was introduced with the version 3 of the registry. + private static final int DSL_REGISTRY_VERSION = 3; + // Event data streams template name. + private static final String EVENT_DATA_STREAM_LEGACY_TEMPLATE_NAME = "behavioral_analytics-events-default"; + + // Legacy name we used for ILM policy configuration in versions prior to 8.12.0.
private static final String EVENT_DATA_STREAM_LEGACY_ILM_POLICY_NAME = "behavioral_analytics-events-default_policy"; @ClassRule @@ -36,6 +44,7 @@ public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCas .version(getOldClusterTestVersion()) .nodes(2) .setting("xpack.security.enabled", "false") + .setting("xpack.license.self_generated.type", "trial") .module("x-pack-ent-search") .build(); @@ -48,7 +57,6 @@ protected ElasticsearchCluster getUpgradeCluster() { return cluster; } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/104470") public void testBehavioralAnalyticsDataRetention() throws Exception { assumeTrue( "Data retention changed by default to DSL in " + DSL_DEFAULT_RETENTION_VERSION, @@ -59,26 +67,32 @@ public void testBehavioralAnalyticsDataRetention() throws Exception { String newAnalyticsCollectionName = "newstuff"; if (isRunningAgainstOldCluster()) { + // Ensure index template is installed before executing the tests. + assertBusy(() -> assertDataStreamTemplateExists(EVENT_DATA_STREAM_LEGACY_TEMPLATE_NAME)); + // Create an analytics collection Request legacyPutRequest = new Request("PUT", "_application/analytics/" + legacyAnalyticsCollectionName); assertOK(client().performRequest(legacyPutRequest)); // Validate that ILM lifecycle is in place - assertBusy(() -> assertLegacyDataRetentionPolicy(legacyAnalyticsCollectionName)); + assertBusy(() -> assertUsingLegacyDataRetentionPolicy(legacyAnalyticsCollectionName)); } else { + // Ensure index template is updated to version 3 before executing the tests. + assertBusy(() -> assertDataStreamTemplateExists(EVENT_DATA_STREAM_LEGACY_TEMPLATE_NAME, DSL_REGISTRY_VERSION)); + // Create a new analytics collection Request putRequest = new Request("PUT", "_application/analytics/" + newAnalyticsCollectionName); assertOK(client().performRequest(putRequest)); // Validate that NO ILM lifecycle is in place and we are using DLS instead. 
- assertBusy(() -> assertDslDataRetention(newAnalyticsCollectionName)); + assertBusy(() -> assertUsingDslDataRetention(newAnalyticsCollectionName)); // Validate that the existing analytics collection created with an older version is still using ILM - assertBusy(() -> assertLegacyDataRetentionPolicy(legacyAnalyticsCollectionName)); + assertBusy(() -> assertUsingLegacyDataRetentionPolicy(legacyAnalyticsCollectionName)); } } - private void assertLegacyDataRetentionPolicy(String analyticsCollectionName) throws IOException { + private void assertUsingLegacyDataRetentionPolicy(String analyticsCollectionName) throws IOException { String dataStreamName = "behavioral_analytics-events-" + analyticsCollectionName; Request getDataStreamRequest = new Request("GET", "_data_stream/" + dataStreamName); Response response = client().performRequest(getDataStreamRequest); @@ -93,7 +107,7 @@ private void assertLegacyDataRetentionPolicy(String analyticsCollectionName) thr assertNotNull(policy.evaluate(EVENT_DATA_STREAM_LEGACY_ILM_POLICY_NAME)); } - private void assertDslDataRetention(String analyticsCollectionName) throws IOException { + private void assertUsingDslDataRetention(String analyticsCollectionName) throws IOException { String dataStreamName = "behavioral_analytics-events-" + analyticsCollectionName; Request getDataStreamRequest = new Request("GET", "_data_stream/" + dataStreamName); Response response = client().performRequest(getDataStreamRequest); @@ -105,13 +119,36 @@ private void assertDslDataRetention(String analyticsCollectionName) throws IOExc for (Object dataStreamObj : dataStreams) { ObjectPath dataStream = new ObjectPath(dataStreamObj); if (dataStreamName.equals(dataStream.evaluate("name"))) { - assertNull(dataStream.evaluate("ilm_policy")); assertEquals(true, dataStream.evaluate("lifecycle.enabled")); assertEquals("180d", dataStream.evaluate("lifecycle.data_retention")); + assertEquals("Data stream lifecycle", dataStream.evaluate("next_generation_managed_by")); + assertEquals(false, dataStream.evaluate("prefer_ilm")); evaluatedNewDataStream = true; } } assertTrue(evaluatedNewDataStream); + } + private void assertDataStreamTemplateExists(String templateName) throws IOException { + assertDataStreamTemplateExists(templateName, null); + } + + private void assertDataStreamTemplateExists(String templateName, Integer minVersion) throws IOException { + try { + Request getIndexTemplateRequest = new Request("GET", "_index_template/" + templateName); + Response response = client().performRequest(getIndexTemplateRequest); + assertOK(response); + if (minVersion != null) { + String pathToVersion = "index_templates.0.index_template.version"; + ObjectPath indexTemplatesResponse = ObjectPath.createFromResponse(response); + assertThat(indexTemplatesResponse.evaluate(pathToVersion), greaterThanOrEqualTo(minVersion)); + } + } catch (ResponseException e) { + int status = e.getResponse().getStatusLine().getStatusCode(); + if (status == 404) { + throw new AssertionError("Waiting for the template to be created"); + } + throw e; + } } } diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/332_connector_update_filtering.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/332_connector_update_filtering.yml index a693ba5431d4b..abb43806ec793 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/332_connector_update_filtering.yml +++ 
b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/332_connector_update_filtering.yml @@ -13,7 +13,7 @@ setup: is_native: false service_type: super-connector --- -"Update Connector Filtering": +"Update Connector Filtering with advanced snippet value array": - do: connector.update_filtering: connector_id: test-connector @@ -107,6 +107,7 @@ setup: - match: { filtering.0.domain: DEFAULT } - match: { filtering.0.active.advanced_snippet.created_at: "2023-05-25T12:30:00.000Z" } + - match: { filtering.0.active.advanced_snippet.value.0.tables.0.: "some_table" } - match: { filtering.0.active.rules.0.id: "RULE-ACTIVE-0" } - match: { filtering.0.draft.rules.0.id: "RULE-DRAFT-0" } @@ -115,6 +116,181 @@ setup: - match: { filtering.1.active.rules.0.id: "RULE-ACTIVE-1" } - match: { filtering.1.draft.rules.0.id: "RULE-DRAFT-1" } +--- +"Update Connector Filtering with advanced snippet value object": + - do: + connector.update_filtering: + connector_id: test-connector + body: + filtering: + - active: + advanced_snippet: + created_at: "2023-05-25T12:30:00.000Z" + updated_at: "2023-05-25T12:30:00.000Z" + value: + some_filtering_key: "some_filtering_value" + rules: + - created_at: "2023-05-25T12:30:00.000Z" + field: _ + id: RULE-ACTIVE-0 + order: 0 + policy: include + rule: regex + updated_at: "2023-05-25T12:30:00.000Z" + value: ".*" + validation: + errors: [] + state: valid + domain: DEFAULT + draft: + advanced_snippet: + created_at: "2023-05-25T12:30:00.000Z" + updated_at: "2023-05-25T12:30:00.000Z" + value: {} + rules: + - created_at: "2023-05-25T12:30:00.000Z" + field: _ + id: RULE-DRAFT-0 + order: 0 + policy: include + rule: regex + updated_at: "2023-05-25T12:30:00.000Z" + value: ".*" + validation: + errors: [] + state: valid + - active: + advanced_snippet: + created_at: "2021-05-25T12:30:00.000Z" + updated_at: "2021-05-25T12:30:00.000Z" + value: {} + rules: + - created_at: "2021-05-25T12:30:00.000Z" + field: _ + id: RULE-ACTIVE-1 + order: 0 + policy: include + rule: regex + updated_at: "2021-05-25T12:30:00.000Z" + value: ".*" + validation: + errors: [] + state: valid + domain: TEST + draft: + advanced_snippet: + created_at: "2021-05-25T12:30:00.000Z" + updated_at: "2021-05-25T12:30:00.000Z" + value: {} + rules: + - created_at: "2021-05-25T12:30:00.000Z" + field: _ + id: RULE-DRAFT-1 + order: 0 + policy: exclude + rule: regex + updated_at: "2021-05-25T12:30:00.000Z" + value: ".*" + validation: + errors: [] + state: valid + + - match: { result: updated } + + - do: + connector.get: + connector_id: test-connector + + - match: { filtering.0.domain: DEFAULT } + - match: { filtering.0.active.advanced_snippet.created_at: "2023-05-25T12:30:00.000Z" } + - match: { filtering.0.active.advanced_snippet.value.some_filtering_key: "some_filtering_value" } + - match: { filtering.0.active.rules.0.id: "RULE-ACTIVE-0" } + - match: { filtering.0.draft.rules.0.id: "RULE-DRAFT-0" } + + - match: { filtering.1.domain: TEST } + - match: { filtering.1.active.advanced_snippet.created_at: "2021-05-25T12:30:00.000Z" } + - match: { filtering.1.active.rules.0.id: "RULE-ACTIVE-1" } + - match: { filtering.1.draft.rules.0.id: "RULE-DRAFT-1" } + +--- +"Update Connector Filtering with value literal - Wrong advanced snippet value": + - do: + catch: "bad_request" + connector.update_filtering: + connector_id: test-connector + body: + filtering: + - active: + advanced_snippet: + created_at: "2023-05-25T12:30:00.000Z" + updated_at: "2023-05-25T12:30:00.000Z" + value: "string literal" + rules: + - 
created_at: "2023-05-25T12:30:00.000Z" + field: _ + id: RULE-ACTIVE-0 + order: 0 + policy: include + rule: regex + updated_at: "2023-05-25T12:30:00.000Z" + value: ".*" + validation: + errors: [] + state: valid + domain: DEFAULT + draft: + advanced_snippet: + created_at: "2023-05-25T12:30:00.000Z" + updated_at: "2023-05-25T12:30:00.000Z" + value: {} + rules: + - created_at: "2023-05-25T12:30:00.000Z" + field: _ + id: RULE-DRAFT-0 + order: 0 + policy: include + rule: regex + updated_at: "2023-05-25T12:30:00.000Z" + value: ".*" + validation: + errors: [] + state: valid + - active: + advanced_snippet: + created_at: "2021-05-25T12:30:00.000Z" + updated_at: "2021-05-25T12:30:00.000Z" + value: {} + rules: + - created_at: "2021-05-25T12:30:00.000Z" + field: _ + id: RULE-ACTIVE-1 + order: 0 + policy: include + rule: regex + updated_at: "2021-05-25T12:30:00.000Z" + value: ".*" + validation: + errors: [] + state: valid + domain: TEST + draft: + advanced_snippet: + created_at: "2021-05-25T12:30:00.000Z" + updated_at: "2021-05-25T12:30:00.000Z" + value: {} + rules: + - created_at: "2021-05-25T12:30:00.000Z" + field: _ + id: RULE-DRAFT-1 + order: 0 + policy: exclude + rule: regex + updated_at: "2021-05-25T12:30:00.000Z" + value: ".*" + validation: + errors: [] + state: valid + --- "Update Connector Filtering - Connector doesn't exist": - do: diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/430_connector_sync_job_cancel.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/430_connector_sync_job_cancel.yml index 633c1a8cecb7b..eea4ca197614d 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/430_connector_sync_job_cancel.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/430_connector_sync_job_cancel.yml @@ -13,7 +13,7 @@ setup: service_type: super-connector --- -"Cancel a Connector Sync Job": +"Cancel a pending Connector Sync Job - transition to canceled directly": - do: connector_sync_job.post: body: @@ -33,8 +33,31 @@ setup: connector_sync_job.get: connector_sync_job_id: $sync-job-id-to-cancel - - match: { status: "canceling"} + - set: { cancelation_requested_at: cancelation_requested_at } + - match: { status: "canceled"} + - match: { completed_at: $cancelation_requested_at } + - match: { canceled_at: $cancelation_requested_at } + +--- +"Cancel a canceled Connector Sync Job - invalid state transition from canceled to canceling": + - do: + connector_sync_job.post: + body: + id: test-connector + job_type: full + trigger_method: on_demand + + - set: { id: sync-job-id-to-cancel } + + - do: + connector_sync_job.cancel: + connector_sync_job_id: $sync-job-id-to-cancel + + - do: + catch: bad_request + connector_sync_job.cancel: + connector_sync_job_id: $sync-job-id-to-cancel --- "Cancel a Connector Sync Job - Connector Sync Job does not exist": diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/450_connector_sync_job_error.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/450_connector_sync_job_error.yml index a565d28c3e788..78cfdb845b10e 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/450_connector_sync_job_error.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/450_connector_sync_job_error.yml @@ -13,7 +13,7 @@ setup: 
service_type: super-connector --- -"Set an error for a connector sync job": +"Set an error for a pending connector sync job - invalid state transition from pending to error": - do: connector_sync_job.post: body: @@ -24,21 +24,12 @@ setup: - set: { id: id } - do: + catch: bad_request connector_sync_job.error: connector_sync_job_id: $id body: error: error - - match: { result: updated } - - - do: - connector_sync_job.get: - connector_sync_job_id: $id - - - match: { error: error } - - match: { status: error } - - --- "Set an error for a Connector Sync Job - Connector Sync Job does not exist": - do: diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/470_connector_sync_job_list.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/470_connector_sync_job_list.yml index 8d23850f49840..82d9a18bb51e9 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/470_connector_sync_job_list.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/470_connector_sync_job_list.yml @@ -50,10 +50,10 @@ setup: - match: { count: 3 } - # Ascending order by creation_date for results - - match: { results.0.id: $sync-job-one-id } + # Descending order by creation_date for results + - match: { results.0.id: $sync-job-three-id } - match: { results.1.id: $sync-job-two-id } - - match: { results.2.id: $sync-job-three-id } + - match: { results.2.id: $sync-job-one-id } --- "List Connector Sync Jobs - with from": @@ -84,9 +84,9 @@ setup: - match: { count: 3 } - # Ascending order by creation_date for results + # Descending order by creation_date for results - match: { results.0.id: $sync-job-two-id } - - match: { results.1.id: $sync-job-three-id } + - match: { results.1.id: $sync-job-one-id } --- "List Connector Sync Jobs - with size": @@ -117,7 +117,8 @@ setup: - match: { count: 3 } - - match: { results.0.id: $sync-job-one-id } + # Descending order by creation_date for results + - match: { results.0.id: $sync-job-three-id } --- "List Connector Sync Jobs - Get pending jobs": @@ -216,9 +217,11 @@ setup: connector_sync_job.list: connector_id: connector-one job_type: full,incremental + + # Descending order by creation_date for results - match: { count: 2 } - - match: { results.0.id: $sync-job-one-id } - - match: { results.1.id: $sync-job-two-id } + - match: { results.0.id: $sync-job-two-id } + - match: { results.1.id: $sync-job-one-id } --- "List Connector Sync Jobs - with invalid job type": diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorConfiguration.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorConfiguration.java index 3b0254ef6ffcf..fc2c0920f49df 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorConfiguration.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorConfiguration.java @@ -30,7 +30,9 @@ import org.elasticsearch.xpack.application.connector.configuration.ConfigurationValidation; import java.io.IOException; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.Objects; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; @@ -217,6 +219,62 @@ public ConnectorConfiguration(StreamInput in) throws IOException { ); } + public String getCategory() { + return 
category; + } + + public Object getDefaultValue() { + return defaultValue; + } + + public List getDependsOn() { + return dependsOn; + } + + public ConfigurationDisplayType getDisplay() { + return display; + } + + public String getLabel() { + return label; + } + + public List getOptions() { + return options; + } + + public Integer getOrder() { + return order; + } + + public String getPlaceholder() { + return placeholder; + } + + public boolean isRequired() { + return required; + } + + public boolean isSensitive() { + return sensitive; + } + + public String getTooltip() { + return tooltip; + } + + public ConfigurationFieldType getType() { + return type; + } + + public List getUiRestrictions() { + return uiRestrictions; + } + + public List getValidations() { + return validations; + } + public Object getValue() { return value; } @@ -320,6 +378,46 @@ public void writeTo(StreamOutput out) throws IOException { out.writeGenericValue(value); } + public Map toMap() { + Map map = new HashMap<>(); + if (category != null) { + map.put(CATEGORY_FIELD.getPreferredName(), category); + } + map.put(DEFAULT_VALUE_FIELD.getPreferredName(), defaultValue); + if (dependsOn != null) { + map.put(DEPENDS_ON_FIELD.getPreferredName(), dependsOn.stream().map(ConfigurationDependency::toMap).toList()); + } + if (display != null) { + map.put(DISPLAY_FIELD.getPreferredName(), display.toString()); + } + map.put(LABEL_FIELD.getPreferredName(), label); + if (options != null) { + map.put(OPTIONS_FIELD.getPreferredName(), options.stream().map(ConfigurationSelectOption::toMap).toList()); + } + if (order != null) { + map.put(ORDER_FIELD.getPreferredName(), order); + } + if (placeholder != null) { + map.put(PLACEHOLDER_FIELD.getPreferredName(), placeholder); + } + map.put(REQUIRED_FIELD.getPreferredName(), required); + map.put(SENSITIVE_FIELD.getPreferredName(), sensitive); + if (tooltip != null) { + map.put(TOOLTIP_FIELD.getPreferredName(), tooltip); + } + if (type != null) { + map.put(TYPE_FIELD.getPreferredName(), type.toString()); + } + if (uiRestrictions != null) { + map.put(UI_RESTRICTIONS_FIELD.getPreferredName(), uiRestrictions); + } + if (validations != null) { + map.put(VALIDATIONS_FIELD.getPreferredName(), validations.stream().map(ConfigurationValidation::toMap).toList()); + } + map.put(VALUE_FIELD.getPreferredName(), value); + return map; + } + @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorFeatures.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorFeatures.java index 51aa110342fe9..bbb8805de1f0f 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorFeatures.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorFeatures.java @@ -41,28 +41,33 @@ public class ConnectorFeatures implements Writeable, ToXContentObject { @Nullable private final FeatureEnabled incrementalSyncEnabled; @Nullable + private final FeatureEnabled nativeConnectorAPIKeysEnabled; + @Nullable private final SyncRulesFeatures syncRulesFeatures; /** * Constructs a new instance of ConnectorFeatures. * - * @param documentLevelSecurityEnabled A flag indicating whether document-level security is enabled. - * @param filteringAdvancedConfig A flag indicating whether advanced filtering configuration is enabled. 
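// Editor's sketch (not part of the change): the new accessors and toMap() above let a
// configuration field be handed to an update-by-script request as plain maps instead of
// ToXContent objects. Note that this diff rendering strips generic type parameters, so "Map"
// here is presumably Map<String, Object>. A minimal, self-contained illustration of the same
// pattern with hypothetical field names; optional attributes are only written when non-null:
import java.util.HashMap;
import java.util.Map;

class ConfigFieldToMapSketch {
    private final String label = "API key";   // hypothetical stand-ins for the real attributes
    private final String tooltip = null;      // optional attribute, left out of the map when unset
    private final boolean required = true;
    private final Object value = "secret-id";

    Map<String, Object> toMap() {
        Map<String, Object> map = new HashMap<>();
        map.put("label", label);
        map.put("required", required);
        map.put("value", value);
        if (tooltip != null) {
            map.put("tooltip", tooltip);
        }
        return map;
    }
}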
- * @param filteringRules A flag indicating whether filtering rules are enabled. - * @param incrementalSyncEnabled A flag indicating whether incremental sync is enabled. - * @param syncRulesFeatures An {@link SyncRulesFeatures} object indicating whether basic and advanced sync rules are enabled. + * @param documentLevelSecurityEnabled A flag indicating whether document-level security is enabled. + * @param filteringAdvancedConfig A flag indicating whether advanced filtering configuration is enabled. + * @param filteringRules A flag indicating whether filtering rules are enabled. + * @param incrementalSyncEnabled A flag indicating whether incremental sync is enabled. + * @param nativeConnectorAPIKeysEnabled A flag indicating whether support for api keys is enabled for native connectors. + * @param syncRulesFeatures An {@link SyncRulesFeatures} object indicating if basic and advanced sync rules are enabled. */ private ConnectorFeatures( FeatureEnabled documentLevelSecurityEnabled, Boolean filteringAdvancedConfig, Boolean filteringRules, FeatureEnabled incrementalSyncEnabled, + FeatureEnabled nativeConnectorAPIKeysEnabled, SyncRulesFeatures syncRulesFeatures ) { this.documentLevelSecurityEnabled = documentLevelSecurityEnabled; this.filteringAdvancedConfigEnabled = filteringAdvancedConfig; this.filteringRulesEnabled = filteringRules; this.incrementalSyncEnabled = incrementalSyncEnabled; + this.nativeConnectorAPIKeysEnabled = nativeConnectorAPIKeysEnabled; this.syncRulesFeatures = syncRulesFeatures; } @@ -71,6 +76,7 @@ public ConnectorFeatures(StreamInput in) throws IOException { this.filteringAdvancedConfigEnabled = in.readOptionalBoolean(); this.filteringRulesEnabled = in.readOptionalBoolean(); this.incrementalSyncEnabled = in.readOptionalWriteable(FeatureEnabled::new); + this.nativeConnectorAPIKeysEnabled = in.readOptionalWriteable(FeatureEnabled::new); this.syncRulesFeatures = in.readOptionalWriteable(SyncRulesFeatures::new); } @@ -78,19 +84,19 @@ public ConnectorFeatures(StreamInput in) throws IOException { private static final ParseField FILTERING_ADVANCED_CONFIG_ENABLED_FIELD = new ParseField("filtering_advanced_config"); private static final ParseField FILTERING_RULES_ENABLED_FIELD = new ParseField("filtering_rules"); private static final ParseField INCREMENTAL_SYNC_ENABLED_FIELD = new ParseField("incremental_sync"); + private static final ParseField NATIVE_CONNECTOR_API_KEYS_ENABLED_FIELD = new ParseField("native_connector_api_keys"); private static final ParseField SYNC_RULES_FIELD = new ParseField("sync_rules"); private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "connector_features", true, - args -> { - return new Builder().setDocumentLevelSecurityEnabled((FeatureEnabled) args[0]) - .setFilteringAdvancedConfig((Boolean) args[1]) - .setFilteringRules((Boolean) args[2]) - .setIncrementalSyncEnabled((FeatureEnabled) args[3]) - .setSyncRulesFeatures((SyncRulesFeatures) args[4]) - .build(); - } + args -> new Builder().setDocumentLevelSecurityEnabled((FeatureEnabled) args[0]) + .setFilteringAdvancedConfig((Boolean) args[1]) + .setFilteringRules((Boolean) args[2]) + .setIncrementalSyncEnabled((FeatureEnabled) args[3]) + .setNativeConnectorAPIKeysEnabled((FeatureEnabled) args[4]) + .setSyncRulesFeatures((SyncRulesFeatures) args[5]) + .build() ); static { @@ -98,6 +104,7 @@ public ConnectorFeatures(StreamInput in) throws IOException { PARSER.declareBoolean(optionalConstructorArg(), FILTERING_ADVANCED_CONFIG_ENABLED_FIELD); 
PARSER.declareBoolean(optionalConstructorArg(), FILTERING_RULES_ENABLED_FIELD); PARSER.declareObject(optionalConstructorArg(), (p, c) -> FeatureEnabled.fromXContent(p), INCREMENTAL_SYNC_ENABLED_FIELD); + PARSER.declareObject(optionalConstructorArg(), (p, c) -> FeatureEnabled.fromXContent(p), NATIVE_CONNECTOR_API_KEYS_ENABLED_FIELD); PARSER.declareObject(optionalConstructorArg(), (p, c) -> SyncRulesFeatures.fromXContent(p), SYNC_RULES_FIELD); } @@ -129,6 +136,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (incrementalSyncEnabled != null) { builder.field(INCREMENTAL_SYNC_ENABLED_FIELD.getPreferredName(), incrementalSyncEnabled); } + if (nativeConnectorAPIKeysEnabled != null) { + builder.field(NATIVE_CONNECTOR_API_KEYS_ENABLED_FIELD.getPreferredName(), nativeConnectorAPIKeysEnabled); + } if (syncRulesFeatures != null) { builder.field(SYNC_RULES_FIELD.getPreferredName(), syncRulesFeatures); } @@ -143,6 +153,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalBoolean(filteringAdvancedConfigEnabled); out.writeOptionalBoolean(filteringRulesEnabled); out.writeOptionalWriteable(incrementalSyncEnabled); + out.writeOptionalWriteable(nativeConnectorAPIKeysEnabled); out.writeOptionalWriteable(syncRulesFeatures); } @@ -155,6 +166,7 @@ public boolean equals(Object o) { && Objects.equals(filteringAdvancedConfigEnabled, features.filteringAdvancedConfigEnabled) && Objects.equals(filteringRulesEnabled, features.filteringRulesEnabled) && Objects.equals(incrementalSyncEnabled, features.incrementalSyncEnabled) + && Objects.equals(nativeConnectorAPIKeysEnabled, features.nativeConnectorAPIKeysEnabled) && Objects.equals(syncRulesFeatures, features.syncRulesFeatures); } @@ -165,6 +177,7 @@ public int hashCode() { filteringAdvancedConfigEnabled, filteringRulesEnabled, incrementalSyncEnabled, + nativeConnectorAPIKeysEnabled, syncRulesFeatures ); } @@ -175,6 +188,7 @@ public static class Builder { private Boolean filteringAdvancedConfig; private Boolean filteringRules; private FeatureEnabled incrementalSyncEnabled; + private FeatureEnabled nativeConnectorAPIKeysEnabled; private SyncRulesFeatures syncRulesFeatures; public Builder setDocumentLevelSecurityEnabled(FeatureEnabled documentLevelSecurityEnabled) { @@ -197,6 +211,11 @@ public Builder setIncrementalSyncEnabled(FeatureEnabled incrementalSyncEnabled) return this; } + public Builder setNativeConnectorAPIKeysEnabled(FeatureEnabled nativeConnectorAPIKeysEnabled) { + this.nativeConnectorAPIKeysEnabled = nativeConnectorAPIKeysEnabled; + return this; + } + public Builder setSyncRulesFeatures(SyncRulesFeatures syncRulesFeatures) { this.syncRulesFeatures = syncRulesFeatures; return this; @@ -208,6 +227,7 @@ public ConnectorFeatures build() { filteringAdvancedConfig, filteringRules, incrementalSyncEnabled, + nativeConnectorAPIKeysEnabled, syncRulesFeatures ); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java index ea5ec92e18007..40a6eeaafd708 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java @@ -418,7 +418,7 @@ public void updateConnectorConfiguration(UpdateConnectorConfigurationAction.Requ updateConfigurationScript, Map.of( 
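// Editor's note (not part of the change): EVERYDAY_AT_MIDNIGHT is a Quartz-style cron with six
// fields (seconds minutes hours day-of-month month day-of-week), so "0 0 0 * * ?" fires at
// 00:00:00 every day. The sketch below mirrors getDefaultConnectorScheduling(): each default
// schedule is built disabled, so nothing actually runs until a user enables it.
ConnectorScheduling.ScheduleConfig defaultFullSchedule = new ConnectorScheduling.ScheduleConfig.Builder()
    .setEnabled(false)                           // disabled by default
    .setInterval(new Cron(EVERYDAY_AT_MIDNIGHT)) // "0 0 0 * * ?" -> every day at midnight
    .build();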
Connector.CONFIGURATION_FIELD.getPreferredName(), - request.getConfiguration(), + request.getConfigurationAsMap(), Connector.STATUS_FIELD.getPreferredName(), ConnectorStatus.CONFIGURED.toString() ) diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorScheduling.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorScheduling.java index 233bea5d4a842..637957b8ce66e 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorScheduling.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorScheduling.java @@ -30,14 +30,15 @@ public class ConnectorScheduling implements Writeable, ToXContentObject { - private final ScheduleConfig accessControl; - private final ScheduleConfig full; - private final ScheduleConfig incremental; - + private static final String EVERYDAY_AT_MIDNIGHT = "0 0 0 * * ?"; private static final ParseField ACCESS_CONTROL_FIELD = new ParseField("access_control"); private static final ParseField FULL_FIELD = new ParseField("full"); private static final ParseField INCREMENTAL_FIELD = new ParseField("incremental"); + private final ScheduleConfig accessControl; + private final ScheduleConfig full; + private final ScheduleConfig incremental; + /** * @param accessControl connector access control sync schedule represented as {@link ScheduleConfig} * @param full connector full sync schedule represented as {@link ScheduleConfig} @@ -238,12 +239,19 @@ public ScheduleConfig build() { } } + /** + * Default scheduling is set to everyday at midnight (00:00:00). + * + * @return default scheduling for full, incremental and access control syncs. + */ public static ConnectorScheduling getDefaultConnectorScheduling() { return new ConnectorScheduling.Builder().setAccessControl( - new ConnectorScheduling.ScheduleConfig.Builder().setEnabled(false).setInterval(new Cron("0 0 0 * * ?")).build() + new ConnectorScheduling.ScheduleConfig.Builder().setEnabled(false).setInterval(new Cron(EVERYDAY_AT_MIDNIGHT)).build() ) - .setFull(new ConnectorScheduling.ScheduleConfig.Builder().setEnabled(false).setInterval(new Cron("0 0 0 * * ?")).build()) - .setIncremental(new ConnectorScheduling.ScheduleConfig.Builder().setEnabled(false).setInterval(new Cron("0 0 0 * * ?")).build()) + .setFull(new ConnectorScheduling.ScheduleConfig.Builder().setEnabled(false).setInterval(new Cron(EVERYDAY_AT_MIDNIGHT)).build()) + .setIncremental( + new ConnectorScheduling.ScheduleConfig.Builder().setEnabled(false).setInterval(new Cron(EVERYDAY_AT_MIDNIGHT)).build() + ) .build(); } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorStateMachine.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorStateMachine.java index 39a12ba334c30..f722955cc0f9e 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorStateMachine.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorStateMachine.java @@ -42,6 +42,13 @@ public static boolean isValidTransition(ConnectorStatus current, ConnectorStatus return validNextStates(current).contains(next); } + /** + * Throws {@link ConnectorInvalidStatusTransitionException} if a + * transition from one {@link ConnectorStatus} to another is invalid. 
+ * + * @param current The current {@link ConnectorStatus} of the {@link Connector}. + * @param next The proposed next {@link ConnectorStatus} of the {@link Connector}. + */ public static void assertValidStateTransition(ConnectorStatus current, ConnectorStatus next) throws ConnectorInvalidStatusTransitionException { if (isValidTransition(current, next)) return; diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorConfigurationAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorConfigurationAction.java index 0421d710ccdfb..9069f832e1c44 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorConfigurationAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorConfigurationAction.java @@ -30,6 +30,7 @@ import java.util.HashMap; import java.util.Map; import java.util.Objects; +import java.util.stream.Collectors; import static org.elasticsearch.action.ValidateActions.addValidationError; import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; @@ -70,6 +71,10 @@ public Map getConfiguration() { return configuration; } + public Map> getConfigurationAsMap() { + return configuration.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().toMap())); + } + public Map getConfigurationValues() { return configurationValues; } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationDependency.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationDependency.java index 1efd3f47fdff0..46714eb5b34a0 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationDependency.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationDependency.java @@ -19,6 +19,8 @@ import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; +import java.util.HashMap; +import java.util.Map; import java.util.Objects; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; @@ -84,6 +86,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + public Map toMap() { + Map map = new HashMap<>(); + map.put(FIELD_FIELD.getPreferredName(), field); + map.put(VALUE_FIELD.getPreferredName(), value); + return map; + } + public static ConfigurationDependency fromXContent(XContentParser parser) throws IOException { return PARSER.parse(parser, null); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationSelectOption.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationSelectOption.java index ba281c69702e0..3c17f97ead51d 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationSelectOption.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationSelectOption.java @@ -17,6 +17,8 @@ import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; +import java.util.HashMap; +import 
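// Editor's note (not part of the change): generic type parameters are stripped in this diff
// rendering; getConfigurationAsMap() presumably returns Map<String, Map<String, Object>>. A
// self-contained sketch of the same Collectors.toMap pattern, using a hypothetical record in
// place of ConnectorConfiguration, so the whole structure can be passed as script params:
import java.util.Map;
import java.util.stream.Collectors;

class ConfigurationAsMapSketch {
    record ConfigField(String label, Object value) {     // hypothetical stand-in
        Map<String, Object> toMap() {
            return Map.of("label", label, "value", value);
        }
    }

    static Map<String, Map<String, Object>> asNestedMap(Map<String, ConfigField> configuration) {
        // keep the field names as keys, replace each value with its plain-map representation
        return configuration.entrySet()
            .stream()
            .collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().toMap()));
    }
}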
java.util.Map; import java.util.Objects; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; @@ -60,6 +62,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + public Map toMap() { + Map map = new HashMap<>(); + map.put(LABEL_FIELD.getPreferredName(), label); + map.put(VALUE_FIELD.getPreferredName(), value); + return map; + } + public static ConfigurationSelectOption fromXContent(XContentParser parser) throws IOException { return PARSER.parse(parser, null); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationValidation.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationValidation.java index 8f05e67ecb14d..51e912650bc1d 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationValidation.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationValidation.java @@ -19,6 +19,8 @@ import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; +import java.util.HashMap; +import java.util.Map; import java.util.Objects; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; @@ -100,6 +102,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + public Map toMap() { + Map map = new HashMap<>(); + map.put(CONSTRAINT_FIELD.getPreferredName(), constraint); + map.put(TYPE_FIELD.getPreferredName(), type.toString()); + return map; + } + public static ConfigurationValidation fromXContent(XContentParser parser) throws IOException { return PARSER.parse(parser, null); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/secrets/ConnectorSecretsIndexService.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/secrets/ConnectorSecretsIndexService.java index cc25b8e5317d4..ba9a3e78281dd 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/secrets/ConnectorSecretsIndexService.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/secrets/ConnectorSecretsIndexService.java @@ -76,6 +76,12 @@ public static SystemIndexDescriptor getSystemIndexDescriptor() { .build(); } + /** + * Gets the secret from the underlying index with the specified id. + * + * @param id The id of the secret. + * @param listener The action listener to invoke on response/failure. + */ public void getSecret(String id, ActionListener listener) { clientWithOrigin.prepareGet(CONNECTOR_SECRETS_INDEX_NAME, id).execute(listener.delegateFailureAndWrap((delegate, getResponse) -> { if (getResponse.isSourceEmpty()) { @@ -86,6 +92,12 @@ public void getSecret(String id, ActionListener list })); } + /** + * Creates a secret in the underlying index with an auto-generated doc ID. + * + * @param request Request for creating the secret. + * @param listener The action listener to invoke on response/failure. 
+ */ public void createSecret(PostConnectorSecretRequest request, ActionListener listener) { try { clientWithOrigin.prepareIndex(CONNECTOR_SECRETS_INDEX_NAME) @@ -100,6 +112,12 @@ public void createSecret(PostConnectorSecretRequest request, ActionListener listener) { String connectorSecretId = request.id(); @@ -119,6 +137,12 @@ public void createSecretWithDocId(PutConnectorSecretRequest request, ActionListe } } + /** + * Deletes the secret in the underlying index with the specified doc ID. + * + * @param id The id of the secret to delete. + * @param listener The action listener to invoke on response/failure. + */ public void deleteSecret(String id, ActionListener listener) { try { clientWithOrigin.prepareDelete(CONNECTOR_SECRETS_INDEX_NAME, id) diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/secrets/action/PostConnectorSecretRequest.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/secrets/action/PostConnectorSecretRequest.java index 2e565dece7eca..90672f7ca7120 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/secrets/action/PostConnectorSecretRequest.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/secrets/action/PostConnectorSecretRequest.java @@ -21,15 +21,15 @@ import java.io.IOException; import java.util.Objects; +import static org.elasticsearch.action.ValidateActions.addValidationError; + public class PostConnectorSecretRequest extends ActionRequest { - public static final ParseField VALUE_FIELD = new ParseField("value"); + private static final ParseField VALUE_FIELD = new ParseField("value"); public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "post_secret_request", - args -> { - return new PostConnectorSecretRequest((String) args[0]); - } + args -> new PostConnectorSecretRequest((String) args[0]) ); static { @@ -75,13 +75,13 @@ public void writeTo(StreamOutput out) throws IOException { @Override public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (Strings.isNullOrEmpty(this.value)) { - ActionRequestValidationException exception = new ActionRequestValidationException(); - exception.addValidationError("value is missing"); - return exception; + validationException = addValidationError("[value] of the connector secret cannot be [null] or [\"\"]", validationException); } - return null; + return validationException; } @Override diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexService.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexService.java index b1c08d8b7fbb1..d1d345840874f 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexService.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexService.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.application.connector.syncjob; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; @@ -24,7 +25,6 @@ import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; import 
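// Editor's note (not part of the change): the reworked validate() uses the usual ActionRequest
// idiom of accumulating errors via ValidateActions.addValidationError and returning null when
// nothing failed, which lets several checks share one response. A sketch of that idiom with two
// checks, matching the messages the Put/Post secret request tests assert on below:
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.common.Strings;

import static org.elasticsearch.action.ValidateActions.addValidationError;

// ...inside an ActionRequest subclass holding String fields "id" and "value":
public ActionRequestValidationException validate() {
    ActionRequestValidationException validationException = null;
    if (Strings.isNullOrEmpty(id)) {
        validationException = addValidationError("[id] cannot be [null] or [\"\"]", validationException);
    }
    if (Strings.isNullOrEmpty(value)) {
        validationException = addValidationError("[value] cannot be [null] or [\"\"]", validationException);
    }
    return validationException; // null means the request is valid
}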
org.elasticsearch.client.internal.Client; -import org.elasticsearch.common.UUIDs; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.engine.DocumentMissingException; import org.elasticsearch.index.query.BoolQueryBuilder; @@ -32,6 +32,7 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.index.query.TermsQueryBuilder; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.sort.SortOrder; @@ -220,45 +221,79 @@ public void getConnectorSyncJob(String connectorSyncJobId, ActionListener listener) { - Instant cancellationRequestedAt = Instant.now(); + try { + getConnectorSyncJob(connectorSyncJobId, listener.delegateFailure((getSyncJobListener, syncJobSearchResult) -> { + Map syncJobFieldsToUpdate; + Instant now = Instant.now(); - final UpdateRequest updateRequest = new UpdateRequest(CONNECTOR_SYNC_JOB_INDEX_NAME, connectorSyncJobId).setRefreshPolicy( - WriteRequest.RefreshPolicy.IMMEDIATE - ) - .doc( - Map.of( - ConnectorSyncJob.STATUS_FIELD.getPreferredName(), - ConnectorSyncStatus.CANCELING, - ConnectorSyncJob.CANCELATION_REQUESTED_AT_FIELD.getPreferredName(), - cancellationRequestedAt - ) - ); + ConnectorSyncStatus prevStatus = getConnectorSyncJobStatusFromSearchResult(syncJobSearchResult); - try { - client.update( - updateRequest, - new DelegatingIndexNotFoundOrDocumentMissingActionListener<>(connectorSyncJobId, listener, (l, updateResponse) -> { - if (updateResponse.getResult() == DocWriteResponse.Result.NOT_FOUND) { - l.onFailure(new ResourceNotFoundException(connectorSyncJobId)); - return; + try { + if (ConnectorSyncStatus.PENDING.equals(prevStatus) || ConnectorSyncStatus.SUSPENDED.equals(prevStatus)) { + // A pending or suspended non-running sync job is set to `canceled` directly + // without a transition to the in-between `canceling` status + ConnectorSyncStatus nextStatus = ConnectorSyncStatus.CANCELED; + ConnectorSyncJobStateMachine.assertValidStateTransition(prevStatus, nextStatus); + + syncJobFieldsToUpdate = Map.of( + ConnectorSyncJob.STATUS_FIELD.getPreferredName(), + nextStatus, + ConnectorSyncJob.CANCELATION_REQUESTED_AT_FIELD.getPreferredName(), + now, + ConnectorSyncJob.CANCELED_AT_FIELD.getPreferredName(), + now, + ConnectorSyncJob.COMPLETED_AT_FIELD.getPreferredName(), + now + ); + } else { + ConnectorSyncStatus nextStatus = ConnectorSyncStatus.CANCELING; + ConnectorSyncJobStateMachine.assertValidStateTransition(prevStatus, nextStatus); + + syncJobFieldsToUpdate = Map.of( + ConnectorSyncJob.STATUS_FIELD.getPreferredName(), + nextStatus, + ConnectorSyncJob.CANCELATION_REQUESTED_AT_FIELD.getPreferredName(), + now + ); } - l.onResponse(updateResponse); - }) - ); + } catch (ConnectorSyncJobInvalidStatusTransitionException e) { + getSyncJobListener.onFailure(new ElasticsearchStatusException(e.getMessage(), RestStatus.BAD_REQUEST, e)); + return; + } + + final UpdateRequest updateRequest = new UpdateRequest(CONNECTOR_SYNC_JOB_INDEX_NAME, connectorSyncJobId).setRefreshPolicy( + WriteRequest.RefreshPolicy.IMMEDIATE + ).doc(syncJobFieldsToUpdate); + + client.update( + updateRequest, + new DelegatingIndexNotFoundOrDocumentMissingActionListener<>( + connectorSyncJobId, + listener, + (indexNotFoundListener, updateResponse) -> { + if (updateResponse.getResult() == DocWriteResponse.Result.NOT_FOUND) { + indexNotFoundListener.onFailure(new 
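// Editor's summary (not part of the change): the cancel path above now branches on the current
// status. Jobs that never started (pending or suspended) move straight to canceled, stamping
// cancelation_requested_at, canceled_at and completed_at with the same instant; anything else is
// asked to stop via the in-between canceling status; invalid transitions (e.g. from completed)
// are rejected before any write. Condensed sketch of the status selection:
ConnectorSyncStatus nextStatus =
    ConnectorSyncStatus.PENDING.equals(prevStatus) || ConnectorSyncStatus.SUSPENDED.equals(prevStatus)
        ? ConnectorSyncStatus.CANCELED    // never ran: cancel directly, no "canceling" round-trip
        : ConnectorSyncStatus.CANCELING;  // otherwise request cancellation from the running sync
ConnectorSyncJobStateMachine.assertValidStateTransition(prevStatus, nextStatus); // throws for e.g. COMPLETED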
ResourceNotFoundException(connectorSyncJobId)); + return; + } + indexNotFoundListener.onResponse(updateResponse); + } + ) + ); + })); } catch (Exception e) { listener.onFailure(e); } } /** - * List the {@link ConnectorSyncJob} in ascending order of their 'created_at'. + * List the {@link ConnectorSyncJob} in descending order of their 'created_at'. * * @param from From index to start the search from. * @param size The maximum number of {@link Connector}s to return. @@ -282,7 +317,7 @@ public void listConnectorSyncJobs( .size(size) .query(query) .fetchSource(true) - .sort(ConnectorSyncJob.CREATED_AT_FIELD.getPreferredName(), SortOrder.ASC); + .sort(ConnectorSyncJob.CREATED_AT_FIELD.getPreferredName(), SortOrder.DESC); final SearchRequest searchRequest = new SearchRequest(CONNECTOR_SYNC_JOB_INDEX_NAME).source(searchSource); @@ -416,12 +451,10 @@ public void updateConnectorSyncJobIngestionStats( } - private String generateId() { - /* Workaround: only needed for generating an id upfront, autoGenerateId() has a side effect generating a timestamp, - * which would raise an error on the response layer later ("autoGeneratedTimestamp should not be set externally"). - * TODO: do we even need to copy the "_id" and set it as "id"? - */ - return UUIDs.base64UUID(); + private ConnectorSyncStatus getConnectorSyncJobStatusFromSearchResult(ConnectorSyncJobSearchResult searchResult) { + return ConnectorSyncStatus.connectorSyncStatus( + (String) searchResult.getResultMap().get(ConnectorSyncJob.STATUS_FIELD.getPreferredName()) + ); } private void getSyncJobConnectorInfo(String connectorId, ActionListener listener) { @@ -494,29 +527,45 @@ FilteringRules transformConnectorFilteringToSyncJobRepresentation(List listener) { - final UpdateRequest updateRequest = new UpdateRequest(CONNECTOR_SYNC_JOB_INDEX_NAME, connectorSyncJobId).setRefreshPolicy( - WriteRequest.RefreshPolicy.IMMEDIATE - ) - .doc( - Map.of( - ConnectorSyncJob.ERROR_FIELD.getPreferredName(), - error, - ConnectorSyncJob.STATUS_FIELD.getPreferredName(), - ConnectorSyncStatus.ERROR + try { + getConnectorSyncJob(connectorSyncJobId, listener.delegateFailure((getSyncJobListener, syncJobSearchResult) -> { + ConnectorSyncStatus prevStatus = getConnectorSyncJobStatusFromSearchResult(syncJobSearchResult); + ConnectorSyncStatus nextStatus = ConnectorSyncStatus.ERROR; + + try { + ConnectorSyncJobStateMachine.assertValidStateTransition(prevStatus, nextStatus); + } catch (ConnectorSyncJobInvalidStatusTransitionException e) { + getSyncJobListener.onFailure(new ElasticsearchStatusException(e.getMessage(), RestStatus.BAD_REQUEST, e)); + return; + } + + final UpdateRequest updateRequest = new UpdateRequest(CONNECTOR_SYNC_JOB_INDEX_NAME, connectorSyncJobId).setRefreshPolicy( + WriteRequest.RefreshPolicy.IMMEDIATE ) - ); + .doc( + Map.of( + ConnectorSyncJob.ERROR_FIELD.getPreferredName(), + error, + ConnectorSyncJob.STATUS_FIELD.getPreferredName(), + nextStatus + ) + ); - try { - client.update( - updateRequest, - new DelegatingIndexNotFoundOrDocumentMissingActionListener<>(connectorSyncJobId, listener, (l, updateResponse) -> { - if (updateResponse.getResult() == DocWriteResponse.Result.NOT_FOUND) { - l.onFailure(new ResourceNotFoundException(connectorSyncJobId)); - return; - } - l.onResponse(updateResponse); - }) - ); + client.update( + updateRequest, + new DelegatingIndexNotFoundOrDocumentMissingActionListener<>( + connectorSyncJobId, + listener, + (indexNotFoundListener, updateResponse) -> { + if (updateResponse.getResult() == DocWriteResponse.Result.NOT_FOUND) { + 
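// Editor's note (not part of the change): flipping this sort order is what the list YAML tests
// earlier in the diff now encode (newest sync job first). Sketch of the search source, mirroring
// listConnectorSyncJobs:
SearchSourceBuilder searchSource = new SearchSourceBuilder().from(from)
    .size(size)
    .query(query)
    .fetchSource(true)
    .sort(ConnectorSyncJob.CREATED_AT_FIELD.getPreferredName(), SortOrder.DESC); // newest first
SearchRequest searchRequest = new SearchRequest(CONNECTOR_SYNC_JOB_INDEX_NAME).source(searchSource);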
indexNotFoundListener.onFailure(new ResourceNotFoundException(connectorSyncJobId)); + return; + } + indexNotFoundListener.onResponse(updateResponse); + } + ) + ); + })); } catch (Exception e) { listener.onFailure(e); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobInvalidStatusTransitionException.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobInvalidStatusTransitionException.java new file mode 100644 index 0000000000000..3ded62afa5d14 --- /dev/null +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobInvalidStatusTransitionException.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.application.connector.syncjob; + +import org.elasticsearch.xpack.application.connector.ConnectorSyncStatus; + +public class ConnectorSyncJobInvalidStatusTransitionException extends Exception { + + /** + * Constructs a {@link ConnectorSyncJobInvalidStatusTransitionException} exception with a detailed message. + * + * @param current The current {@link ConnectorSyncStatus} of the {@link ConnectorSyncJob}. + * @param next The attempted next {@link ConnectorSyncStatus} of the {@link ConnectorSyncJob}. + */ + public ConnectorSyncJobInvalidStatusTransitionException(ConnectorSyncStatus current, ConnectorSyncStatus next) { + super( + "Invalid transition attempt from [" + + current + + "] to [" + + next + + "]. Such a " + + ConnectorSyncStatus.class.getSimpleName() + + " transition is not supported by the Connector Protocol." + ); + } +} diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobStateMachine.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobStateMachine.java index 7a7a05bd5e455..dc624b5bf8ba1 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobStateMachine.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobStateMachine.java @@ -47,4 +47,17 @@ public class ConnectorSyncJobStateMachine { public static boolean isValidTransition(ConnectorSyncStatus current, ConnectorSyncStatus next) { return VALID_TRANSITIONS.getOrDefault(current, Collections.emptySet()).contains(next); } + + /** + * Throws {@link ConnectorSyncJobInvalidStatusTransitionException} if a + * transition from one {@link ConnectorSyncStatus} to another is invalid. + * + * @param current The current {@link ConnectorSyncStatus} of the {@link ConnectorSyncJob}. + * @param next The proposed next {@link ConnectorSyncStatus} of the {@link ConnectorSyncJob}. 
+ */ + public static void assertValidStateTransition(ConnectorSyncStatus current, ConnectorSyncStatus next) + throws ConnectorSyncJobInvalidStatusTransitionException { + if (isValidTransition(current, next)) return; + throw new ConnectorSyncJobInvalidStatusTransitionException(current, next); + } } diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorConfigurationTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorConfigurationTests.java index 35b21ce676a57..3a7ff819ecbf5 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorConfigurationTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorConfigurationTests.java @@ -17,10 +17,14 @@ import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.application.connector.configuration.ConfigurationDependency; +import org.elasticsearch.xpack.application.connector.configuration.ConfigurationSelectOption; +import org.elasticsearch.xpack.application.connector.configuration.ConfigurationValidation; import org.junit.Before; import java.io.IOException; import java.util.List; +import java.util.Map; import static java.util.Collections.emptyList; import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; @@ -188,6 +192,87 @@ public void testToXContentWithMultipleConstraintTypes() throws IOException { assertToXContentEquivalent(originalBytes, toXContent(parsed, XContentType.JSON, humanReadable), XContentType.JSON); } + public void testToMap() { + ConnectorConfiguration configField = ConnectorTestUtils.getRandomConnectorConfigurationField(); + Map configFieldAsMap = configField.toMap(); + + if (configField.getCategory() != null) { + assertThat(configFieldAsMap.get("category"), equalTo(configField.getCategory())); + } else { + assertFalse(configFieldAsMap.containsKey("category")); + } + + assertThat(configFieldAsMap.get("default_value"), equalTo(configField.getDefaultValue())); + + if (configField.getDependsOn() != null) { + List> dependsOnAsList = configField.getDependsOn().stream().map(ConfigurationDependency::toMap).toList(); + assertThat(configFieldAsMap.get("depends_on"), equalTo(dependsOnAsList)); + } else { + assertFalse(configFieldAsMap.containsKey("depends_on")); + } + + if (configField.getDisplay() != null) { + assertThat(configFieldAsMap.get("display"), equalTo(configField.getDisplay().toString())); + } else { + assertFalse(configFieldAsMap.containsKey("display")); + } + + assertThat(configFieldAsMap.get("label"), equalTo(configField.getLabel())); + + if (configField.getOptions() != null) { + List> optionsAsList = configField.getOptions().stream().map(ConfigurationSelectOption::toMap).toList(); + assertThat(configFieldAsMap.get("options"), equalTo(optionsAsList)); + } else { + assertFalse(configFieldAsMap.containsKey("options")); + } + + if (configField.getOrder() != null) { + assertThat(configFieldAsMap.get("order"), equalTo(configField.getOrder())); + } else { + assertFalse(configFieldAsMap.containsKey("order")); + } + + if (configField.getPlaceholder() != null) { + assertThat(configFieldAsMap.get("placeholder"), equalTo(configField.getPlaceholder())); + } else { + assertFalse(configFieldAsMap.containsKey("placeholder")); + } + + assertThat(configFieldAsMap.get("required"), 
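// Editor's note (not part of the change): assertValidStateTransition turns the boolean
// isValidTransition check into a checked exception, and the index service translates that
// exception into an HTTP 400 for the caller. Condensed sketch of the guard-then-translate
// pattern shared by the cancel and error-update paths:
try {
    ConnectorSyncJobStateMachine.assertValidStateTransition(prevStatus, nextStatus);
} catch (ConnectorSyncJobInvalidStatusTransitionException e) {
    // surface the protocol violation as 400 Bad Request instead of failing the update later
    listener.onFailure(new ElasticsearchStatusException(e.getMessage(), RestStatus.BAD_REQUEST, e));
    return;
}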
equalTo(configField.isRequired())); + assertThat(configFieldAsMap.get("sensitive"), equalTo(configField.isSensitive())); + + if (configField.getTooltip() != null) { + assertThat(configFieldAsMap.get("tooltip"), equalTo(configField.getTooltip())); + } else { + assertFalse(configFieldAsMap.containsKey("tooltip")); + } + + if (configField.getType() != null) { + assertThat(configFieldAsMap.get("type"), equalTo(configField.getType().toString())); + } else { + assertFalse(configFieldAsMap.containsKey("type")); + } + + if (configField.getUiRestrictions() != null) { + assertThat(configFieldAsMap.get("ui_restrictions"), equalTo(configField.getUiRestrictions())); + } else { + assertFalse(configFieldAsMap.containsKey("ui_restrictions")); + } + + if (configField.getValidations() != null) { + List> validationsAsList = configField.getValidations() + .stream() + .map(ConfigurationValidation::toMap) + .toList(); + assertThat(configFieldAsMap.get("validations"), equalTo(validationsAsList)); + } else { + assertFalse(configFieldAsMap.containsKey("validations")); + } + + assertThat(configFieldAsMap.get("value"), equalTo(configField.getValue())); + + } + private void assertTransportSerialization(ConnectorConfiguration testInstance) throws IOException { ConnectorConfiguration deserializedInstance = copyInstance(testInstance); assertNotSame(testInstance, deserializedInstance); diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorFeaturesTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorFeaturesTests.java index 1563ff5fcf82c..941d0a9ed4594 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorFeaturesTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorFeaturesTests.java @@ -110,6 +110,30 @@ public void testToXContentMissingSyncRulesAdvanced() throws IOException { testToXContentChecker(content); } + public void testToXContent_NativeConnectorAPIKeysEnabled() throws IOException { + String content = XContentHelper.stripWhitespace(""" + { + "document_level_security": { + "enabled": true + }, + "filtering_advanced_config": true, + "sync_rules": { + "advanced": { + "enabled": false + }, + "basic": { + "enabled": true + } + }, + "native_connector_api_keys": { + "enabled": true + } + } + """); + + testToXContentChecker(content); + } + private void testToXContentChecker(String content) throws IOException { ConnectorFeatures features = ConnectorFeatures.fromXContentBytes(new BytesArray(content), XContentType.JSON); boolean humanReadable = true; diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorFilteringTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorFilteringTests.java index 20c2200b26f2b..8c1cdcb418142 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorFilteringTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorFilteringTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentParseException; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; import org.junit.Before; @@ -110,7 +111,7 
@@ public void testToXContent() throws IOException { } - public void testToXContent_WithAdvancedSnippetPopulated() throws IOException { + public void testToXContent_WithAdvancedSnippetPopulatedWithAValueArray() throws IOException { String content = XContentHelper.stripWhitespace(""" { "active": { @@ -177,6 +178,129 @@ public void testToXContent_WithAdvancedSnippetPopulated() throws IOException { } + public void testToXContent_WithAdvancedSnippetPopulatedWithAValueObject() throws IOException { + String content = XContentHelper.stripWhitespace(""" + { + "active": { + "advanced_snippet": { + "created_at": "2023-11-09T15:13:08.231Z", + "updated_at": "2023-11-09T15:13:08.231Z", + "value": { + "service": "Incident", + "query": "user_nameSTARTSWITHa" + } + }, + "rules": [ + { + "created_at": "2023-11-09T15:13:08.231Z", + "field": "_", + "id": "DEFAULT", + "order": 0, + "policy": "include", + "rule": "regex", + "updated_at": "2023-11-09T15:13:08.231Z", + "value": ".*" + } + ], + "validation": { + "errors": [], + "state": "valid" + } + }, + "domain": "DEFAULT", + "draft": { + "advanced_snippet": { + "created_at": "2023-11-09T15:13:08.231Z", + "updated_at": "2023-11-09T15:13:08.231Z", + "value": {} + }, + "rules": [ + { + "created_at": "2023-11-09T15:13:08.231Z", + "field": "_", + "id": "DEFAULT", + "order": 0, + "policy": "include", + "rule": "regex", + "updated_at": "2023-11-09T15:13:08.231Z", + "value": ".*" + } + ], + "validation": { + "errors": [], + "state": "valid" + } + } + } + """); + + ConnectorFiltering filtering = ConnectorFiltering.fromXContentBytes(new BytesArray(content), XContentType.JSON); + boolean humanReadable = true; + BytesReference originalBytes = toShuffledXContent(filtering, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable); + ConnectorFiltering parsed; + try (XContentParser parser = createParser(XContentType.JSON.xContent(), originalBytes)) { + parsed = ConnectorFiltering.fromXContent(parser); + } + assertToXContentEquivalent(originalBytes, toXContent(parsed, XContentType.JSON, humanReadable), XContentType.JSON); + + } + + public void testToXContent_WithAdvancedSnippetPopulatedWithAValueLiteral_ExpectParseException() throws IOException { + String content = XContentHelper.stripWhitespace(""" + { + "active": { + "advanced_snippet": { + "created_at": "2023-11-09T15:13:08.231Z", + "updated_at": "2023-11-09T15:13:08.231Z", + "value": "string literal" + }, + "rules": [ + { + "created_at": "2023-11-09T15:13:08.231Z", + "field": "_", + "id": "DEFAULT", + "order": 0, + "policy": "include", + "rule": "regex", + "updated_at": "2023-11-09T15:13:08.231Z", + "value": ".*" + } + ], + "validation": { + "errors": [], + "state": "valid" + } + }, + "domain": "DEFAULT", + "draft": { + "advanced_snippet": { + "created_at": "2023-11-09T15:13:08.231Z", + "updated_at": "2023-11-09T15:13:08.231Z", + "value": {} + }, + "rules": [ + { + "created_at": "2023-11-09T15:13:08.231Z", + "field": "_", + "id": "DEFAULT", + "order": 0, + "policy": "include", + "rule": "regex", + "updated_at": "2023-11-09T15:13:08.231Z", + "value": ".*" + } + ], + "validation": { + "errors": [], + "state": "valid" + } + } + } + """); + + assertThrows(XContentParseException.class, () -> ConnectorFiltering.fromXContentBytes(new BytesArray(content), XContentType.JSON)); + } + private void assertTransportSerialization(ConnectorFiltering testInstance) throws IOException { ConnectorFiltering deserializedInstance = copyInstance(testInstance); assertNotSame(testInstance, deserializedInstance); diff --git 
a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorStateMachineTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorStateMachineTests.java index 372c874310162..d1f08f80d02f2 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorStateMachineTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorStateMachineTests.java @@ -65,4 +65,31 @@ public void testTransitionToSameState() { assertFalse("Transition from " + state + " to itself should be invalid", ConnectorStateMachine.isValidTransition(state, state)); } } + + public void testAssertValidStateTransition_ExpectExceptionOnInvalidTransition() { + assertThrows( + ConnectorInvalidStatusTransitionException.class, + () -> ConnectorStateMachine.assertValidStateTransition(ConnectorStatus.CREATED, ConnectorStatus.CONFIGURED) + ); + } + + public void testAssertValidStateTransition_ExpectNoExceptionOnValidTransition() { + ConnectorStatus prevStatus = ConnectorStatus.CREATED; + ConnectorStatus nextStatus = ConnectorStatus.ERROR; + + try { + ConnectorStateMachine.assertValidStateTransition(prevStatus, nextStatus); + } catch (ConnectorInvalidStatusTransitionException e) { + fail( + "Did not expect " + + ConnectorInvalidStatusTransitionException.class.getSimpleName() + + " to be thrown for valid state transition [" + + prevStatus + + "] -> " + + "[" + + nextStatus + + "]." + ); + } + } } diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTestUtils.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTestUtils.java index 6d94cdc3ebe35..e9053a0a64507 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTestUtils.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTestUtils.java @@ -114,6 +114,7 @@ public static ConnectorFeatures getRandomConnectorFeatures() { .setFilteringRules(randomFrom(new Boolean[] { null, randomBoolean() })) .setFilteringAdvancedConfig(randomFrom(new Boolean[] { null, randomBoolean() })) .setIncrementalSyncEnabled(randomBoolean() ? randomConnectorFeatureEnabled() : null) + .setNativeConnectorAPIKeysEnabled(randomBoolean() ? randomConnectorFeatureEnabled() : null) .setSyncRulesFeatures(randomBoolean() ? 
randomSyncRulesFeatures() : null) .build(); } diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTests.java index 5525b4694ef04..8ed18fc303498 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTests.java @@ -139,6 +139,9 @@ public void testToXContent() throws IOException { "basic":{ "enabled":true } + }, + "native_connector_api_keys": { + "enabled": true } }, "filtering":[ diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/secrets/action/PostConnectorSecretActionTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/secrets/action/PostConnectorSecretActionTests.java index f1e1a670b2748..a11de91de739a 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/secrets/action/PostConnectorSecretActionTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/secrets/action/PostConnectorSecretActionTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.application.connector.secrets.ConnectorSecretsTestUtils; +import static org.elasticsearch.xpack.application.connector.ConnectorTestUtils.NULL_STRING; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -24,11 +25,19 @@ public void testValidate_WhenConnectorSecretIdIsPresent_ExpectNoValidationError( assertThat(exception, nullValue()); } - public void testValidate_WhenConnectorSecretIdIsEmpty_ExpectValidationError() { + public void testValidate_WhenConnectorSecretIdIsNull_ExpectValidationError() { + PostConnectorSecretRequest requestWithNullValue = new PostConnectorSecretRequest(NULL_STRING); + ActionRequestValidationException exception = requestWithNullValue.validate(); + + assertThat(exception, notNullValue()); + assertThat(exception.getMessage(), containsString("[value] of the connector secret cannot be [null] or [\"\"]")); + } + + public void testValidate_WhenConnectorSecretIdIsBlank_ExpectValidationError() { PostConnectorSecretRequest requestWithMissingValue = new PostConnectorSecretRequest(""); ActionRequestValidationException exception = requestWithMissingValue.validate(); assertThat(exception, notNullValue()); - assertThat(exception.getMessage(), containsString("value is missing")); + assertThat(exception.getMessage(), containsString("[value] of the connector secret cannot be [null] or [\"\"]")); } } diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/secrets/action/PutConnectorSecretActionTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/secrets/action/PutConnectorSecretActionTests.java index b7c7453611bdf..7940017318336 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/secrets/action/PutConnectorSecretActionTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/secrets/action/PutConnectorSecretActionTests.java @@ -25,11 +25,34 @@ public void testValidate_WhenConnectorSecretIdIsPresent_ExpectNoValidationError( } 
public void testValidate_WhenConnectorSecretIdIsEmpty_ExpectValidationError() { - PutConnectorSecretRequest requestWithMissingValue = new PutConnectorSecretRequest("", ""); - ActionRequestValidationException exception = requestWithMissingValue.validate(); + PutConnectorSecretRequest requestWithEmptyId = new PutConnectorSecretRequest("", randomAlphaOfLength(10)); + ActionRequestValidationException exception = requestWithEmptyId.validate(); assertThat(exception, notNullValue()); assertThat(exception.getMessage(), containsString("[id] cannot be [null] or [\"\"]")); + } + + public void testValidate_WhenConnectorSecretIdIsNull_ExpectValidationError() { + PutConnectorSecretRequest requestWithNullId = new PutConnectorSecretRequest(null, randomAlphaOfLength(10)); + ActionRequestValidationException exception = requestWithNullId.validate(); + + assertThat(exception, notNullValue()); + assertThat(exception.getMessage(), containsString("[id] cannot be [null] or [\"\"]")); + } + + public void testValidate_WhenConnectorSecretValueIsEmpty_ExpectValidationError() { + PutConnectorSecretRequest requestWithEmptyValue = new PutConnectorSecretRequest(randomAlphaOfLength(10), ""); + ActionRequestValidationException exception = requestWithEmptyValue.validate(); + + assertThat(exception, notNullValue()); + assertThat(exception.getMessage(), containsString("[value] cannot be [null] or [\"\"]")); + } + + public void testValidate_WhenConnectorSecretValueIsNull_ExpectValidationError() { + PutConnectorSecretRequest requestWithEmptyValue = new PutConnectorSecretRequest(randomAlphaOfLength(10), null); + ActionRequestValidationException exception = requestWithEmptyValue.validate(); + + assertThat(exception, notNullValue()); assertThat(exception.getMessage(), containsString("[value] cannot be [null] or [\"\"]")); } } diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexServiceTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexServiceTests.java index 4a7a3e76ecf42..1e991569a9ffd 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexServiceTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexServiceTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.application.connector.syncjob; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; @@ -17,6 +18,7 @@ import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -80,7 +82,6 @@ public void setup() throws Exception { } private String createConnector() throws IOException, InterruptedException, ExecutionException, TimeoutException { - Connector connector = ConnectorTestUtils.getRandomConnector(); final IndexRequest indexRequest = new IndexRequest(ConnectorIndexService.CONNECTOR_INDEX_NAME).opType(DocWriteRequest.OpType.INDEX) @@ -229,7 +230,8 @@ public void testCheckInConnectorSyncJob_WithMissingSyncJobId_ExpectException() { 
expectThrows(ResourceNotFoundException.class, () -> awaitCheckInConnectorSyncJob(NON_EXISTING_SYNC_JOB_ID)); } - public void testCancelConnectorSyncJob() throws Exception { + public void testCancelConnectorSyncJob_WithStatusInProgress_ExpectNextStatusCanceling() throws Exception { + // Create connector sync job PostConnectorSyncJobAction.Request syncJobRequest = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest( connectorOneId ); @@ -247,6 +249,10 @@ public void testCancelConnectorSyncJob() throws Exception { assertThat(cancellationRequestedAtBeforeUpdate, nullValue()); assertThat(syncStatusBeforeUpdate, not(equalTo(ConnectorSyncStatus.CANCELING))); + // Set sync job status to `in_progress` + updateConnectorSyncJobStatusWithoutStateMachineGuard(syncJobId, ConnectorSyncStatus.IN_PROGRESS); + + // Cancel sync job UpdateResponse updateResponse = awaitCancelConnectorSyncJob(syncJobId); Map syncJobSourceAfterUpdate = getConnectorSyncJobSourceById(syncJobId); @@ -263,6 +269,103 @@ public void testCancelConnectorSyncJob() throws Exception { assertFieldsExceptSyncStatusAndCancellationRequestedAtDidNotUpdate(syncJobSourceBeforeUpdate, syncJobSourceAfterUpdate); } + public void testCancelConnectorSyncJob_WithPendingState_ExpectNextStatusCanceled() throws Exception { + // Create pending sync job + PostConnectorSyncJobAction.Request syncJobRequest = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest( + connectorOneId + ); + PostConnectorSyncJobAction.Response response = awaitPutConnectorSyncJob(syncJobRequest); + String syncJobId = response.getId(); + Map syncJobSourceBeforeUpdate = getConnectorSyncJobSourceById(syncJobId); + ConnectorSyncStatus syncStatusBeforeUpdate = ConnectorSyncStatus.fromString( + (String) syncJobSourceBeforeUpdate.get(ConnectorSyncJob.STATUS_FIELD.getPreferredName()) + ); + Object canceledAtBeforeUpdate = syncJobSourceBeforeUpdate.get(ConnectorSyncJob.CANCELED_AT_FIELD.getPreferredName()); + + assertThat(syncJobId, notNullValue()); + assertThat(canceledAtBeforeUpdate, nullValue()); + assertThat(syncStatusBeforeUpdate, not(equalTo(ConnectorSyncStatus.CANCELED))); + + // Cancel sync job + UpdateResponse updateResponse = awaitCancelConnectorSyncJob(syncJobId); + + Map syncJobSourceAfterUpdate = getConnectorSyncJobSourceById(syncJobId); + ConnectorSyncStatus syncStatusAfterUpdate = ConnectorSyncStatus.fromString( + (String) syncJobSourceAfterUpdate.get(ConnectorSyncJob.STATUS_FIELD.getPreferredName()) + ); + Instant canceledAtAfterUpdate = Instant.parse( + (String) syncJobSourceAfterUpdate.get(ConnectorSyncJob.CANCELED_AT_FIELD.getPreferredName()) + ); + + assertThat(updateResponse.status(), equalTo(RestStatus.OK)); + assertThat(canceledAtAfterUpdate, notNullValue()); + assertThat(syncStatusAfterUpdate, equalTo(ConnectorSyncStatus.CANCELED)); + assertFieldsExceptSyncStatusAndCanceledAndCompletedTimestampsDidNotUpdate(syncJobSourceBeforeUpdate, syncJobSourceAfterUpdate); + } + + public void testCancelConnectorSyncJob_WithSuspendedState_ExpectNextStatusCanceled() throws Exception { + // Create pending sync job + PostConnectorSyncJobAction.Request syncJobRequest = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest( + connectorOneId + ); + PostConnectorSyncJobAction.Response response = awaitPutConnectorSyncJob(syncJobRequest); + String syncJobId = response.getId(); + Map syncJobSourceBeforeUpdate = getConnectorSyncJobSourceById(syncJobId); + ConnectorSyncStatus syncStatusBeforeUpdate = ConnectorSyncStatus.fromString( + (String) 
syncJobSourceBeforeUpdate.get(ConnectorSyncJob.STATUS_FIELD.getPreferredName()) + ); + Object canceledAtBeforeUpdate = syncJobSourceBeforeUpdate.get(ConnectorSyncJob.CANCELED_AT_FIELD.getPreferredName()); + + assertThat(syncJobId, notNullValue()); + assertThat(canceledAtBeforeUpdate, nullValue()); + assertThat(syncStatusBeforeUpdate, not(equalTo(ConnectorSyncStatus.CANCELED))); + + // Set sync job to suspended + updateConnectorSyncJobStatusWithoutStateMachineGuard(syncJobId, ConnectorSyncStatus.SUSPENDED); + + // Cancel sync job + UpdateResponse updateResponse = awaitCancelConnectorSyncJob(syncJobId); + + Map syncJobSourceAfterUpdate = getConnectorSyncJobSourceById(syncJobId); + ConnectorSyncStatus syncStatusAfterUpdate = ConnectorSyncStatus.fromString( + (String) syncJobSourceAfterUpdate.get(ConnectorSyncJob.STATUS_FIELD.getPreferredName()) + ); + Instant canceledAtAfterUpdate = Instant.parse( + (String) syncJobSourceAfterUpdate.get(ConnectorSyncJob.CANCELED_AT_FIELD.getPreferredName()) + ); + + assertThat(updateResponse.status(), equalTo(RestStatus.OK)); + assertThat(canceledAtAfterUpdate, notNullValue()); + assertThat(syncStatusAfterUpdate, equalTo(ConnectorSyncStatus.CANCELED)); + assertFieldsExceptSyncStatusAndCanceledAndCompletedTimestampsDidNotUpdate(syncJobSourceBeforeUpdate, syncJobSourceAfterUpdate); + } + + public void testCancelConnectorSyncJob_WithCompletedState_ExpectStatusException() throws Exception { + // Create sync job + PostConnectorSyncJobAction.Request syncJobRequest = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest( + connectorOneId + ); + PostConnectorSyncJobAction.Response response = awaitPutConnectorSyncJob(syncJobRequest); + String syncJobId = response.getId(); + Map syncJobSourceBeforeUpdate = getConnectorSyncJobSourceById(syncJobId); + ConnectorSyncStatus syncStatusBeforeUpdate = ConnectorSyncStatus.fromString( + (String) syncJobSourceBeforeUpdate.get(ConnectorSyncJob.STATUS_FIELD.getPreferredName()) + ); + Object cancellationRequestedAtBeforeUpdate = syncJobSourceBeforeUpdate.get( + ConnectorSyncJob.CANCELATION_REQUESTED_AT_FIELD.getPreferredName() + ); + + assertThat(syncJobId, notNullValue()); + assertThat(cancellationRequestedAtBeforeUpdate, nullValue()); + assertThat(syncStatusBeforeUpdate, not(equalTo(ConnectorSyncStatus.CANCELING))); + + // Set sync job status to `completed` + updateConnectorSyncJobStatusWithoutStateMachineGuard(syncJobId, ConnectorSyncStatus.COMPLETED); + + // Cancel sync job + assertThrows(ElasticsearchStatusException.class, () -> awaitCancelConnectorSyncJob(syncJobId)); + } + public void testCancelConnectorSyncJob_WithMissingSyncJobId_ExpectException() { expectThrows(ResourceNotFoundException.class, () -> awaitCancelConnectorSyncJob(NON_EXISTING_SYNC_JOB_ID)); } @@ -319,20 +422,21 @@ public void testListConnectorSyncJobs() throws Exception { assertThat(lastSyncJobs.connectorSyncJobs().size(), equalTo(1)); assertThat(lastSyncJobs.totalResults(), equalTo(5L)); - assertThat(firstSyncJob, equalTo(syncJobs.get(0))); - assertThat(secondSyncJob, equalTo(syncJobs.get(1))); + // Sync jobs are returned in most-recently created order + assertThat(firstSyncJob, equalTo(syncJobs.get(4))); + assertThat(secondSyncJob, equalTo(syncJobs.get(3))); assertThat(thirdSyncJob, equalTo(syncJobs.get(2))); - assertThat(fourthSyncJob, equalTo(syncJobs.get(3))); - assertThat(fifthSyncJob, equalTo(syncJobs.get(4))); - - // assert ordering: ascending order by creation date - 
assertTrue(fifthSyncJob.getCreatedAt().isAfter(fourthSyncJob.getCreatedAt())); - assertTrue(fourthSyncJob.getCreatedAt().isAfter(thirdSyncJob.getCreatedAt())); - assertTrue(thirdSyncJob.getCreatedAt().isAfter(secondSyncJob.getCreatedAt())); - assertTrue(secondSyncJob.getCreatedAt().isAfter(firstSyncJob.getCreatedAt())); + assertThat(fourthSyncJob, equalTo(syncJobs.get(1))); + assertThat(fifthSyncJob, equalTo(syncJobs.get(0))); + + // assert ordering: descending order by creation date + assertTrue(fourthSyncJob.getCreatedAt().isAfter(fifthSyncJob.getCreatedAt())); + assertTrue(thirdSyncJob.getCreatedAt().isAfter(fourthSyncJob.getCreatedAt())); + assertTrue(secondSyncJob.getCreatedAt().isAfter(thirdSyncJob.getCreatedAt())); + assertTrue(firstSyncJob.getCreatedAt().isAfter(secondSyncJob.getCreatedAt())); } - public void testListConnectorSyncJobs_WithStatusPending_GivenOnePendingTwoCancelled_ExpectOnePending() throws Exception { + public void testListConnectorSyncJobs_WithStatusPending_GivenOnePendingTwoCanceled_ExpectOnePending() throws Exception { PostConnectorSyncJobAction.Request requestOne = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest( connectorOneId ); @@ -535,8 +639,9 @@ public void testListConnectorSyncJobs_WithJobTypeFullAndIncremental_GivenOnePerT String idOfReturnedSyncJobTwo = connectorSyncJobsResult.connectorSyncJobs().get(1).getDocId(); assertThat(numberOfResults, equalTo(2L)); - assertThat(idOfReturnedSyncJobOne, equalTo(syncJobOneId)); - assertThat(idOfReturnedSyncJobTwo, equalTo(syncJobTwoId)); + // Sync jobs are returned in most-recently created order + assertThat(idOfReturnedSyncJobTwo, equalTo(syncJobOneId)); + assertThat(idOfReturnedSyncJobOne, equalTo(syncJobTwoId)); } public void testListConnectorSyncJobs_WithNoSyncJobs_ReturnEmptyResult() throws Exception { @@ -547,12 +652,17 @@ public void testListConnectorSyncJobs_WithNoSyncJobs_ReturnEmptyResult() throws } public void testUpdateConnectorSyncJobError() throws Exception { + // Create sync job PostConnectorSyncJobAction.Request syncJobRequest = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest( connectorOneId ); PostConnectorSyncJobAction.Response response = awaitPutConnectorSyncJob(syncJobRequest); String syncJobId = response.getId(); + // Set sync job to in progress + updateConnectorSyncJobStatusWithoutStateMachineGuard(syncJobId, ConnectorSyncStatus.IN_PROGRESS); + + // Set sync job error UpdateConnectorSyncJobErrorAction.Request request = ConnectorSyncJobTestUtils.getRandomUpdateConnectorSyncJobErrorActionRequest(); String errorInRequest = request.getError(); @@ -575,6 +685,18 @@ public void testUpdateConnectorSyncJobError_WithMissingSyncJobId_ExceptException ); } + public void testUpdateConnectorSyncJobError_WithStatusPending_ExpectStatusException() throws Exception { + // Create sync job + PostConnectorSyncJobAction.Request syncJobRequest = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest( + connectorOneId + ); + PostConnectorSyncJobAction.Response response = awaitPutConnectorSyncJob(syncJobRequest); + String syncJobId = response.getId(); + + // Try to set error + assertThrows(ElasticsearchStatusException.class, () -> awaitUpdateConnectorSyncJob(syncJobId, "some error")); + } + public void testUpdateConnectorSyncJobIngestionStats() throws Exception { PostConnectorSyncJobAction.Request syncJobRequest = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest( connectorOneId @@ -733,6 +855,22 @@ private static void 
assertFieldsExceptSyncStatusAndCancellationRequestedAtDidNot ); } + private static void assertFieldsExceptSyncStatusAndCanceledAndCompletedTimestampsDidNotUpdate( + Map syncJobSourceBeforeUpdate, + Map syncJobSourceAfterUpdate + ) { + assertFieldsDidNotUpdateExceptFieldList( + syncJobSourceBeforeUpdate, + syncJobSourceAfterUpdate, + List.of( + ConnectorSyncJob.STATUS_FIELD, + ConnectorSyncJob.CANCELED_AT_FIELD, + ConnectorSyncJob.COMPLETED_AT_FIELD, + ConnectorSyncJob.CANCELATION_REQUESTED_AT_FIELD + ) + ); + } + private static void assertFieldsExceptLastSeenDidNotUpdate( Map syncJobSourceBeforeUpdate, Map syncJobSourceAfterUpdate @@ -1006,4 +1144,14 @@ public void onFailure(Exception e) { return response; } + private String updateConnectorSyncJobStatusWithoutStateMachineGuard(String syncJobId, ConnectorSyncStatus syncStatus) throws Exception { + final UpdateRequest updateRequest = new UpdateRequest(ConnectorSyncJobIndexService.CONNECTOR_SYNC_JOB_INDEX_NAME, syncJobId) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .doc(Map.of(ConnectorSyncJob.STATUS_FIELD.getPreferredName(), syncStatus)); + + ActionFuture index = client().update(updateRequest); + + // wait 10 seconds for connector creation + return index.get(TIMEOUT_SECONDS, TimeUnit.SECONDS).getId(); + } } diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobStateMachineTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobStateMachineTests.java index b702a5ffa7eef..3e7bf80dcfb25 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobStateMachineTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobStateMachineTests.java @@ -86,4 +86,31 @@ public void testTransitionToSameState() { ); } } + + public void testAssertValidStateTransition_ExpectExceptionOnInvalidTransition() { + assertThrows( + ConnectorSyncJobInvalidStatusTransitionException.class, + () -> ConnectorSyncJobStateMachine.assertValidStateTransition(ConnectorSyncStatus.PENDING, ConnectorSyncStatus.CANCELING) + ); + } + + public void testAssertValidStateTransition_ExpectNoExceptionOnValidTransition() { + ConnectorSyncStatus prevStatus = ConnectorSyncStatus.PENDING; + ConnectorSyncStatus nextStatus = ConnectorSyncStatus.CANCELED; + + try { + ConnectorSyncJobStateMachine.assertValidStateTransition(prevStatus, nextStatus); + } catch (ConnectorSyncJobInvalidStatusTransitionException e) { + fail( + "Did not expect " + + ConnectorSyncJobInvalidStatusTransitionException.class.getSimpleName() + + " to be thrown for valid state transition [" + + prevStatus + + "] -> " + + "[" + + nextStatus + + "]." 
+ ); + } + } } diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorImplementer.java index f283e3b59bb63..55a81cd7aaace 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorImplementer.java @@ -36,10 +36,15 @@ import static org.elasticsearch.compute.gen.Methods.buildFromFactory; import static org.elasticsearch.compute.gen.Methods.getMethod; import static org.elasticsearch.compute.gen.Types.BLOCK; +import static org.elasticsearch.compute.gen.Types.BOOLEAN_BLOCK; import static org.elasticsearch.compute.gen.Types.BYTES_REF; +import static org.elasticsearch.compute.gen.Types.BYTES_REF_BLOCK; +import static org.elasticsearch.compute.gen.Types.DOUBLE_BLOCK; import static org.elasticsearch.compute.gen.Types.DRIVER_CONTEXT; import static org.elasticsearch.compute.gen.Types.EXPRESSION_EVALUATOR; import static org.elasticsearch.compute.gen.Types.EXPRESSION_EVALUATOR_FACTORY; +import static org.elasticsearch.compute.gen.Types.INT_BLOCK; +import static org.elasticsearch.compute.gen.Types.LONG_BLOCK; import static org.elasticsearch.compute.gen.Types.PAGE; import static org.elasticsearch.compute.gen.Types.RELEASABLE; import static org.elasticsearch.compute.gen.Types.RELEASABLES; @@ -53,6 +58,7 @@ public class EvaluatorImplementer { private final TypeElement declarationType; private final ProcessFunction processFunction; private final ClassName implementation; + private final boolean processOutputsMultivalued; public EvaluatorImplementer( Elements elements, @@ -68,6 +74,7 @@ public EvaluatorImplementer( elements.getPackageOf(declarationType).toString(), declarationType.getSimpleName() + extraName + "Evaluator" ); + this.processOutputsMultivalued = this.processFunction.hasBlockType && (this.processFunction.builderArg != null); } public JavaFile sourceFile() { @@ -94,10 +101,17 @@ private TypeSpec type() { builder.addMethod(ctor()); builder.addMethod(eval()); - if (processFunction.args.stream().anyMatch(x -> x instanceof FixedProcessFunctionArg == false)) { - builder.addMethod(realEval(true)); + + if (processOutputsMultivalued) { + if (processFunction.args.stream().anyMatch(x -> x instanceof FixedProcessFunctionArg == false)) { + builder.addMethod(realEval(true)); + } + } else { + if (processFunction.args.stream().anyMatch(x -> x instanceof FixedProcessFunctionArg == false)) { + builder.addMethod(realEval(true)); + } + builder.addMethod(realEval(false)); } - builder.addMethod(realEval(false)); builder.addMethod(toStringMethod()); builder.addMethod(close()); return builder.build(); @@ -117,17 +131,21 @@ private MethodSpec ctor() { private MethodSpec eval() { MethodSpec.Builder builder = MethodSpec.methodBuilder("eval").addAnnotation(Override.class); builder.addModifiers(Modifier.PUBLIC).returns(BLOCK).addParameter(PAGE, "page"); - processFunction.args.stream().forEach(a -> a.evalToBlock(builder)); String invokeBlockEval = invokeRealEval(true); - processFunction.args.stream().forEach(a -> a.resolveVectors(builder, invokeBlockEval)); - builder.addStatement(invokeRealEval(false)); + if (processOutputsMultivalued) { + builder.addStatement(invokeBlockEval); + } else { + processFunction.args.stream().forEach(a -> a.resolveVectors(builder, invokeBlockEval)); + builder.addStatement(invokeRealEval(false)); + } 
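
The cancel and error-update tests above all lean on the sync-job status state machine: PENDING and SUSPENDED jobs cancel straight to CANCELED, an IN_PROGRESS job first moves to CANCELING, terminal states such as COMPLETED reject the transition, and setting an error requires the job to be IN_PROGRESS. A reduced, hypothetical sketch of that kind of guard (the real `ConnectorSyncJobStateMachine` defines the authoritative transition table and exception type):

```java
import java.util.Map;
import java.util.Set;

// Hypothetical, reduced transition table; enough to explain the assertions in the tests above.
enum SyncStatus { PENDING, IN_PROGRESS, SUSPENDED, CANCELING, CANCELED, COMPLETED, ERROR }

final class SyncStatusGuard {
    private static final Map<SyncStatus, Set<SyncStatus>> VALID = Map.of(
        SyncStatus.PENDING, Set.of(SyncStatus.IN_PROGRESS, SyncStatus.CANCELED),
        SyncStatus.IN_PROGRESS, Set.of(SyncStatus.CANCELING, SyncStatus.SUSPENDED, SyncStatus.COMPLETED, SyncStatus.ERROR),
        SyncStatus.SUSPENDED, Set.of(SyncStatus.IN_PROGRESS, SyncStatus.CANCELED),
        SyncStatus.CANCELING, Set.of(SyncStatus.CANCELED, SyncStatus.ERROR),
        SyncStatus.CANCELED, Set.of(),
        SyncStatus.COMPLETED, Set.of(),
        SyncStatus.ERROR, Set.of()
    );

    // Throws on transitions the state machine does not allow, e.g. PENDING -> CANCELING or
    // COMPLETED -> anything; PENDING -> CANCELED and IN_PROGRESS -> CANCELING pass.
    static void assertValidStateTransition(SyncStatus prev, SyncStatus next) {
        if (VALID.getOrDefault(prev, Set.of()).contains(next) == false) {
            throw new IllegalStateException("Invalid transition [" + prev + "] -> [" + next + "]");
        }
    }
}
```

This is also why the index-service tests use the `updateConnectorSyncJobStatusWithoutStateMachineGuard` helper: a raw document update puts the job into an arbitrary starting state so the guarded transition under test can be exercised in isolation.
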
processFunction.args.stream().forEach(a -> a.closeEvalToBlock(builder)); return builder.build(); } private String invokeRealEval(boolean blockStyle) { StringBuilder builder = new StringBuilder("return eval(page.getPositionCount()"); + String params = processFunction.args.stream() .map(a -> a.paramName(blockStyle)) .filter(a -> a != null) @@ -154,6 +172,7 @@ private MethodSpec realEval(boolean blockStyle) { builder.addParameter(a.dataType(blockStyle), a.paramName(blockStyle)); } }); + TypeName builderType = builderType(resultDataType); builder.beginControlFlow( "try($T result = driverContext.blockFactory().$L(positionCount))", @@ -166,13 +185,36 @@ private MethodSpec realEval(boolean blockStyle) { builder.beginControlFlow("position: for (int p = 0; p < positionCount; p++)"); { if (blockStyle) { - processFunction.args.stream().forEach(a -> a.skipNull(builder)); + if (processOutputsMultivalued == false) { + processFunction.args.stream().forEach(a -> a.skipNull(builder)); + } else { + builder.addStatement("boolean allBlocksAreNulls = true"); + // allow block type inputs to be null + processFunction.args.stream().forEach(a -> { + if (a instanceof StandardProcessFunctionArg as) { + as.skipNull(builder); + } else if (a instanceof BlockProcessFunctionArg ab) { + builder.beginControlFlow("if (!$N.isNull(p))", ab.paramName(blockStyle)); + { + builder.addStatement("allBlocksAreNulls = false"); + } + builder.endControlFlow(); + } + }); + + builder.beginControlFlow("if (allBlocksAreNulls)"); + { + builder.addStatement("result.appendNull()"); + builder.addStatement("continue position"); + } + builder.endControlFlow(); + } } processFunction.args.stream().forEach(a -> a.unpackValues(builder, blockStyle)); StringBuilder pattern = new StringBuilder(); List args = new ArrayList<>(); - pattern.append("$T.$N("); + pattern.append(processOutputsMultivalued ? "$T.$N(result, p, " : "$T.$N("); args.add(declarationType); args.add(processFunction.function.getSimpleName()); processFunction.args.stream().forEach(a -> { @@ -189,11 +231,12 @@ private MethodSpec realEval(boolean blockStyle) { } else { builtPattern = pattern.toString(); } - if (processFunction.warnExceptions.isEmpty() == false) { builder.beginControlFlow("try"); } + builder.addStatement(builtPattern, args.toArray()); + if (processFunction.warnExceptions.isEmpty() == false) { String catchPattern = "catch (" + processFunction.warnExceptions.stream().map(m -> "$T").collect(Collectors.joining(" | ")) @@ -403,7 +446,7 @@ private record StandardProcessFunctionArg(TypeName type, String name) implements @Override public TypeName dataType(boolean blockStyle) { if (blockStyle) { - return blockType(type); + return isBlockType() ? type : blockType(type); } return vectorType(type); } @@ -442,7 +485,7 @@ public String factoryInvocation(MethodSpec.Builder factoryMethodBuilder) { @Override public void evalToBlock(MethodSpec.Builder builder) { - TypeName blockType = blockType(type); + TypeName blockType = isBlockType() ? 
type : blockType(type); builder.beginControlFlow("try ($T $LBlock = ($T) $L.eval(page))", blockType, name, blockType, name); } @@ -474,6 +517,10 @@ public void unpackValues(MethodSpec.Builder builder, boolean blockStyle) { // nothing to do } + private boolean isBlockType() { + return EvaluatorImplementer.isBlockType(type); + } + @Override public void buildInvocation(StringBuilder pattern, List args, boolean blockStyle) { if (type.equals(BYTES_REF)) { @@ -488,14 +535,21 @@ public void buildInvocation(StringBuilder pattern, List args, boolean bl return; } if (blockStyle) { - pattern.append("$L.$L($L.getFirstValueIndex(p))"); + if (isBlockType()) { + pattern.append("$L"); + } else { + pattern.append("$L.$L($L.getFirstValueIndex(p))"); + } } else { pattern.append("$L.$L(p)"); } args.add(paramName(blockStyle)); - args.add(getMethod(type)); - if (blockStyle) { - args.add(paramName(true)); + String method = isBlockType() ? null : getMethod(type); + if (method != null) { + args.add(method); + if (blockStyle) { + args.add(paramName(true)); + } } } @@ -824,12 +878,101 @@ public String closeInvocation() { } } + private record BlockProcessFunctionArg(TypeName type, String name) implements ProcessFunctionArg { + @Override + public TypeName dataType(boolean blockStyle) { + return type; + } + + @Override + public String paramName(boolean blockStyle) { + return name + (blockStyle ? "Block" : "Vector"); + } + + @Override + public void declareField(TypeSpec.Builder builder) { + builder.addField(EXPRESSION_EVALUATOR, name, Modifier.PRIVATE, Modifier.FINAL); + } + + @Override + public void declareFactoryField(TypeSpec.Builder builder) { + builder.addField(EXPRESSION_EVALUATOR_FACTORY, name, Modifier.PRIVATE, Modifier.FINAL); + } + + @Override + public void implementCtor(MethodSpec.Builder builder) { + builder.addParameter(EXPRESSION_EVALUATOR, name); + builder.addStatement("this.$L = $L", name, name); + } + + @Override + public void implementFactoryCtor(MethodSpec.Builder builder) { + builder.addParameter(EXPRESSION_EVALUATOR_FACTORY, name); + builder.addStatement("this.$L = $L", name, name); + } + + @Override + public String factoryInvocation(MethodSpec.Builder factoryMethodBuilder) { + return name + ".get(context)"; + } + + @Override + public void evalToBlock(MethodSpec.Builder builder) { + builder.beginControlFlow("try ($T $LBlock = ($T) $L.eval(page))", type, name, type, name); + } + + @Override + public void closeEvalToBlock(MethodSpec.Builder builder) { + builder.endControlFlow(); + } + + @Override + public void resolveVectors(MethodSpec.Builder builder, String invokeBlockEval) { + // nothing to do + } + + @Override + public void createScratch(MethodSpec.Builder builder) { + // nothing to do + } + + @Override + public void skipNull(MethodSpec.Builder builder) { + EvaluatorImplementer.skipNull(builder, paramName(true)); + } + + @Override + public void unpackValues(MethodSpec.Builder builder, boolean blockStyle) { + // nothing to do + } + + @Override + public void buildInvocation(StringBuilder pattern, List args, boolean blockStyle) { + pattern.append("$L"); + args.add(paramName(blockStyle)); + } + + @Override + public void buildToStringInvocation(StringBuilder pattern, List args, String prefix) { + pattern.append(" + $S + $L"); + args.add(prefix + name + "="); + args.add(name); + } + + @Override + public String closeInvocation() { + return name; + } + } + private static class ProcessFunction { private final ExecutableElement function; private final List args; private final BuilderProcessFunctionArg 
builderArg; private final List warnExceptions; + private boolean hasBlockType; + private ProcessFunction( Elements elements, javax.lang.model.util.Types types, @@ -839,6 +982,7 @@ private ProcessFunction( this.function = function; args = new ArrayList<>(); BuilderProcessFunctionArg builderArg = null; + hasBlockType = false; for (VariableElement v : function.getParameters()) { TypeName type = TypeName.get(v.asType()); String name = v.getSimpleName().toString(); @@ -871,6 +1015,14 @@ private ProcessFunction( args.add(new ArrayProcessFunctionArg(TypeName.get(componentType), name)); continue; } + if (isBlockType(type)) { + if (builderArg != null && args.size() == 2 && hasBlockType == false) { + args.clear(); + hasBlockType = true; + } + args.add(new BlockProcessFunctionArg(type, name)); + continue; + } args.add(new StandardProcessFunctionArg(type, name)); } this.builderArg = builderArg; @@ -885,4 +1037,12 @@ private ClassName resultDataType(boolean blockStyle) { return useBlockStyle ? blockType(TypeName.get(function.getReturnType())) : vectorType(TypeName.get(function.getReturnType())); } } + + static boolean isBlockType(TypeName type) { + return type.equals(INT_BLOCK) + || type.equals(LONG_BLOCK) + || type.equals(DOUBLE_BLOCK) + || type.equals(BOOLEAN_BLOCK) + || type.equals(BYTES_REF_BLOCK); + } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java index 666f1ad926eeb..710eb17f72f6a 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java @@ -8,8 +8,10 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Releasables; +import java.io.IOException; import java.util.BitSet; /** @@ -35,26 +37,47 @@ final class BooleanArrayBlock extends AbstractArrayBlock implements BooleanBlock positionCount, firstValueIndexes, nulls, - mvOrdering, - blockFactory + mvOrdering ); } private BooleanArrayBlock( - BooleanArrayVector vector, + BooleanArrayVector vector, // stylecheck int positionCount, int[] firstValueIndexes, BitSet nulls, - MvOrdering mvOrdering, - BlockFactory blockFactory + MvOrdering mvOrdering ) { - super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); + super(positionCount, firstValueIndexes, nulls, mvOrdering); this.vector = vector; assert firstValueIndexes == null ? 
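
Stepping back to the `EvaluatorImplementer` changes further up: the generator emits evaluator sources with JavaPoet, and the new `processOutputsMultivalued` path changes which `eval` overloads are generated and adds the `allBlocksAreNulls` handling inside the per-position loop. A tiny, self-contained sketch of the JavaPoet idioms involved (`beginControlFlow`/`addStatement`/`endControlFlow`); the class and method names here are invented for illustration, not taken from the generator:

```java
import com.squareup.javapoet.ClassName;
import com.squareup.javapoet.JavaFile;
import com.squareup.javapoet.MethodSpec;
import com.squareup.javapoet.TypeSpec;

import javax.lang.model.element.Modifier;

// Illustrative only: emits a small class whose position loop skips a position when all
// inputs are null, mirroring the "allBlocksAreNulls" shape added to the generator.
public class EvalLoopSketch {
    public static void main(String[] args) {
        MethodSpec eval = MethodSpec.methodBuilder("eval")
            .addModifiers(Modifier.PUBLIC)
            .addParameter(int.class, "positionCount")
            .beginControlFlow("position: for (int p = 0; p < positionCount; p++)")
            .addStatement("boolean allBlocksAreNulls = true")
            .beginControlFlow("if (allBlocksAreNulls)")
            .addStatement("continue position")
            .endControlFlow()
            .endControlFlow()
            .build();

        TypeSpec type = TypeSpec.classBuilder(ClassName.get("example", "GeneratedEvaluatorSketch"))
            .addMethod(eval)
            .build();

        // Prints the generated source; the real generator wraps this in JavaFile.sourceFile().
        System.out.println(JavaFile.builder("example", type).build());
    }
}
```
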
vector.getPositionCount() == getPositionCount() : firstValueIndexes[getPositionCount()] == vector.getPositionCount(); } + static BooleanArrayBlock readArrayBlock(BlockFactory blockFactory, BlockStreamInput in) throws IOException { + final SubFields sub = new SubFields(blockFactory, in); + BooleanArrayVector vector = null; + boolean success = false; + try { + vector = BooleanArrayVector.readArrayVector(sub.vectorPositions(), in, blockFactory); + var block = new BooleanArrayBlock(vector, sub.positionCount, sub.firstValueIndexes, sub.nullsMask, sub.mvOrdering); + blockFactory.adjustBreaker(block.ramBytesUsed() - vector.ramBytesUsed() - sub.bytesReserved); + success = true; + return block; + } finally { + if (success == false) { + Releasables.close(vector); + blockFactory.adjustBreaker(-sub.bytesReserved); + } + } + } + + void writeArrayBlock(StreamOutput out) throws IOException { + writeSubFields(out); + vector.writeArrayVector(vector.getPositionCount(), out); + } + @Override public BooleanVector asVector() { return null; @@ -115,8 +138,7 @@ public BooleanBlock expand() { expandedPositionCount, null, shiftNullsToExpandedPositions(), - MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, - blockFactory() + MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING ); blockFactory().adjustBreaker(expanded.ramBytesUsedOnlyBlock() - bitSetRamUsedEstimate); // We need to incRef after adjusting any breakers, otherwise we might leak the vector if the breaker trips. @@ -160,10 +182,14 @@ public String toString() { @Override public void allowPassingToDifferentDriver() { - super.allowPassingToDifferentDriver(); vector.allowPassingToDifferentDriver(); } + @Override + public BlockFactory blockFactory() { + return vector.blockFactory(); + } + @Override public void closeInternal() { blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock()); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayVector.java index 1599061d04ce8..63f02b14d9481 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayVector.java @@ -8,7 +8,10 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import java.io.IOException; import java.util.Arrays; /** @@ -28,6 +31,33 @@ final class BooleanArrayVector extends AbstractVector implements BooleanVector { this.values = values; } + static BooleanArrayVector readArrayVector(int positions, StreamInput in, BlockFactory blockFactory) throws IOException { + final long preAdjustedBytes = RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) positions * Byte.BYTES; + blockFactory.adjustBreaker(preAdjustedBytes); + boolean success = false; + try { + boolean[] values = new boolean[positions]; + for (int i = 0; i < positions; i++) { + values[i] = in.readBoolean(); + } + final var block = new BooleanArrayVector(values, positions, blockFactory); + blockFactory.adjustBreaker(block.ramBytesUsed() - preAdjustedBytes); + success = true; + return block; + } finally { + if (success == false) { + blockFactory.adjustBreaker(-preAdjustedBytes); + } + } + } + + void writeArrayVector(int positions, StreamOutput out) throws IOException { + // TODO: One bit for each boolean 
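
The new `readArrayVector` methods all follow the same circuit-breaker discipline: charge an estimate before allocating, reconcile against the real footprint once the object exists, and give the reservation back if anything fails in between. A condensed sketch of that pattern, with a hypothetical `Breaker` interface standing in for `BlockFactory.adjustBreaker` (names invented; only the shape matches the diff):

```java
import java.io.DataInput;
import java.io.IOException;

// Hypothetical stand-in for the breaker accounting done via BlockFactory.adjustBreaker(long).
interface Breaker {
    void adjust(long bytes);
}

final class BreakerReservedRead {
    // Reads 'positions' longs, charging the breaker before the allocation and reconciling to
    // the real footprint afterwards; on failure the reservation is returned in the finally block.
    static long[] readLongs(int positions, DataInput in, Breaker breaker) throws IOException {
        final long reserved = (long) positions * Long.BYTES;   // estimate charged up front
        breaker.adjust(reserved);
        boolean success = false;
        try {
            long[] values = new long[positions];
            for (int i = 0; i < positions; i++) {
                values[i] = in.readLong();
            }
            // The real code uses block.ramBytesUsed() - preAdjustedBytes here.
            breaker.adjust(actualBytes(values) - reserved);
            success = true;
            return values;
        } finally {
            if (success == false) {
                breaker.adjust(-reserved);   // give the reservation back on any failure
            }
        }
    }

    private static long actualBytes(long[] values) {
        // Rough header + payload estimate; the real code uses RamUsageEstimator.
        return 16L + (long) values.length * Long.BYTES;
    }
}
```
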
+ for (int i = 0; i < positions; i++) { + out.writeBoolean(values[i]); + } + } + @Override public BooleanBlock asBlock() { return new BooleanVectorBlock(this); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayBlock.java index a19ed24302b65..890e6b6a59acd 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayBlock.java @@ -36,20 +36,18 @@ public BooleanBigArrayBlock( positionCount, firstValueIndexes, nulls, - mvOrdering, - blockFactory + mvOrdering ); } private BooleanBigArrayBlock( - BooleanBigArrayVector vector, + BooleanBigArrayVector vector, // stylecheck int positionCount, int[] firstValueIndexes, BitSet nulls, - MvOrdering mvOrdering, - BlockFactory blockFactory + MvOrdering mvOrdering ) { - super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); + super(positionCount, firstValueIndexes, nulls, mvOrdering); this.vector = vector; assert firstValueIndexes == null ? vector.getPositionCount() == getPositionCount() @@ -116,8 +114,7 @@ public BooleanBlock expand() { expandedPositionCount, null, shiftNullsToExpandedPositions(), - MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, - blockFactory() + MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING ); blockFactory().adjustBreaker(expanded.ramBytesUsedOnlyBlock() - bitSetRamUsedEstimate); // We need to incRef after adjusting any breakers, otherwise we might leak the vector if the breaker trips. @@ -161,10 +158,14 @@ public String toString() { @Override public void allowPassingToDifferentDriver() { - super.allowPassingToDifferentDriver(); vector.allowPassingToDifferentDriver(); } + @Override + public BlockFactory blockFactory() { + return vector.blockFactory(); + } + @Override public void closeInternal() { blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock()); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java index fffa3af137d76..1dd231c129a2d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java @@ -7,6 +7,7 @@ package org.elasticsearch.compute.data; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -49,10 +50,19 @@ private static BooleanBlock readFrom(StreamInput in) throws IOException { } private static BooleanBlock readFrom(BlockStreamInput in) throws IOException { - final boolean isVector = in.readBoolean(); - if (isVector) { - return BooleanVector.readFrom(in.blockFactory(), in).asBlock(); - } + final byte serializationType = in.readByte(); + return switch (serializationType) { + case SERIALIZE_BLOCK_VALUES -> BooleanBlock.readValues(in); + case SERIALIZE_BLOCK_VECTOR -> BooleanVector.readFrom(in.blockFactory(), in).asBlock(); + case SERIALIZE_BLOCK_ARRAY -> BooleanArrayBlock.readArrayBlock(in.blockFactory(), in); + default -> { + assert false : "invalid block serialization type " + serializationType; + 
throw new IllegalStateException("invalid serialization type " + serializationType); + } + }; + } + + private static BooleanBlock readValues(BlockStreamInput in) throws IOException { final int positions = in.readVInt(); try (BooleanBlock.Builder builder = in.blockFactory().newBooleanBlockBuilder(positions)) { for (int i = 0; i < positions; i++) { @@ -74,22 +84,31 @@ private static BooleanBlock readFrom(BlockStreamInput in) throws IOException { @Override default void writeTo(StreamOutput out) throws IOException { BooleanVector vector = asVector(); - out.writeBoolean(vector != null); + final var version = out.getTransportVersion(); if (vector != null) { + out.writeByte(SERIALIZE_BLOCK_VECTOR); vector.writeTo(out); + } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_ARRAY_BLOCK) && this instanceof BooleanArrayBlock b) { + out.writeByte(SERIALIZE_BLOCK_ARRAY); + b.writeArrayBlock(out); } else { - final int positions = getPositionCount(); - out.writeVInt(positions); - for (int pos = 0; pos < positions; pos++) { - if (isNull(pos)) { - out.writeBoolean(true); - } else { - out.writeBoolean(false); - final int valueCount = getValueCount(pos); - out.writeVInt(valueCount); - for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { - out.writeBoolean(getBoolean(getFirstValueIndex(pos) + valueIndex)); - } + out.writeByte(SERIALIZE_BLOCK_VALUES); + BooleanBlock.writeValues(this, out); + } + } + + private static void writeValues(BooleanBlock block, StreamOutput out) throws IOException { + final int positions = block.getPositionCount(); + out.writeVInt(positions); + for (int pos = 0; pos < positions; pos++) { + if (block.isNull(pos)) { + out.writeBoolean(true); + } else { + out.writeBoolean(false); + final int valueCount = block.getValueCount(pos); + out.writeVInt(valueCount); + for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { + out.writeBoolean(block.getBoolean(block.getFirstValueIndex(pos) + valueIndex)); } } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVector.java index 7c86f40981ec7..2f50b45fbfc9d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVector.java @@ -7,6 +7,7 @@ package org.elasticsearch.compute.data; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -74,30 +75,47 @@ static int hash(BooleanVector vector) { /** Deserializes a Vector from the given stream input. 
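
The Block and Vector `writeTo`/`readFrom` changes replace the old boolean vector/values flag with a one-byte serialization tag (values, vector, array) and gate the new array format on the transport version, so older nodes keep receiving the value-by-value encoding. A minimal round-trip sketch of that dispatch using plain Java streams, with invented tag constants and an invented `supportsArrayFormat` flag in place of the `SERIALIZE_*` constants and the `TransportVersions.ESQL_SERIALIZE_ARRAY_BLOCK` check:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Illustrative only: tag values and the version gate mirror the shape of the diff, not the
// actual constants on Block/Vector.
final class TaggedSerializationSketch {
    static final byte TAG_VALUES = 0;  // legacy value-by-value encoding, readable everywhere
    static final byte TAG_ARRAY = 2;   // bulk array payload, only when the peer is new enough

    static void write(double[] values, boolean supportsArrayFormat, DataOutputStream out) throws IOException {
        out.writeInt(values.length);
        if (supportsArrayFormat) {
            out.writeByte(TAG_ARRAY);
            for (double v : values) {
                out.writeDouble(v);           // one tight loop over the backing array
            }
        } else {
            out.writeByte(TAG_VALUES);
            for (double v : values) {
                out.writeBoolean(false);      // "not null" marker, as in the legacy per-position format
                out.writeDouble(v);
            }
        }
    }

    static double[] read(DataInputStream in) throws IOException {
        int positions = in.readInt();
        byte tag = in.readByte();
        double[] values = new double[positions];
        switch (tag) {
            case TAG_ARRAY -> {
                for (int i = 0; i < positions; i++) {
                    values[i] = in.readDouble();
                }
            }
            case TAG_VALUES -> {
                for (int i = 0; i < positions; i++) {
                    in.readBoolean();          // skip the null marker in this simplified reader
                    values[i] = in.readDouble();
                }
            }
            default -> throw new IllegalStateException("invalid serialization type [" + tag + "]");
        }
        return values;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        write(new double[] { 1.5, 2.5 }, true, new DataOutputStream(bytes));
        double[] roundTripped = read(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(roundTripped[1]);   // 2.5
    }
}
```
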
*/ static BooleanVector readFrom(BlockFactory blockFactory, StreamInput in) throws IOException { final int positions = in.readVInt(); - final boolean constant = in.readBoolean(); - if (constant && positions > 0) { - return blockFactory.newConstantBooleanVector(in.readBoolean(), positions); - } else { - try (var builder = blockFactory.newBooleanVectorFixedBuilder(positions)) { - for (int i = 0; i < positions; i++) { - builder.appendBoolean(in.readBoolean()); - } - return builder.build(); + final byte serializationType = in.readByte(); + return switch (serializationType) { + case SERIALIZE_VECTOR_VALUES -> readValues(positions, in, blockFactory); + case SERIALIZE_VECTOR_CONSTANT -> blockFactory.newConstantBooleanVector(in.readBoolean(), positions); + case SERIALIZE_VECTOR_ARRAY -> BooleanArrayVector.readArrayVector(positions, in, blockFactory); + default -> { + assert false : "invalid vector serialization type [" + serializationType + "]"; + throw new IllegalStateException("invalid vector serialization type [" + serializationType + "]"); } - } + }; } /** Serializes this Vector to the given stream output. */ default void writeTo(StreamOutput out) throws IOException { final int positions = getPositionCount(); + final var version = out.getTransportVersion(); out.writeVInt(positions); - out.writeBoolean(isConstant()); if (isConstant() && positions > 0) { + out.writeByte(SERIALIZE_VECTOR_CONSTANT); out.writeBoolean(getBoolean(0)); + } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_ARRAY_VECTOR) && this instanceof BooleanArrayVector v) { + out.writeByte(SERIALIZE_VECTOR_ARRAY); + v.writeArrayVector(positions, out); } else { + out.writeByte(SERIALIZE_VECTOR_VALUES); + writeValues(this, positions, out); + } + } + + private static BooleanVector readValues(int positions, StreamInput in, BlockFactory blockFactory) throws IOException { + try (var builder = blockFactory.newBooleanVectorFixedBuilder(positions)) { for (int i = 0; i < positions; i++) { - out.writeBoolean(getBoolean(i)); + builder.appendBoolean(in.readBoolean()); } + return builder.build(); + } + } + + private static void writeValues(BooleanVector v, int positions, StreamOutput out) throws IOException { + for (int i = 0; i < positions; i++) { + out.writeBoolean(v.getBoolean(i)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java index d707e3cf901c1..a42e9b148064d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java @@ -21,7 +21,6 @@ public final class BooleanVectorBlock extends AbstractVectorBlock implements Boo * @param vector considered owned by the current block; must not be used in any other {@code Block} */ BooleanVectorBlock(BooleanVector vector) { - super(vector.getPositionCount(), vector.blockFactory()); this.vector = vector; } @@ -36,7 +35,7 @@ public boolean getBoolean(int valueIndex) { } @Override - public int getTotalValueCount() { + public int getPositionCount() { return vector.getPositionCount(); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java index 69e5499eaba46..6cc66183db2ed 100644 --- 
a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java @@ -9,9 +9,11 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.BytesRefArray; import org.elasticsearch.core.Releasables; +import java.io.IOException; import java.util.BitSet; /** @@ -38,26 +40,47 @@ final class BytesRefArrayBlock extends AbstractArrayBlock implements BytesRefBlo positionCount, firstValueIndexes, nulls, - mvOrdering, - blockFactory + mvOrdering ); } private BytesRefArrayBlock( - BytesRefArrayVector vector, + BytesRefArrayVector vector, // stylecheck int positionCount, int[] firstValueIndexes, BitSet nulls, - MvOrdering mvOrdering, - BlockFactory blockFactory + MvOrdering mvOrdering ) { - super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); + super(positionCount, firstValueIndexes, nulls, mvOrdering); this.vector = vector; assert firstValueIndexes == null ? vector.getPositionCount() == getPositionCount() : firstValueIndexes[getPositionCount()] == vector.getPositionCount(); } + static BytesRefArrayBlock readArrayBlock(BlockFactory blockFactory, BlockStreamInput in) throws IOException { + final SubFields sub = new SubFields(blockFactory, in); + BytesRefArrayVector vector = null; + boolean success = false; + try { + vector = BytesRefArrayVector.readArrayVector(sub.vectorPositions(), in, blockFactory); + var block = new BytesRefArrayBlock(vector, sub.positionCount, sub.firstValueIndexes, sub.nullsMask, sub.mvOrdering); + blockFactory.adjustBreaker(block.ramBytesUsed() - vector.ramBytesUsed() - sub.bytesReserved); + success = true; + return block; + } finally { + if (success == false) { + Releasables.close(vector); + blockFactory.adjustBreaker(-sub.bytesReserved); + } + } + } + + void writeArrayBlock(StreamOutput out) throws IOException { + writeSubFields(out); + vector.writeArrayVector(vector.getPositionCount(), out); + } + @Override public BytesRefVector asVector() { return null; @@ -119,8 +142,7 @@ public BytesRefBlock expand() { expandedPositionCount, null, shiftNullsToExpandedPositions(), - MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, - blockFactory() + MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING ); blockFactory().adjustBreaker(expanded.ramBytesUsedOnlyBlock() - bitSetRamUsedEstimate); // We need to incRef after adjusting any breakers, otherwise we might leak the vector if the breaker trips. 
@@ -164,10 +186,14 @@ public String toString() { @Override public void allowPassingToDifferentDriver() { - super.allowPassingToDifferentDriver(); vector.allowPassingToDifferentDriver(); } + @Override + public BlockFactory blockFactory() { + return vector.blockFactory(); + } + @Override public void closeInternal() { blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock()); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayVector.java index 5d47802bebabe..d0b600d0f0be2 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayVector.java @@ -9,9 +9,13 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.BytesRefArray; import org.elasticsearch.core.Releasables; +import java.io.IOException; + /** * Vector implementation that stores an array of BytesRef values. * Does not take ownership of the given {@link BytesRefArray} and does not adjust circuit breakers to account for it. @@ -30,6 +34,25 @@ final class BytesRefArrayVector extends AbstractVector implements BytesRefVector this.values = values; } + static BytesRefArrayVector readArrayVector(int positions, StreamInput in, BlockFactory blockFactory) throws IOException { + final BytesRefArray values = new BytesRefArray(in, blockFactory.bigArrays()); + boolean success = false; + try { + final var block = new BytesRefArrayVector(values, positions, blockFactory); + blockFactory.adjustBreaker(block.ramBytesUsed() - values.bigArraysRamBytesUsed()); + success = true; + return block; + } finally { + if (success == false) { + values.close(); + } + } + } + + void writeArrayVector(int positions, StreamOutput out) throws IOException { + values.writeTo(out); + } + @Override public BytesRefBlock asBlock() { return new BytesRefVectorBlock(this); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java index 8ed17a1435302..8331d948ca329 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java @@ -8,6 +8,7 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -53,10 +54,19 @@ private static BytesRefBlock readFrom(StreamInput in) throws IOException { } private static BytesRefBlock readFrom(BlockStreamInput in) throws IOException { - final boolean isVector = in.readBoolean(); - if (isVector) { - return BytesRefVector.readFrom(in.blockFactory(), in).asBlock(); - } + final byte serializationType = in.readByte(); + return switch (serializationType) { + case SERIALIZE_BLOCK_VALUES -> BytesRefBlock.readValues(in); + case SERIALIZE_BLOCK_VECTOR -> BytesRefVector.readFrom(in.blockFactory(), in).asBlock(); 
+ case SERIALIZE_BLOCK_ARRAY -> BytesRefArrayBlock.readArrayBlock(in.blockFactory(), in); + default -> { + assert false : "invalid block serialization type " + serializationType; + throw new IllegalStateException("invalid serialization type " + serializationType); + } + }; + } + + private static BytesRefBlock readValues(BlockStreamInput in) throws IOException { final int positions = in.readVInt(); try (BytesRefBlock.Builder builder = in.blockFactory().newBytesRefBlockBuilder(positions)) { for (int i = 0; i < positions; i++) { @@ -78,22 +88,32 @@ private static BytesRefBlock readFrom(BlockStreamInput in) throws IOException { @Override default void writeTo(StreamOutput out) throws IOException { BytesRefVector vector = asVector(); - out.writeBoolean(vector != null); + final var version = out.getTransportVersion(); if (vector != null) { + out.writeByte(SERIALIZE_BLOCK_VECTOR); vector.writeTo(out); + } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_ARRAY_BLOCK) && this instanceof BytesRefArrayBlock b) { + out.writeByte(SERIALIZE_BLOCK_ARRAY); + b.writeArrayBlock(out); } else { - final int positions = getPositionCount(); - out.writeVInt(positions); - for (int pos = 0; pos < positions; pos++) { - if (isNull(pos)) { - out.writeBoolean(true); - } else { - out.writeBoolean(false); - final int valueCount = getValueCount(pos); - out.writeVInt(valueCount); - for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { - out.writeBytesRef(getBytesRef(getFirstValueIndex(pos) + valueIndex, new BytesRef())); - } + out.writeByte(SERIALIZE_BLOCK_VALUES); + BytesRefBlock.writeValues(this, out); + } + } + + private static void writeValues(BytesRefBlock block, StreamOutput out) throws IOException { + final int positions = block.getPositionCount(); + out.writeVInt(positions); + for (int pos = 0; pos < positions; pos++) { + if (block.isNull(pos)) { + out.writeBoolean(true); + } else { + out.writeBoolean(false); + final int valueCount = block.getValueCount(pos); + out.writeVInt(valueCount); + var scratch = new BytesRef(); + for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { + out.writeBytesRef(block.getBytesRef(block.getFirstValueIndex(pos) + valueIndex, scratch)); } } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVector.java index 5c56ece72c298..c0b107065f43c 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVector.java @@ -8,6 +8,7 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -74,30 +75,48 @@ static int hash(BytesRefVector vector) { /** Deserializes a Vector from the given stream input. 
*/ static BytesRefVector readFrom(BlockFactory blockFactory, StreamInput in) throws IOException { final int positions = in.readVInt(); - final boolean constant = in.readBoolean(); - if (constant && positions > 0) { - return blockFactory.newConstantBytesRefVector(in.readBytesRef(), positions); - } else { - try (var builder = blockFactory.newBytesRefVectorBuilder(positions)) { - for (int i = 0; i < positions; i++) { - builder.appendBytesRef(in.readBytesRef()); - } - return builder.build(); + final byte serializationType = in.readByte(); + return switch (serializationType) { + case SERIALIZE_VECTOR_VALUES -> readValues(positions, in, blockFactory); + case SERIALIZE_VECTOR_CONSTANT -> blockFactory.newConstantBytesRefVector(in.readBytesRef(), positions); + case SERIALIZE_VECTOR_ARRAY -> BytesRefArrayVector.readArrayVector(positions, in, blockFactory); + default -> { + assert false : "invalid vector serialization type [" + serializationType + "]"; + throw new IllegalStateException("invalid vector serialization type [" + serializationType + "]"); } - } + }; } /** Serializes this Vector to the given stream output. */ default void writeTo(StreamOutput out) throws IOException { final int positions = getPositionCount(); + final var version = out.getTransportVersion(); out.writeVInt(positions); - out.writeBoolean(isConstant()); if (isConstant() && positions > 0) { + out.writeByte(SERIALIZE_VECTOR_CONSTANT); out.writeBytesRef(getBytesRef(0, new BytesRef())); + } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_ARRAY_VECTOR) && this instanceof BytesRefArrayVector v) { + out.writeByte(SERIALIZE_VECTOR_ARRAY); + v.writeArrayVector(positions, out); } else { + out.writeByte(SERIALIZE_VECTOR_VALUES); + writeValues(this, positions, out); + } + } + + private static BytesRefVector readValues(int positions, StreamInput in, BlockFactory blockFactory) throws IOException { + try (var builder = blockFactory.newBytesRefVectorBuilder(positions)) { for (int i = 0; i < positions; i++) { - out.writeBytesRef(getBytesRef(i, new BytesRef())); + builder.appendBytesRef(in.readBytesRef()); } + return builder.build(); + } + } + + private static void writeValues(BytesRefVector v, int positions, StreamOutput out) throws IOException { + var scratch = new BytesRef(); + for (int i = 0; i < positions; i++) { + out.writeBytesRef(v.getBytesRef(i, scratch)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java index 92f93d5d23a49..1a077f38385e3 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java @@ -22,7 +22,6 @@ public final class BytesRefVectorBlock extends AbstractVectorBlock implements By * @param vector considered owned by the current block; must not be used in any other {@code Block} */ BytesRefVectorBlock(BytesRefVector vector) { - super(vector.getPositionCount(), vector.blockFactory()); this.vector = vector; } @@ -37,7 +36,7 @@ public BytesRef getBytesRef(int valueIndex, BytesRef dest) { } @Override - public int getTotalValueCount() { + public int getPositionCount() { return vector.getPositionCount(); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java 
b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java index b5f5c69e0508a..d872a4938a734 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java @@ -8,8 +8,10 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Releasables; +import java.io.IOException; import java.util.BitSet; /** @@ -35,26 +37,47 @@ final class DoubleArrayBlock extends AbstractArrayBlock implements DoubleBlock { positionCount, firstValueIndexes, nulls, - mvOrdering, - blockFactory + mvOrdering ); } private DoubleArrayBlock( - DoubleArrayVector vector, + DoubleArrayVector vector, // stylecheck int positionCount, int[] firstValueIndexes, BitSet nulls, - MvOrdering mvOrdering, - BlockFactory blockFactory + MvOrdering mvOrdering ) { - super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); + super(positionCount, firstValueIndexes, nulls, mvOrdering); this.vector = vector; assert firstValueIndexes == null ? vector.getPositionCount() == getPositionCount() : firstValueIndexes[getPositionCount()] == vector.getPositionCount(); } + static DoubleArrayBlock readArrayBlock(BlockFactory blockFactory, BlockStreamInput in) throws IOException { + final SubFields sub = new SubFields(blockFactory, in); + DoubleArrayVector vector = null; + boolean success = false; + try { + vector = DoubleArrayVector.readArrayVector(sub.vectorPositions(), in, blockFactory); + var block = new DoubleArrayBlock(vector, sub.positionCount, sub.firstValueIndexes, sub.nullsMask, sub.mvOrdering); + blockFactory.adjustBreaker(block.ramBytesUsed() - vector.ramBytesUsed() - sub.bytesReserved); + success = true; + return block; + } finally { + if (success == false) { + Releasables.close(vector); + blockFactory.adjustBreaker(-sub.bytesReserved); + } + } + } + + void writeArrayBlock(StreamOutput out) throws IOException { + writeSubFields(out); + vector.writeArrayVector(vector.getPositionCount(), out); + } + @Override public DoubleVector asVector() { return null; @@ -115,8 +138,7 @@ public DoubleBlock expand() { expandedPositionCount, null, shiftNullsToExpandedPositions(), - MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, - blockFactory() + MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING ); blockFactory().adjustBreaker(expanded.ramBytesUsedOnlyBlock() - bitSetRamUsedEstimate); // We need to incRef after adjusting any breakers, otherwise we might leak the vector if the breaker trips. 
@@ -160,10 +182,14 @@ public String toString() { @Override public void allowPassingToDifferentDriver() { - super.allowPassingToDifferentDriver(); vector.allowPassingToDifferentDriver(); } + @Override + public BlockFactory blockFactory() { + return vector.blockFactory(); + } + @Override public void closeInternal() { blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock()); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayVector.java index 9a9fedb95a1b6..a7868beaf5db8 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayVector.java @@ -8,7 +8,10 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import java.io.IOException; import java.util.Arrays; /** @@ -28,6 +31,32 @@ final class DoubleArrayVector extends AbstractVector implements DoubleVector { this.values = values; } + static DoubleArrayVector readArrayVector(int positions, StreamInput in, BlockFactory blockFactory) throws IOException { + final long preAdjustedBytes = RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) positions * Double.BYTES; + blockFactory.adjustBreaker(preAdjustedBytes); + boolean success = false; + try { + double[] values = new double[positions]; + for (int i = 0; i < positions; i++) { + values[i] = in.readDouble(); + } + final var block = new DoubleArrayVector(values, positions, blockFactory); + blockFactory.adjustBreaker(block.ramBytesUsed() - preAdjustedBytes); + success = true; + return block; + } finally { + if (success == false) { + blockFactory.adjustBreaker(-preAdjustedBytes); + } + } + } + + void writeArrayVector(int positions, StreamOutput out) throws IOException { + for (int i = 0; i < positions; i++) { + out.writeDouble(values[i]); + } + } + @Override public DoubleBlock asBlock() { return new DoubleVectorBlock(this); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayBlock.java index 39f959edf5ee3..702499513a0c3 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayBlock.java @@ -36,20 +36,18 @@ public DoubleBigArrayBlock( positionCount, firstValueIndexes, nulls, - mvOrdering, - blockFactory + mvOrdering ); } private DoubleBigArrayBlock( - DoubleBigArrayVector vector, + DoubleBigArrayVector vector, // stylecheck int positionCount, int[] firstValueIndexes, BitSet nulls, - MvOrdering mvOrdering, - BlockFactory blockFactory + MvOrdering mvOrdering ) { - super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); + super(positionCount, firstValueIndexes, nulls, mvOrdering); this.vector = vector; assert firstValueIndexes == null ? 
vector.getPositionCount() == getPositionCount() @@ -116,8 +114,7 @@ public DoubleBlock expand() { expandedPositionCount, null, shiftNullsToExpandedPositions(), - MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, - blockFactory() + MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING ); blockFactory().adjustBreaker(expanded.ramBytesUsedOnlyBlock() - bitSetRamUsedEstimate); // We need to incRef after adjusting any breakers, otherwise we might leak the vector if the breaker trips. @@ -161,10 +158,14 @@ public String toString() { @Override public void allowPassingToDifferentDriver() { - super.allowPassingToDifferentDriver(); vector.allowPassingToDifferentDriver(); } + @Override + public BlockFactory blockFactory() { + return vector.blockFactory(); + } + @Override public void closeInternal() { blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock()); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java index 890f965c765bb..20be6402ba097 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java @@ -7,6 +7,7 @@ package org.elasticsearch.compute.data; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -49,10 +50,19 @@ private static DoubleBlock readFrom(StreamInput in) throws IOException { } private static DoubleBlock readFrom(BlockStreamInput in) throws IOException { - final boolean isVector = in.readBoolean(); - if (isVector) { - return DoubleVector.readFrom(in.blockFactory(), in).asBlock(); - } + final byte serializationType = in.readByte(); + return switch (serializationType) { + case SERIALIZE_BLOCK_VALUES -> DoubleBlock.readValues(in); + case SERIALIZE_BLOCK_VECTOR -> DoubleVector.readFrom(in.blockFactory(), in).asBlock(); + case SERIALIZE_BLOCK_ARRAY -> DoubleArrayBlock.readArrayBlock(in.blockFactory(), in); + default -> { + assert false : "invalid block serialization type " + serializationType; + throw new IllegalStateException("invalid serialization type " + serializationType); + } + }; + } + + private static DoubleBlock readValues(BlockStreamInput in) throws IOException { final int positions = in.readVInt(); try (DoubleBlock.Builder builder = in.blockFactory().newDoubleBlockBuilder(positions)) { for (int i = 0; i < positions; i++) { @@ -74,22 +84,31 @@ private static DoubleBlock readFrom(BlockStreamInput in) throws IOException { @Override default void writeTo(StreamOutput out) throws IOException { DoubleVector vector = asVector(); - out.writeBoolean(vector != null); + final var version = out.getTransportVersion(); if (vector != null) { + out.writeByte(SERIALIZE_BLOCK_VECTOR); vector.writeTo(out); + } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_ARRAY_BLOCK) && this instanceof DoubleArrayBlock b) { + out.writeByte(SERIALIZE_BLOCK_ARRAY); + b.writeArrayBlock(out); } else { - final int positions = getPositionCount(); - out.writeVInt(positions); - for (int pos = 0; pos < positions; pos++) { - if (isNull(pos)) { - out.writeBoolean(true); - } else { - out.writeBoolean(false); - final int valueCount = getValueCount(pos); - out.writeVInt(valueCount); - for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) 
{ - out.writeDouble(getDouble(getFirstValueIndex(pos) + valueIndex)); - } + out.writeByte(SERIALIZE_BLOCK_VALUES); + DoubleBlock.writeValues(this, out); + } + } + + private static void writeValues(DoubleBlock block, StreamOutput out) throws IOException { + final int positions = block.getPositionCount(); + out.writeVInt(positions); + for (int pos = 0; pos < positions; pos++) { + if (block.isNull(pos)) { + out.writeBoolean(true); + } else { + out.writeBoolean(false); + final int valueCount = block.getValueCount(pos); + out.writeVInt(valueCount); + for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { + out.writeDouble(block.getDouble(block.getFirstValueIndex(pos) + valueIndex)); } } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVector.java index f54044874acdd..c5553f6a102f9 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVector.java @@ -7,6 +7,7 @@ package org.elasticsearch.compute.data; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -75,30 +76,47 @@ static int hash(DoubleVector vector) { /** Deserializes a Vector from the given stream input. */ static DoubleVector readFrom(BlockFactory blockFactory, StreamInput in) throws IOException { final int positions = in.readVInt(); - final boolean constant = in.readBoolean(); - if (constant && positions > 0) { - return blockFactory.newConstantDoubleVector(in.readDouble(), positions); - } else { - try (var builder = blockFactory.newDoubleVectorFixedBuilder(positions)) { - for (int i = 0; i < positions; i++) { - builder.appendDouble(in.readDouble()); - } - return builder.build(); + final byte serializationType = in.readByte(); + return switch (serializationType) { + case SERIALIZE_VECTOR_VALUES -> readValues(positions, in, blockFactory); + case SERIALIZE_VECTOR_CONSTANT -> blockFactory.newConstantDoubleVector(in.readDouble(), positions); + case SERIALIZE_VECTOR_ARRAY -> DoubleArrayVector.readArrayVector(positions, in, blockFactory); + default -> { + assert false : "invalid vector serialization type [" + serializationType + "]"; + throw new IllegalStateException("invalid vector serialization type [" + serializationType + "]"); } - } + }; } /** Serializes this Vector to the given stream output. 
*/ default void writeTo(StreamOutput out) throws IOException { final int positions = getPositionCount(); + final var version = out.getTransportVersion(); out.writeVInt(positions); - out.writeBoolean(isConstant()); if (isConstant() && positions > 0) { + out.writeByte(SERIALIZE_VECTOR_CONSTANT); out.writeDouble(getDouble(0)); + } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_ARRAY_VECTOR) && this instanceof DoubleArrayVector v) { + out.writeByte(SERIALIZE_VECTOR_ARRAY); + v.writeArrayVector(positions, out); } else { + out.writeByte(SERIALIZE_VECTOR_VALUES); + writeValues(this, positions, out); + } + } + + private static DoubleVector readValues(int positions, StreamInput in, BlockFactory blockFactory) throws IOException { + try (var builder = blockFactory.newDoubleVectorFixedBuilder(positions)) { for (int i = 0; i < positions; i++) { - out.writeDouble(getDouble(i)); + builder.appendDouble(in.readDouble()); } + return builder.build(); + } + } + + private static void writeValues(DoubleVector v, int positions, StreamOutput out) throws IOException { + for (int i = 0; i < positions; i++) { + out.writeDouble(v.getDouble(i)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java index 2aa8e07c25604..647849a968df9 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java @@ -21,7 +21,6 @@ public final class DoubleVectorBlock extends AbstractVectorBlock implements Doub * @param vector considered owned by the current block; must not be used in any other {@code Block} */ DoubleVectorBlock(DoubleVector vector) { - super(vector.getPositionCount(), vector.blockFactory()); this.vector = vector; } @@ -36,7 +35,7 @@ public double getDouble(int valueIndex) { } @Override - public int getTotalValueCount() { + public int getPositionCount() { return vector.getPositionCount(); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java index 2afefbff16117..492769d1f3d43 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java @@ -8,8 +8,10 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Releasables; +import java.io.IOException; import java.util.BitSet; /** @@ -35,26 +37,47 @@ final class IntArrayBlock extends AbstractArrayBlock implements IntBlock { positionCount, firstValueIndexes, nulls, - mvOrdering, - blockFactory + mvOrdering ); } private IntArrayBlock( - IntArrayVector vector, + IntArrayVector vector, // stylecheck int positionCount, int[] firstValueIndexes, BitSet nulls, - MvOrdering mvOrdering, - BlockFactory blockFactory + MvOrdering mvOrdering ) { - super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); + super(positionCount, firstValueIndexes, nulls, mvOrdering); this.vector = vector; assert firstValueIndexes == null ? 
vector.getPositionCount() == getPositionCount() : firstValueIndexes[getPositionCount()] == vector.getPositionCount(); } + static IntArrayBlock readArrayBlock(BlockFactory blockFactory, BlockStreamInput in) throws IOException { + final SubFields sub = new SubFields(blockFactory, in); + IntArrayVector vector = null; + boolean success = false; + try { + vector = IntArrayVector.readArrayVector(sub.vectorPositions(), in, blockFactory); + var block = new IntArrayBlock(vector, sub.positionCount, sub.firstValueIndexes, sub.nullsMask, sub.mvOrdering); + blockFactory.adjustBreaker(block.ramBytesUsed() - vector.ramBytesUsed() - sub.bytesReserved); + success = true; + return block; + } finally { + if (success == false) { + Releasables.close(vector); + blockFactory.adjustBreaker(-sub.bytesReserved); + } + } + } + + void writeArrayBlock(StreamOutput out) throws IOException { + writeSubFields(out); + vector.writeArrayVector(vector.getPositionCount(), out); + } + @Override public IntVector asVector() { return null; @@ -115,8 +138,7 @@ public IntBlock expand() { expandedPositionCount, null, shiftNullsToExpandedPositions(), - MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, - blockFactory() + MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING ); blockFactory().adjustBreaker(expanded.ramBytesUsedOnlyBlock() - bitSetRamUsedEstimate); // We need to incRef after adjusting any breakers, otherwise we might leak the vector if the breaker trips. @@ -160,10 +182,14 @@ public String toString() { @Override public void allowPassingToDifferentDriver() { - super.allowPassingToDifferentDriver(); vector.allowPassingToDifferentDriver(); } + @Override + public BlockFactory blockFactory() { + return vector.blockFactory(); + } + @Override public void closeInternal() { blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock()); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayVector.java index 9374a4db4b4c4..644af9ae512a8 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayVector.java @@ -8,7 +8,10 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import java.io.IOException; import java.util.Arrays; /** @@ -28,6 +31,32 @@ final class IntArrayVector extends AbstractVector implements IntVector { this.values = values; } + static IntArrayVector readArrayVector(int positions, StreamInput in, BlockFactory blockFactory) throws IOException { + final long preAdjustedBytes = RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) positions * Integer.BYTES; + blockFactory.adjustBreaker(preAdjustedBytes); + boolean success = false; + try { + int[] values = new int[positions]; + for (int i = 0; i < positions; i++) { + values[i] = in.readInt(); + } + final var block = new IntArrayVector(values, positions, blockFactory); + blockFactory.adjustBreaker(block.ramBytesUsed() - preAdjustedBytes); + success = true; + return block; + } finally { + if (success == false) { + blockFactory.adjustBreaker(-preAdjustedBytes); + } + } + } + + void writeArrayVector(int positions, StreamOutput out) throws IOException { + for (int i = 0; i < positions; i++) { + out.writeInt(values[i]); + } + } + @Override public 
IntBlock asBlock() { return new IntVectorBlock(this); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayBlock.java index dc60ce43c04cc..5e29dace7449c 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayBlock.java @@ -36,20 +36,18 @@ public IntBigArrayBlock( positionCount, firstValueIndexes, nulls, - mvOrdering, - blockFactory + mvOrdering ); } private IntBigArrayBlock( - IntBigArrayVector vector, + IntBigArrayVector vector, // stylecheck int positionCount, int[] firstValueIndexes, BitSet nulls, - MvOrdering mvOrdering, - BlockFactory blockFactory + MvOrdering mvOrdering ) { - super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); + super(positionCount, firstValueIndexes, nulls, mvOrdering); this.vector = vector; assert firstValueIndexes == null ? vector.getPositionCount() == getPositionCount() @@ -116,8 +114,7 @@ public IntBlock expand() { expandedPositionCount, null, shiftNullsToExpandedPositions(), - MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, - blockFactory() + MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING ); blockFactory().adjustBreaker(expanded.ramBytesUsedOnlyBlock() - bitSetRamUsedEstimate); // We need to incRef after adjusting any breakers, otherwise we might leak the vector if the breaker trips. @@ -161,10 +158,14 @@ public String toString() { @Override public void allowPassingToDifferentDriver() { - super.allowPassingToDifferentDriver(); vector.allowPassingToDifferentDriver(); } + @Override + public BlockFactory blockFactory() { + return vector.blockFactory(); + } + @Override public void closeInternal() { blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock()); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java index 9a66445eb55a2..296d9378323a5 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java @@ -7,6 +7,7 @@ package org.elasticsearch.compute.data; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -49,10 +50,19 @@ private static IntBlock readFrom(StreamInput in) throws IOException { } private static IntBlock readFrom(BlockStreamInput in) throws IOException { - final boolean isVector = in.readBoolean(); - if (isVector) { - return IntVector.readFrom(in.blockFactory(), in).asBlock(); - } + final byte serializationType = in.readByte(); + return switch (serializationType) { + case SERIALIZE_BLOCK_VALUES -> IntBlock.readValues(in); + case SERIALIZE_BLOCK_VECTOR -> IntVector.readFrom(in.blockFactory(), in).asBlock(); + case SERIALIZE_BLOCK_ARRAY -> IntArrayBlock.readArrayBlock(in.blockFactory(), in); + default -> { + assert false : "invalid block serialization type " + serializationType; + throw new IllegalStateException("invalid serialization type " + serializationType); + } + }; + } + + private static IntBlock readValues(BlockStreamInput in) throws IOException { final 
int positions = in.readVInt(); try (IntBlock.Builder builder = in.blockFactory().newIntBlockBuilder(positions)) { for (int i = 0; i < positions; i++) { @@ -74,22 +84,31 @@ private static IntBlock readFrom(BlockStreamInput in) throws IOException { @Override default void writeTo(StreamOutput out) throws IOException { IntVector vector = asVector(); - out.writeBoolean(vector != null); + final var version = out.getTransportVersion(); if (vector != null) { + out.writeByte(SERIALIZE_BLOCK_VECTOR); vector.writeTo(out); + } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_ARRAY_BLOCK) && this instanceof IntArrayBlock b) { + out.writeByte(SERIALIZE_BLOCK_ARRAY); + b.writeArrayBlock(out); } else { - final int positions = getPositionCount(); - out.writeVInt(positions); - for (int pos = 0; pos < positions; pos++) { - if (isNull(pos)) { - out.writeBoolean(true); - } else { - out.writeBoolean(false); - final int valueCount = getValueCount(pos); - out.writeVInt(valueCount); - for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { - out.writeInt(getInt(getFirstValueIndex(pos) + valueIndex)); - } + out.writeByte(SERIALIZE_BLOCK_VALUES); + IntBlock.writeValues(this, out); + } + } + + private static void writeValues(IntBlock block, StreamOutput out) throws IOException { + final int positions = block.getPositionCount(); + out.writeVInt(positions); + for (int pos = 0; pos < positions; pos++) { + if (block.isNull(pos)) { + out.writeBoolean(true); + } else { + out.writeBoolean(false); + final int valueCount = block.getValueCount(pos); + out.writeVInt(valueCount); + for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { + out.writeInt(block.getInt(block.getFirstValueIndex(pos) + valueIndex)); } } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVector.java index bc7e3c87ec33d..1d4fb0741cab0 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVector.java @@ -7,6 +7,7 @@ package org.elasticsearch.compute.data; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -74,30 +75,47 @@ static int hash(IntVector vector) { /** Deserializes a Vector from the given stream input. 
*/ static IntVector readFrom(BlockFactory blockFactory, StreamInput in) throws IOException { final int positions = in.readVInt(); - final boolean constant = in.readBoolean(); - if (constant && positions > 0) { - return blockFactory.newConstantIntVector(in.readInt(), positions); - } else { - try (var builder = blockFactory.newIntVectorFixedBuilder(positions)) { - for (int i = 0; i < positions; i++) { - builder.appendInt(in.readInt()); - } - return builder.build(); + final byte serializationType = in.readByte(); + return switch (serializationType) { + case SERIALIZE_VECTOR_VALUES -> readValues(positions, in, blockFactory); + case SERIALIZE_VECTOR_CONSTANT -> blockFactory.newConstantIntVector(in.readInt(), positions); + case SERIALIZE_VECTOR_ARRAY -> IntArrayVector.readArrayVector(positions, in, blockFactory); + default -> { + assert false : "invalid vector serialization type [" + serializationType + "]"; + throw new IllegalStateException("invalid vector serialization type [" + serializationType + "]"); } - } + }; } /** Serializes this Vector to the given stream output. */ default void writeTo(StreamOutput out) throws IOException { final int positions = getPositionCount(); + final var version = out.getTransportVersion(); out.writeVInt(positions); - out.writeBoolean(isConstant()); if (isConstant() && positions > 0) { + out.writeByte(SERIALIZE_VECTOR_CONSTANT); out.writeInt(getInt(0)); + } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_ARRAY_VECTOR) && this instanceof IntArrayVector v) { + out.writeByte(SERIALIZE_VECTOR_ARRAY); + v.writeArrayVector(positions, out); } else { + out.writeByte(SERIALIZE_VECTOR_VALUES); + writeValues(this, positions, out); + } + } + + private static IntVector readValues(int positions, StreamInput in, BlockFactory blockFactory) throws IOException { + try (var builder = blockFactory.newIntVectorFixedBuilder(positions)) { for (int i = 0; i < positions; i++) { - out.writeInt(getInt(i)); + builder.appendInt(in.readInt()); } + return builder.build(); + } + } + + private static void writeValues(IntVector v, int positions, StreamOutput out) throws IOException { + for (int i = 0; i < positions; i++) { + out.writeInt(v.getInt(i)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java index 97a4a48533e3a..4f9bb236dfa80 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java @@ -21,7 +21,6 @@ public final class IntVectorBlock extends AbstractVectorBlock implements IntBloc * @param vector considered owned by the current block; must not be used in any other {@code Block} */ IntVectorBlock(IntVector vector) { - super(vector.getPositionCount(), vector.blockFactory()); this.vector = vector; } @@ -36,7 +35,7 @@ public int getInt(int valueIndex) { } @Override - public int getTotalValueCount() { + public int getPositionCount() { return vector.getPositionCount(); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java index 7491d6519fc57..77ae863e41ff0 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java +++ 
b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java @@ -8,8 +8,10 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Releasables; +import java.io.IOException; import java.util.BitSet; /** @@ -35,26 +37,47 @@ final class LongArrayBlock extends AbstractArrayBlock implements LongBlock { positionCount, firstValueIndexes, nulls, - mvOrdering, - blockFactory + mvOrdering ); } private LongArrayBlock( - LongArrayVector vector, + LongArrayVector vector, // stylecheck int positionCount, int[] firstValueIndexes, BitSet nulls, - MvOrdering mvOrdering, - BlockFactory blockFactory + MvOrdering mvOrdering ) { - super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); + super(positionCount, firstValueIndexes, nulls, mvOrdering); this.vector = vector; assert firstValueIndexes == null ? vector.getPositionCount() == getPositionCount() : firstValueIndexes[getPositionCount()] == vector.getPositionCount(); } + static LongArrayBlock readArrayBlock(BlockFactory blockFactory, BlockStreamInput in) throws IOException { + final SubFields sub = new SubFields(blockFactory, in); + LongArrayVector vector = null; + boolean success = false; + try { + vector = LongArrayVector.readArrayVector(sub.vectorPositions(), in, blockFactory); + var block = new LongArrayBlock(vector, sub.positionCount, sub.firstValueIndexes, sub.nullsMask, sub.mvOrdering); + blockFactory.adjustBreaker(block.ramBytesUsed() - vector.ramBytesUsed() - sub.bytesReserved); + success = true; + return block; + } finally { + if (success == false) { + Releasables.close(vector); + blockFactory.adjustBreaker(-sub.bytesReserved); + } + } + } + + void writeArrayBlock(StreamOutput out) throws IOException { + writeSubFields(out); + vector.writeArrayVector(vector.getPositionCount(), out); + } + @Override public LongVector asVector() { return null; @@ -115,8 +138,7 @@ public LongBlock expand() { expandedPositionCount, null, shiftNullsToExpandedPositions(), - MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, - blockFactory() + MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING ); blockFactory().adjustBreaker(expanded.ramBytesUsedOnlyBlock() - bitSetRamUsedEstimate); // We need to incRef after adjusting any breakers, otherwise we might leak the vector if the breaker trips. 
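Aside: the new read path for array vectors (readArrayVector / readArrayBlock above) follows a reserve-then-true-up pattern against the circuit breaker: reserve an estimate for the raw array before allocating, read the values, adjust by the difference to the finished structure's real ramBytesUsed, and roll the reservation back if anything fails so nothing leaks when the breaker trips mid-read. A minimal self-contained sketch of that pattern, assuming a hypothetical MemoryBreaker interface in place of the real BlockFactory.adjustBreaker API:

import java.io.DataInput;
import java.io.IOException;

interface MemoryBreaker {
    void adjust(long bytes); // hypothetical stand-in for blockFactory.adjustBreaker(...)
}

final class ReadWithBreakerSketch {
    // Reserve an estimate up front, keep it only if reading succeeds, undo it on failure.
    static long[] readLongs(int positions, DataInput in, MemoryBreaker breaker) throws IOException {
        final long reserved = (long) positions * Long.BYTES; // estimate for the raw array
        breaker.adjust(reserved);
        boolean success = false;
        try {
            long[] values = new long[positions];
            for (int i = 0; i < positions; i++) {
                values[i] = in.readLong();
            }
            success = true;
            return values;
        } finally {
            if (success == false) {
                breaker.adjust(-reserved); // release the reservation if deserialization failed
            }
        }
    }
}

In the real code the true-up step (adjusting by ramBytesUsed minus the pre-reservation) happens once the vector or block object exists, which is why the constructors are only reached inside the guarded try block.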
@@ -160,10 +182,14 @@ public String toString() { @Override public void allowPassingToDifferentDriver() { - super.allowPassingToDifferentDriver(); vector.allowPassingToDifferentDriver(); } + @Override + public BlockFactory blockFactory() { + return vector.blockFactory(); + } + @Override public void closeInternal() { blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock()); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayVector.java index a50987f1d6959..b3cee58356d70 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayVector.java @@ -8,7 +8,10 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import java.io.IOException; import java.util.Arrays; /** @@ -28,6 +31,32 @@ final class LongArrayVector extends AbstractVector implements LongVector { this.values = values; } + static LongArrayVector readArrayVector(int positions, StreamInput in, BlockFactory blockFactory) throws IOException { + final long preAdjustedBytes = RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) positions * Long.BYTES; + blockFactory.adjustBreaker(preAdjustedBytes); + boolean success = false; + try { + long[] values = new long[positions]; + for (int i = 0; i < positions; i++) { + values[i] = in.readLong(); + } + final var block = new LongArrayVector(values, positions, blockFactory); + blockFactory.adjustBreaker(block.ramBytesUsed() - preAdjustedBytes); + success = true; + return block; + } finally { + if (success == false) { + blockFactory.adjustBreaker(-preAdjustedBytes); + } + } + } + + void writeArrayVector(int positions, StreamOutput out) throws IOException { + for (int i = 0; i < positions; i++) { + out.writeLong(values[i]); + } + } + @Override public LongBlock asBlock() { return new LongVectorBlock(this); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayBlock.java index 3ff9a12991d43..f4b1f16566d24 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayBlock.java @@ -36,20 +36,18 @@ public LongBigArrayBlock( positionCount, firstValueIndexes, nulls, - mvOrdering, - blockFactory + mvOrdering ); } private LongBigArrayBlock( - LongBigArrayVector vector, + LongBigArrayVector vector, // stylecheck int positionCount, int[] firstValueIndexes, BitSet nulls, - MvOrdering mvOrdering, - BlockFactory blockFactory + MvOrdering mvOrdering ) { - super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); + super(positionCount, firstValueIndexes, nulls, mvOrdering); this.vector = vector; assert firstValueIndexes == null ? 
vector.getPositionCount() == getPositionCount() @@ -116,8 +114,7 @@ public LongBlock expand() { expandedPositionCount, null, shiftNullsToExpandedPositions(), - MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, - blockFactory() + MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING ); blockFactory().adjustBreaker(expanded.ramBytesUsedOnlyBlock() - bitSetRamUsedEstimate); // We need to incRef after adjusting any breakers, otherwise we might leak the vector if the breaker trips. @@ -161,10 +158,14 @@ public String toString() { @Override public void allowPassingToDifferentDriver() { - super.allowPassingToDifferentDriver(); vector.allowPassingToDifferentDriver(); } + @Override + public BlockFactory blockFactory() { + return vector.blockFactory(); + } + @Override public void closeInternal() { blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock()); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java index 5e5dc0606b896..820600bda87f9 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java @@ -7,6 +7,7 @@ package org.elasticsearch.compute.data; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -49,10 +50,19 @@ private static LongBlock readFrom(StreamInput in) throws IOException { } private static LongBlock readFrom(BlockStreamInput in) throws IOException { - final boolean isVector = in.readBoolean(); - if (isVector) { - return LongVector.readFrom(in.blockFactory(), in).asBlock(); - } + final byte serializationType = in.readByte(); + return switch (serializationType) { + case SERIALIZE_BLOCK_VALUES -> LongBlock.readValues(in); + case SERIALIZE_BLOCK_VECTOR -> LongVector.readFrom(in.blockFactory(), in).asBlock(); + case SERIALIZE_BLOCK_ARRAY -> LongArrayBlock.readArrayBlock(in.blockFactory(), in); + default -> { + assert false : "invalid block serialization type " + serializationType; + throw new IllegalStateException("invalid serialization type " + serializationType); + } + }; + } + + private static LongBlock readValues(BlockStreamInput in) throws IOException { final int positions = in.readVInt(); try (LongBlock.Builder builder = in.blockFactory().newLongBlockBuilder(positions)) { for (int i = 0; i < positions; i++) { @@ -74,22 +84,31 @@ private static LongBlock readFrom(BlockStreamInput in) throws IOException { @Override default void writeTo(StreamOutput out) throws IOException { LongVector vector = asVector(); - out.writeBoolean(vector != null); + final var version = out.getTransportVersion(); if (vector != null) { + out.writeByte(SERIALIZE_BLOCK_VECTOR); vector.writeTo(out); + } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_ARRAY_BLOCK) && this instanceof LongArrayBlock b) { + out.writeByte(SERIALIZE_BLOCK_ARRAY); + b.writeArrayBlock(out); } else { - final int positions = getPositionCount(); - out.writeVInt(positions); - for (int pos = 0; pos < positions; pos++) { - if (isNull(pos)) { - out.writeBoolean(true); - } else { - out.writeBoolean(false); - final int valueCount = getValueCount(pos); - out.writeVInt(valueCount); - for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { - 
out.writeLong(getLong(getFirstValueIndex(pos) + valueIndex)); - } + out.writeByte(SERIALIZE_BLOCK_VALUES); + LongBlock.writeValues(this, out); + } + } + + private static void writeValues(LongBlock block, StreamOutput out) throws IOException { + final int positions = block.getPositionCount(); + out.writeVInt(positions); + for (int pos = 0; pos < positions; pos++) { + if (block.isNull(pos)) { + out.writeBoolean(true); + } else { + out.writeBoolean(false); + final int valueCount = block.getValueCount(pos); + out.writeVInt(valueCount); + for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { + out.writeLong(block.getLong(block.getFirstValueIndex(pos) + valueIndex)); } } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVector.java index 358f5b32366cb..60592469f0ea1 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVector.java @@ -7,6 +7,7 @@ package org.elasticsearch.compute.data; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -75,30 +76,47 @@ static int hash(LongVector vector) { /** Deserializes a Vector from the given stream input. */ static LongVector readFrom(BlockFactory blockFactory, StreamInput in) throws IOException { final int positions = in.readVInt(); - final boolean constant = in.readBoolean(); - if (constant && positions > 0) { - return blockFactory.newConstantLongVector(in.readLong(), positions); - } else { - try (var builder = blockFactory.newLongVectorFixedBuilder(positions)) { - for (int i = 0; i < positions; i++) { - builder.appendLong(in.readLong()); - } - return builder.build(); + final byte serializationType = in.readByte(); + return switch (serializationType) { + case SERIALIZE_VECTOR_VALUES -> readValues(positions, in, blockFactory); + case SERIALIZE_VECTOR_CONSTANT -> blockFactory.newConstantLongVector(in.readLong(), positions); + case SERIALIZE_VECTOR_ARRAY -> LongArrayVector.readArrayVector(positions, in, blockFactory); + default -> { + assert false : "invalid vector serialization type [" + serializationType + "]"; + throw new IllegalStateException("invalid vector serialization type [" + serializationType + "]"); } - } + }; } /** Serializes this Vector to the given stream output. 
*/ default void writeTo(StreamOutput out) throws IOException { final int positions = getPositionCount(); + final var version = out.getTransportVersion(); out.writeVInt(positions); - out.writeBoolean(isConstant()); if (isConstant() && positions > 0) { + out.writeByte(SERIALIZE_VECTOR_CONSTANT); out.writeLong(getLong(0)); + } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_ARRAY_VECTOR) && this instanceof LongArrayVector v) { + out.writeByte(SERIALIZE_VECTOR_ARRAY); + v.writeArrayVector(positions, out); } else { + out.writeByte(SERIALIZE_VECTOR_VALUES); + writeValues(this, positions, out); + } + } + + private static LongVector readValues(int positions, StreamInput in, BlockFactory blockFactory) throws IOException { + try (var builder = blockFactory.newLongVectorFixedBuilder(positions)) { for (int i = 0; i < positions; i++) { - out.writeLong(getLong(i)); + builder.appendLong(in.readLong()); } + return builder.build(); + } + } + + private static void writeValues(LongVector v, int positions, StreamOutput out) throws IOException { + for (int i = 0; i < positions; i++) { + out.writeLong(v.getLong(i)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java index 1f4565fec5a8d..0d7d1f691837f 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java @@ -21,7 +21,6 @@ public final class LongVectorBlock extends AbstractVectorBlock implements LongBl * @param vector considered owned by the current block; must not be used in any other {@code Block} */ LongVectorBlock(LongVector vector) { - super(vector.getPositionCount(), vector.blockFactory()); this.vector = vector; } @@ -36,7 +35,7 @@ public long getLong(int valueIndex) { } @Override - public int getTotalValueCount() { + public int getPositionCount() { return vector.getPositionCount(); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractArrayBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractArrayBlock.java index d6046f0bda085..81098cba393bb 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractArrayBlock.java @@ -7,38 +7,37 @@ package org.elasticsearch.compute.data; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; +import java.io.IOException; import java.util.BitSet; -abstract class AbstractArrayBlock extends AbstractBlock { - +abstract class AbstractArrayBlock extends AbstractNonThreadSafeRefCounted implements Block { private final MvOrdering mvOrdering; + protected final int positionCount; - /** - * @param positionCount the number of values in this block - */ - protected AbstractArrayBlock(int positionCount, MvOrdering mvOrdering, BlockFactory blockFactory) { - super(positionCount, blockFactory); - this.mvOrdering = mvOrdering; - } + @Nullable + protected final int[] firstValueIndexes; + + @Nullable + protected final BitSet nullsMask; /** * @param positionCount the number of values in this block */ - protected AbstractArrayBlock( - int positionCount, - @Nullable int[] firstValueIndexes, 
- @Nullable BitSet nullsMask, - MvOrdering mvOrdering, - BlockFactory blockFactory - ) { - super(positionCount, firstValueIndexes, nullsMask, blockFactory); + protected AbstractArrayBlock(int positionCount, @Nullable int[] firstValueIndexes, @Nullable BitSet nullsMask, MvOrdering mvOrdering) { + this.positionCount = positionCount; + this.firstValueIndexes = firstValueIndexes; this.mvOrdering = mvOrdering; + this.nullsMask = nullsMask == null || nullsMask.isEmpty() ? null : nullsMask; + assert nullsMask != null || firstValueIndexes != null : "Create VectorBlock instead"; + assert assertInvariants(); } @Override - public boolean mayHaveMultivaluedFields() { + public final boolean mayHaveMultivaluedFields() { /* * This could return a false positive if all the indices are one away from * each other. But we will try to avoid that. @@ -51,7 +50,7 @@ public final MvOrdering mvOrdering() { return mvOrdering; } - protected BitSet shiftNullsToExpandedPositions() { + protected final BitSet shiftNullsToExpandedPositions() { BitSet expanded = new BitSet(nullsMask.size()); int next = -1; while ((next = nullsMask.nextSetBit(next + 1)) != -1) { @@ -59,4 +58,126 @@ protected BitSet shiftNullsToExpandedPositions() { } return expanded; } + + private boolean assertInvariants() { + if (firstValueIndexes != null) { + assert firstValueIndexes.length == getPositionCount() + 1; + for (int i = 0; i < getPositionCount(); i++) { + assert (firstValueIndexes[i + 1] - firstValueIndexes[i]) >= 0; + } + } + if (nullsMask != null) { + assert nullsMask.nextSetBit(getPositionCount() + 1) == -1; + } + if (firstValueIndexes != null && nullsMask != null) { + for (int i = 0; i < getPositionCount(); i++) { + // Either we have multi-values or a null but never both. + assert ((nullsMask.get(i) == false) || (firstValueIndexes[i + 1] - firstValueIndexes[i]) == 1); + } + } + return true; + } + + @Override + public final int getTotalValueCount() { + if (firstValueIndexes == null) { + return positionCount - nullValuesCount(); + } + return firstValueIndexes[positionCount] - nullValuesCount(); + } + + @Override + public final int getPositionCount() { + return positionCount; + } + + /** Gets the index of the first value for the given position. */ + public final int getFirstValueIndex(int position) { + return firstValueIndexes == null ? position : firstValueIndexes[position]; + } + + /** Gets the number of values for the given position, possibly 0. */ + @Override + public final int getValueCount(int position) { + return isNull(position) ? 0 : firstValueIndexes == null ? 1 : firstValueIndexes[position + 1] - firstValueIndexes[position]; + } + + @Override + public final boolean isNull(int position) { + return mayHaveNulls() && nullsMask.get(position); + } + + @Override + public final boolean mayHaveNulls() { + return nullsMask != null; + } + + @Override + public final int nullValuesCount() { + return mayHaveNulls() ? 
nullsMask.cardinality() : 0; + } + + @Override + public final boolean areAllValuesNull() { + return nullValuesCount() == getPositionCount(); + } + + static final class SubFields { + long bytesReserved = 0; + final int positionCount; + final int[] firstValueIndexes; + final BitSet nullsMask; + final MvOrdering mvOrdering; + + SubFields(BlockFactory blockFactory, StreamInput in) throws IOException { + this.positionCount = in.readVInt(); + boolean success = false; + try { + if (in.readBoolean()) { + bytesReserved += blockFactory.preAdjustBreakerForInt(positionCount + 1); + final int[] values = new int[positionCount + 1]; + values[0] = in.readVInt(); + for (int i = 1; i <= positionCount; i++) { + values[i] = values[i - 1] + in.readVInt(); + } + this.firstValueIndexes = values; + } else { + this.firstValueIndexes = null; + } + if (in.readBoolean()) { + bytesReserved += blockFactory.preAdjustBreakerForLong(positionCount / Long.BYTES); + nullsMask = BitSet.valueOf(in.readLongArray()); + } else { + nullsMask = null; + } + this.mvOrdering = in.readEnum(MvOrdering.class); + success = true; + } finally { + if (success == false) { + blockFactory.adjustBreaker(-bytesReserved); + } + } + } + + int vectorPositions() { + return firstValueIndexes == null ? positionCount : firstValueIndexes[positionCount]; + } + } + + void writeSubFields(StreamOutput out) throws IOException { + out.writeVInt(positionCount); + out.writeBoolean(firstValueIndexes != null); + if (firstValueIndexes != null) { + // firstValueIndexes are monotonic increasing + out.writeVInt(firstValueIndexes[0]); + for (int i = 1; i <= positionCount; i++) { + out.writeVInt(firstValueIndexes[i] - firstValueIndexes[i - 1]); + } + } + out.writeBoolean(nullsMask != null); + if (nullsMask != null) { + out.writeLongArray(nullsMask.toLongArray()); + } + out.writeEnum(mvOrdering); + } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractBlock.java deleted file mode 100644 index 0c5207133f71d..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractBlock.java +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.compute.data; - -import org.elasticsearch.core.Nullable; - -import java.util.BitSet; - -abstract class AbstractBlock extends AbstractNonThreadSafeRefCounted implements Block { - private final int positionCount; - - @Nullable - protected final int[] firstValueIndexes; - - @Nullable - protected final BitSet nullsMask; - - private BlockFactory blockFactory; - - /** - * @param positionCount the number of values in this block - */ - protected AbstractBlock(int positionCount, BlockFactory blockFactory) { - assert positionCount >= 0; - this.positionCount = positionCount; - this.blockFactory = blockFactory; - this.firstValueIndexes = null; - this.nullsMask = null; - assert assertInvariants(); - } - - /** - * @param positionCount the number of values in this block - */ - protected AbstractBlock(int positionCount, @Nullable int[] firstValueIndexes, @Nullable BitSet nullsMask, BlockFactory blockFactory) { - assert positionCount >= 0; - this.positionCount = positionCount; - this.blockFactory = blockFactory; - this.firstValueIndexes = firstValueIndexes; - this.nullsMask = nullsMask == null || nullsMask.isEmpty() ? null : nullsMask; - assert nullsMask != null || firstValueIndexes != null : "Create VectorBlock instead"; - assert assertInvariants(); - } - - private boolean assertInvariants() { - if (firstValueIndexes != null) { - assert firstValueIndexes.length == getPositionCount() + 1; - for (int i = 0; i < getPositionCount(); i++) { - assert (firstValueIndexes[i + 1] - firstValueIndexes[i]) >= 0; - } - } - if (nullsMask != null) { - assert nullsMask.nextSetBit(getPositionCount() + 1) == -1; - } - if (firstValueIndexes != null && nullsMask != null) { - for (int i = 0; i < getPositionCount(); i++) { - // Either we have multi-values or a null but never both. - assert ((nullsMask.get(i) == false) || (firstValueIndexes[i + 1] - firstValueIndexes[i]) == 1); - } - } - return true; - } - - @Override - public int getTotalValueCount() { - if (firstValueIndexes == null) { - return positionCount - nullValuesCount(); - } - return firstValueIndexes[positionCount] - nullValuesCount(); - } - - @Override - public final int getPositionCount() { - return positionCount; - } - - /** Gets the index of the first value for the given position. */ - public int getFirstValueIndex(int position) { - return firstValueIndexes == null ? position : firstValueIndexes[position]; - } - - /** Gets the number of values for the given position, possibly 0. */ - @Override - public int getValueCount(int position) { - return isNull(position) ? 0 : firstValueIndexes == null ? 1 : firstValueIndexes[position + 1] - firstValueIndexes[position]; - } - - @Override - public boolean isNull(int position) { - return mayHaveNulls() && nullsMask.get(position); - } - - @Override - public boolean mayHaveNulls() { - return nullsMask != null; - } - - @Override - public int nullValuesCount() { - return mayHaveNulls() ? 
nullsMask.cardinality() : 0; - } - - @Override - public boolean areAllValuesNull() { - return nullValuesCount() == getPositionCount(); - } - - @Override - public BlockFactory blockFactory() { - return blockFactory; - } - - @Override - public void allowPassingToDifferentDriver() { - blockFactory = blockFactory.parent(); - } - - @Override - public final boolean isReleased() { - return hasReferences() == false; - } -} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractNonThreadSafeRefCounted.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractNonThreadSafeRefCounted.java index 2dfd8c3eca5ac..ea023c6b46d9e 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractNonThreadSafeRefCounted.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractNonThreadSafeRefCounted.java @@ -59,6 +59,10 @@ public final void close() { decRef(); } + public final boolean isReleased() { + return hasReferences() == false; + } + /** * This is called when the number of references reaches zero. * This is where resources should be released (adjusting circuit breakers if needed). diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVector.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVector.java index 1eb2c09f78511..1e1f8bbf2f8df 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVector.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVector.java @@ -43,9 +43,4 @@ public void allowPassingToDifferentDriver() { protected void closeInternal() { blockFactory.adjustBreaker(-ramBytesUsed()); } - - @Override - public final boolean isReleased() { - return hasReferences() == false; - } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVectorBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVectorBlock.java index d33d1a1afda41..452bdad1ab192 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVectorBlock.java @@ -10,43 +10,44 @@ /** * A Block view of a Vector. 
*/ -abstract class AbstractVectorBlock extends AbstractBlock { +abstract class AbstractVectorBlock extends AbstractNonThreadSafeRefCounted implements Block { - AbstractVectorBlock(int positionCount, BlockFactory blockFactory) { - super(positionCount, blockFactory); + @Override + public final int getFirstValueIndex(int position) { + return position; } @Override - public int getFirstValueIndex(int position) { - return position; + public final int getTotalValueCount() { + return getPositionCount(); } - public int getValueCount(int position) { + public final int getValueCount(int position) { return 1; } @Override - public boolean isNull(int position) { + public final boolean isNull(int position) { return false; } @Override - public int nullValuesCount() { + public final int nullValuesCount() { return 0; } @Override - public boolean mayHaveNulls() { + public final boolean mayHaveNulls() { return false; } @Override - public boolean areAllValuesNull() { + public final boolean areAllValuesNull() { return false; } @Override - public boolean mayHaveMultivaluedFields() { + public final boolean mayHaveMultivaluedFields() { return false; } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java index 5a6d7cb4a6003..b14a27fa01930 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java @@ -239,4 +239,11 @@ static List getNamedWriteables() { ConstantNullBlock.ENTRY ); } + + /** + * Serialization type for blocks: 0 and 1 replace false/true used in pre-8.14 + */ + byte SERIALIZE_BLOCK_VALUES = 0; + byte SERIALIZE_BLOCK_VECTOR = 1; + byte SERIALIZE_BLOCK_ARRAY = 2; } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java index b093e3edd5884..eb86d01fbdf3c 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java @@ -18,12 +18,21 @@ /** * Block implementation representing a constant null value. 
*/ -final class ConstantNullBlock extends AbstractBlock implements BooleanBlock, IntBlock, LongBlock, DoubleBlock, BytesRefBlock { +final class ConstantNullBlock extends AbstractNonThreadSafeRefCounted + implements + BooleanBlock, + IntBlock, + LongBlock, + DoubleBlock, + BytesRefBlock { private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ConstantNullBlock.class); + private final int positionCount; + private BlockFactory blockFactory; ConstantNullBlock(int positionCount, BlockFactory blockFactory) { - super(positionCount, blockFactory); + this.positionCount = positionCount; + this.blockFactory = blockFactory; } @Override @@ -222,4 +231,34 @@ public long getLong(int valueIndex) { assert false : "null block"; throw new UnsupportedOperationException("null block"); } + + @Override + public int getTotalValueCount() { + return 0; + } + + @Override + public int getPositionCount() { + return positionCount; + } + + @Override + public int getFirstValueIndex(int position) { + return 0; + } + + @Override + public int getValueCount(int position) { + return 0; + } + + @Override + public BlockFactory blockFactory() { + return blockFactory; + } + + @Override + public void allowPassingToDifferentDriver() { + blockFactory = blockFactory.parent(); + } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java index 8c75c8216c59e..a58b8c34b17d5 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java @@ -20,7 +20,6 @@ public class DocBlock extends AbstractVectorBlock implements Block { private final DocVector vector; DocBlock(DocVector vector) { - super(vector.getPositionCount(), vector.blockFactory()); this.vector = vector; } @@ -190,4 +189,14 @@ public void close() { public void allowPassingToDifferentDriver() { vector.allowPassingToDifferentDriver(); } + + @Override + public int getPositionCount() { + return vector.getPositionCount(); + } + + @Override + public BlockFactory blockFactory() { + return vector.blockFactory(); + } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Vector.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Vector.java index fc09f636ac700..c309a7a0b8827 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Vector.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Vector.java @@ -75,4 +75,11 @@ interface Builder extends Releasable { * Whether this vector was released */ boolean isReleased(); + + /** + * The serialization type of vectors: 0 and 1 replaces the boolean false/true in pre-8.14. 
+ */ + byte SERIALIZE_VECTOR_VALUES = 0; + byte SERIALIZE_VECTOR_CONSTANT = 1; + byte SERIALIZE_VECTOR_ARRAY = 2; } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st index 20395ff27b1b4..a7c5f10032394 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st @@ -10,14 +10,17 @@ package org.elasticsearch.compute.data; $if(BytesRef)$ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.BytesRefArray; import org.elasticsearch.core.Releasables; $else$ import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Releasables; $endif$ +import java.io.IOException; import java.util.BitSet; /** @@ -46,26 +49,47 @@ final class $Type$ArrayBlock extends AbstractArrayBlock implements $Type$Block { positionCount, firstValueIndexes, nulls, - mvOrdering, - blockFactory + mvOrdering ); } private $Type$ArrayBlock( - $Type$ArrayVector vector, + $Type$ArrayVector vector, // stylecheck int positionCount, int[] firstValueIndexes, BitSet nulls, - MvOrdering mvOrdering, - BlockFactory blockFactory + MvOrdering mvOrdering ) { - super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); + super(positionCount, firstValueIndexes, nulls, mvOrdering); this.vector = vector; assert firstValueIndexes == null ? vector.getPositionCount() == getPositionCount() : firstValueIndexes[getPositionCount()] == vector.getPositionCount(); } + static $Type$ArrayBlock readArrayBlock(BlockFactory blockFactory, BlockStreamInput in) throws IOException { + final SubFields sub = new SubFields(blockFactory, in); + $Type$ArrayVector vector = null; + boolean success = false; + try { + vector = $Type$ArrayVector.readArrayVector(sub.vectorPositions(), in, blockFactory); + var block = new $Type$ArrayBlock(vector, sub.positionCount, sub.firstValueIndexes, sub.nullsMask, sub.mvOrdering); + blockFactory.adjustBreaker(block.ramBytesUsed() - vector.ramBytesUsed() - sub.bytesReserved); + success = true; + return block; + } finally { + if (success == false) { + Releasables.close(vector); + blockFactory.adjustBreaker(-sub.bytesReserved); + } + } + } + + void writeArrayBlock(StreamOutput out) throws IOException { + writeSubFields(out); + vector.writeArrayVector(vector.getPositionCount(), out); + } + @Override public $Type$Vector asVector() { return null; @@ -134,8 +158,7 @@ $endif$ expandedPositionCount, null, shiftNullsToExpandedPositions(), - MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, - blockFactory() + MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING ); blockFactory().adjustBreaker(expanded.ramBytesUsedOnlyBlock() - bitSetRamUsedEstimate); // We need to incRef after adjusting any breakers, otherwise we might leak the vector if the breaker trips. 
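Aside: the SERIALIZE_VECTOR_* and SERIALIZE_BLOCK_* constants make the dispatch explicit. Instead of the old single boolean (constant vs. value-by-value), the writer now emits one type byte and the reader switches on it; the new ARRAY encoding is only chosen when the receiving node's transport version supports it, otherwise the writer falls back to the pre-8.14 value-by-value form. A rough sketch of that negotiation, with a hypothetical Peer/Encoder pair standing in for StreamOutput, TransportVersion, and the concrete vector types:

import java.io.IOException;

final class TypeByteDispatchSketch {
    static final byte VALUES = 0;   // pre-8.14 fallback: positions written one by one
    static final byte CONSTANT = 1; // single value repeated for every position
    static final byte ARRAY = 2;    // bulk copy of the backing array (new encoding)

    interface Peer {                // hypothetical stand-in for StreamOutput plus its transport version
        boolean supportsArrayEncoding();
        void writeByte(byte b) throws IOException;
    }

    interface Encoder {             // hypothetical hooks for the concrete encodings
        void writeConstant(Peer out) throws IOException;
        void writeArray(Peer out) throws IOException;
        void writeValues(Peer out) throws IOException;
    }

    // Pick the richest encoding the peer understands, mirroring the Vector#writeTo shape above.
    static void write(Peer out, Encoder enc, boolean constant, boolean backedByArray) throws IOException {
        if (constant) {
            out.writeByte(CONSTANT);
            enc.writeConstant(out);
        } else if (backedByArray && out.supportsArrayEncoding()) {
            out.writeByte(ARRAY);
            enc.writeArray(out);
        } else {
            out.writeByte(VALUES);  // older peers still receive the original encoding
            enc.writeValues(out);
        }
    }
}

Because older readers never see a type byte they don't know, and newer readers keep a branch for every legacy byte value, both directions of a mixed-version cluster stay compatible during a rolling upgrade.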
@@ -179,10 +202,14 @@ $endif$ @Override public void allowPassingToDifferentDriver() { - super.allowPassingToDifferentDriver(); vector.allowPassingToDifferentDriver(); } + @Override + public BlockFactory blockFactory() { + return vector.blockFactory(); + } + @Override public void closeInternal() { blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock()); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayVector.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayVector.java.st index a02656f72e54c..b5ecb2cad4a56 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayVector.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayVector.java.st @@ -10,12 +10,19 @@ package org.elasticsearch.compute.data; $if(BytesRef)$ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.BytesRefArray; import org.elasticsearch.core.Releasables; +import java.io.IOException; + $else$ import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import java.io.IOException; import java.util.Arrays; $endif$ @@ -44,6 +51,56 @@ $endif$ this.values = values; } + static $Type$ArrayVector readArrayVector(int positions, StreamInput in, BlockFactory blockFactory) throws IOException { +$if(BytesRef)$ + final BytesRefArray values = new BytesRefArray(in, blockFactory.bigArrays()); + boolean success = false; + try { + final var block = new BytesRefArrayVector(values, positions, blockFactory); + blockFactory.adjustBreaker(block.ramBytesUsed() - values.bigArraysRamBytesUsed()); + success = true; + return block; + } finally { + if (success == false) { + values.close(); + } + } +$else$ + final long preAdjustedBytes = RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) positions * $BYTES$; + blockFactory.adjustBreaker(preAdjustedBytes); + boolean success = false; + try { + $type$[] values = new $type$[positions]; + for (int i = 0; i < positions; i++) { + values[i] = in.read$Type$(); + } + final var block = new $Type$ArrayVector(values, positions, blockFactory); + blockFactory.adjustBreaker(block.ramBytesUsed() - preAdjustedBytes); + success = true; + return block; + } finally { + if (success == false) { + blockFactory.adjustBreaker(-preAdjustedBytes); + } + } +$endif$ + } + + void writeArrayVector(int positions, StreamOutput out) throws IOException { +$if(BytesRef)$ + values.writeTo(out); +$elseif(boolean)$ + // TODO: One bit for each boolean + for (int i = 0; i < positions; i++) { + out.writeBoolean(values[i]); + } +$else$ + for (int i = 0; i < positions; i++) { + out.write$Type$(values[i]); + } +$endif$ + } + @Override public $Type$Block asBlock() { return new $Type$VectorBlock(this); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayBlock.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayBlock.java.st index d65c54b5e2b24..14ec5382f282c 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayBlock.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayBlock.java.st @@ -36,20 +36,18 @@ public final class $Type$BigArrayBlock extends 
AbstractArrayBlock implements $Ty positionCount, firstValueIndexes, nulls, - mvOrdering, - blockFactory + mvOrdering ); } private $Type$BigArrayBlock( - $Type$BigArrayVector vector, + $Type$BigArrayVector vector, // stylecheck int positionCount, int[] firstValueIndexes, BitSet nulls, - MvOrdering mvOrdering, - BlockFactory blockFactory + MvOrdering mvOrdering ) { - super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); + super(positionCount, firstValueIndexes, nulls, mvOrdering); this.vector = vector; assert firstValueIndexes == null ? vector.getPositionCount() == getPositionCount() @@ -116,8 +114,7 @@ public final class $Type$BigArrayBlock extends AbstractArrayBlock implements $Ty expandedPositionCount, null, shiftNullsToExpandedPositions(), - MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, - blockFactory() + MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING ); blockFactory().adjustBreaker(expanded.ramBytesUsedOnlyBlock() - bitSetRamUsedEstimate); // We need to incRef after adjusting any breakers, otherwise we might leak the vector if the breaker trips. @@ -161,10 +158,14 @@ public final class $Type$BigArrayBlock extends AbstractArrayBlock implements $Ty @Override public void allowPassingToDifferentDriver() { - super.allowPassingToDifferentDriver(); vector.allowPassingToDifferentDriver(); } + @Override + public BlockFactory blockFactory() { + return vector.blockFactory(); + } + @Override public void closeInternal() { blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock()); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st index c5fd7e8302776..06aed6f7b0fad 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st @@ -10,6 +10,7 @@ package org.elasticsearch.compute.data; $if(BytesRef)$ import org.apache.lucene.util.BytesRef; $endif$ +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -64,10 +65,19 @@ $endif$ } private static $Type$Block readFrom(BlockStreamInput in) throws IOException { - final boolean isVector = in.readBoolean(); - if (isVector) { - return $Type$Vector.readFrom(in.blockFactory(), in).asBlock(); - } + final byte serializationType = in.readByte(); + return switch (serializationType) { + case SERIALIZE_BLOCK_VALUES -> $Type$Block.readValues(in); + case SERIALIZE_BLOCK_VECTOR -> $Type$Vector.readFrom(in.blockFactory(), in).asBlock(); + case SERIALIZE_BLOCK_ARRAY -> $Type$ArrayBlock.readArrayBlock(in.blockFactory(), in); + default -> { + assert false : "invalid block serialization type " + serializationType; + throw new IllegalStateException("invalid serialization type " + serializationType); + } + }; + } + + private static $Type$Block readValues(BlockStreamInput in) throws IOException { final int positions = in.readVInt(); try ($Type$Block.Builder builder = in.blockFactory().new$Type$BlockBuilder(positions)) { for (int i = 0; i < positions; i++) { @@ -89,26 +99,38 @@ $endif$ @Override default void writeTo(StreamOutput out) throws IOException { $Type$Vector vector = asVector(); - out.writeBoolean(vector != null); + final var version = out.getTransportVersion(); if (vector != null) { + out.writeByte(SERIALIZE_BLOCK_VECTOR); 
vector.writeTo(out); + } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_ARRAY_BLOCK) && this instanceof $Type$ArrayBlock b) { + out.writeByte(SERIALIZE_BLOCK_ARRAY); + b.writeArrayBlock(out); } else { - final int positions = getPositionCount(); - out.writeVInt(positions); - for (int pos = 0; pos < positions; pos++) { - if (isNull(pos)) { - out.writeBoolean(true); - } else { - out.writeBoolean(false); - final int valueCount = getValueCount(pos); - out.writeVInt(valueCount); - for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { + out.writeByte(SERIALIZE_BLOCK_VALUES); + $Type$Block.writeValues(this, out); + } + } + + private static void writeValues($Type$Block block, StreamOutput out) throws IOException { + final int positions = block.getPositionCount(); + out.writeVInt(positions); + for (int pos = 0; pos < positions; pos++) { + if (block.isNull(pos)) { + out.writeBoolean(true); + } else { + out.writeBoolean(false); + final int valueCount = block.getValueCount(pos); + out.writeVInt(valueCount); $if(BytesRef)$ - out.write$Type$(get$Type$(getFirstValueIndex(pos) + valueIndex, new BytesRef())); + var scratch = new BytesRef(); +$endif$ + for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { +$if(BytesRef)$ + out.write$Type$(block.get$Type$(block.getFirstValueIndex(pos) + valueIndex, scratch)); $else$ - out.write$Type$(get$Type$(getFirstValueIndex(pos) + valueIndex)); + out.write$Type$(block.get$Type$(block.getFirstValueIndex(pos) + valueIndex)); $endif$ - } } } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Vector.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Vector.java.st index c303a8391ad18..0796801c55d40 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Vector.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Vector.java.st @@ -10,6 +10,7 @@ package org.elasticsearch.compute.data; $if(BytesRef)$ import org.apache.lucene.util.BytesRef; $endif$ +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -108,38 +109,58 @@ $endif$ /** Deserializes a Vector from the given stream input. */ static $Type$Vector readFrom(BlockFactory blockFactory, StreamInput in) throws IOException { final int positions = in.readVInt(); - final boolean constant = in.readBoolean(); - if (constant && positions > 0) { - return blockFactory.newConstant$Type$Vector(in.read$Type$(), positions); - } else { - try (var builder = blockFactory.new$Type$Vector$if(BytesRef)$$else$Fixed$endif$Builder(positions)) { - for (int i = 0; i < positions; i++) { - builder.append$Type$(in.read$Type$()); - } - return builder.build(); + final byte serializationType = in.readByte(); + return switch (serializationType) { + case SERIALIZE_VECTOR_VALUES -> readValues(positions, in, blockFactory); + case SERIALIZE_VECTOR_CONSTANT -> blockFactory.newConstant$Type$Vector(in.read$Type$(), positions); + case SERIALIZE_VECTOR_ARRAY -> $Type$ArrayVector.readArrayVector(positions, in, blockFactory); + default -> { + assert false : "invalid vector serialization type [" + serializationType + "]"; + throw new IllegalStateException("invalid vector serialization type [" + serializationType + "]"); } - } + }; } /** Serializes this Vector to the given stream output. 
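The writeTo changes above only pick the new array encoding when the destination's transport version is recent enough, and otherwise fall back to the per-position values encoding every peer understands. Here is a framework-free sketch of that negotiation pattern; the version constants, byte values, and layout are invented and do not correspond to the real TransportVersions entries.

```java
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Hypothetical sketch: use the newer bulk encoding only when the peer understands it,
// otherwise write the legacy per-position encoding.
public class VersionGatedWriter {
    static final int LEGACY = 1;
    static final int SUPPORTS_ARRAY_ENCODING = 2;

    static final byte VALUES = 0;
    static final byte ARRAY = 2;

    static void write(long[] values, int peerVersion, DataOutputStream out) throws IOException {
        out.writeInt(values.length);
        if (peerVersion >= SUPPORTS_ARRAY_ENCODING) {
            out.writeByte(ARRAY);
            for (long v : values) {
                out.writeLong(v);          // bulk copy, no per-position framing
            }
        } else {
            out.writeByte(VALUES);
            for (long v : values) {
                out.writeBoolean(false);   // not null
                out.writeInt(1);           // one value at this position
                out.writeLong(v);
            }
        }
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream oldPeer = new ByteArrayOutputStream();
        ByteArrayOutputStream newPeer = new ByteArrayOutputStream();
        long[] values = { 1, 2, 3 };
        write(values, LEGACY, new DataOutputStream(oldPeer));
        write(values, SUPPORTS_ARRAY_ENCODING, new DataOutputStream(newPeer));
        System.out.println("legacy=" + oldPeer.size() + " bytes, array=" + newPeer.size() + " bytes");
    }
}
```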
*/ default void writeTo(StreamOutput out) throws IOException { final int positions = getPositionCount(); + final var version = out.getTransportVersion(); out.writeVInt(positions); - out.writeBoolean(isConstant()); if (isConstant() && positions > 0) { + out.writeByte(SERIALIZE_VECTOR_CONSTANT); $if(BytesRef)$ out.write$Type$(get$Type$(0, new BytesRef())); $else$ out.write$Type$(get$Type$(0)); $endif$ + } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_ARRAY_VECTOR) && this instanceof $Type$ArrayVector v) { + out.writeByte(SERIALIZE_VECTOR_ARRAY); + v.writeArrayVector(positions, out); } else { + out.writeByte(SERIALIZE_VECTOR_VALUES); + writeValues(this, positions, out); + } + } + + private static $Type$Vector readValues(int positions, StreamInput in, BlockFactory blockFactory) throws IOException { + try (var builder = blockFactory.new$Type$Vector$if(BytesRef)$$else$Fixed$endif$Builder(positions)) { for (int i = 0; i < positions; i++) { + builder.append$Type$(in.read$Type$()); + } + return builder.build(); + } + } + + private static void writeValues($Type$Vector v, int positions, StreamOutput out) throws IOException { $if(BytesRef)$ - out.write$Type$(get$Type$(i, new BytesRef())); + var scratch = new BytesRef(); +$endif$ + for (int i = 0; i < positions; i++) { +$if(BytesRef)$ + out.write$Type$(v.get$Type$(i, scratch)); $else$ - out.write$Type$(get$Type$(i)); + out.write$Type$(v.get$Type$(i)); $endif$ - } } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st index 4bc3c66b65743..e3d696ddf9120 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st @@ -24,7 +24,6 @@ public final class $Type$VectorBlock extends AbstractVectorBlock implements $Typ * @param vector considered owned by the current block; must not be used in any other {@code Block} */ $Type$VectorBlock($Type$Vector vector) { - super(vector.getPositionCount(), vector.blockFactory()); this.vector = vector; } @@ -44,7 +43,7 @@ $endif$ } @Override - public int getTotalValueCount() { + public int getPositionCount() { return vector.getPositionCount(); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneCountOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneCountOperator.java index 4dda5c16295fb..d05593015211b 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneCountOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneCountOperator.java @@ -123,6 +123,7 @@ public Page getOutput() { assert remainingDocs <= 0 : remainingDocs; return null; } + long start = System.nanoTime(); try { final LuceneScorer scorer = getCurrentOrLoadNextScorer(); // no scorer means no more docs @@ -171,6 +172,8 @@ public Page getOutput() { return page; } catch (IOException e) { throw new UncheckedIOException(e); + } finally { + processingNanos += System.nanoTime() - start; } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java index 1eeedd06d058d..d43eb8c280695 100644 --- 
a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java @@ -16,6 +16,7 @@ import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; +import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -24,6 +25,7 @@ import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.compute.operator.SourceOperator; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; import org.elasticsearch.xcontent.XContentBuilder; @@ -60,6 +62,7 @@ public abstract class LuceneOperator extends SourceOperator { private LuceneScorer currentScorer; + long processingNanos; int pagesEmitted; boolean doneCollecting; @@ -198,6 +201,7 @@ public static class Status implements Operator.Status { private final int processedSlices; private final Set processedQueries; private final Set processedShards; + private final long processingNanos; private final int totalSlices; private final int pagesEmitted; private final int sliceIndex; @@ -208,6 +212,7 @@ public static class Status implements Operator.Status { private Status(LuceneOperator operator) { processedSlices = operator.processedSlices; processedQueries = operator.processedQueries.stream().map(Query::toString).collect(Collectors.toCollection(TreeSet::new)); + processingNanos = operator.processingNanos; processedShards = new TreeSet<>(operator.processedShards); sliceIndex = operator.sliceIndex; totalSlices = operator.sliceQueue.totalSlices(); @@ -233,6 +238,7 @@ private Status(LuceneOperator operator) { int processedSlices, Set processedQueries, Set processedShards, + long processingNanos, int sliceIndex, int totalSlices, int pagesEmitted, @@ -243,6 +249,7 @@ private Status(LuceneOperator operator) { this.processedSlices = processedSlices; this.processedQueries = processedQueries; this.processedShards = processedShards; + this.processingNanos = processingNanos; this.sliceIndex = sliceIndex; this.totalSlices = totalSlices; this.pagesEmitted = pagesEmitted; @@ -260,6 +267,7 @@ private Status(LuceneOperator operator) { processedQueries = Collections.emptySet(); processedShards = Collections.emptySet(); } + processingNanos = in.getTransportVersion().onOrAfter(TransportVersions.ESQL_TIMINGS) ? 
in.readVLong() : 0; sliceIndex = in.readVInt(); totalSlices = in.readVInt(); pagesEmitted = in.readVInt(); @@ -275,6 +283,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeCollection(processedQueries, StreamOutput::writeString); out.writeCollection(processedShards, StreamOutput::writeString); } + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_TIMINGS)) { + out.writeVLong(processingNanos); + } out.writeVInt(sliceIndex); out.writeVInt(totalSlices); out.writeVInt(pagesEmitted); @@ -300,6 +311,10 @@ public Set processedShards() { return processedShards; } + public long processNanos() { + return processingNanos; + } + public int sliceIndex() { return sliceIndex; } @@ -330,6 +345,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("processed_slices", processedSlices); builder.field("processed_queries", processedQueries); builder.field("processed_shards", processedShards); + builder.field("processing_nanos", processingNanos); + if (builder.humanReadable()) { + builder.field("processing_time", TimeValue.timeValueNanos(processingNanos)); + } builder.field("slice_index", sliceIndex); builder.field("total_slices", totalSlices); builder.field("pages_emitted", pagesEmitted); @@ -347,6 +366,7 @@ public boolean equals(Object o) { return processedSlices == status.processedSlices && processedQueries.equals(status.processedQueries) && processedShards.equals(status.processedShards) + && processingNanos == status.processingNanos && sliceIndex == status.sliceIndex && totalSlices == status.totalSlices && pagesEmitted == status.pagesEmitted @@ -364,6 +384,11 @@ public int hashCode() { public String toString() { return Strings.toString(this); } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.V_8_11_X; + } } static Function weightFunction(Function queryFunction, ScoreMode scoreMode) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSourceOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSourceOperator.java index 9d6e3f46d0e1e..f2ab362278c4c 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSourceOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSourceOperator.java @@ -128,6 +128,7 @@ public Page getOutput() { assert currentPagePos == 0 : currentPagePos; return null; } + long start = System.nanoTime(); try { final LuceneScorer scorer = getCurrentOrLoadNextScorer(); if (scorer == null) { @@ -163,6 +164,8 @@ public Page getOutput() { return page; } catch (IOException e) { throw new UncheckedIOException(e); + } finally { + processingNanos += System.nanoTime() - start; } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperator.java index 8cb9173adc197..df95e49ab2492 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperator.java @@ -140,10 +140,15 @@ public Page getOutput() { if (isFinished()) { return null; } - if (isEmitting()) { - return emit(false); - } else { - return collect(); + long start = System.nanoTime(); + try { + if (isEmitting()) { + return emit(false); + } else { + 
return collect(); + } + } finally { + processingNanos += System.nanoTime() - start; } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java new file mode 100644 index 0000000000000..b1211c8ea5ff4 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java @@ -0,0 +1,343 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.lucene; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SortedDocValues; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Weight; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.PriorityQueue; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.DocVector; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.SourceOperator; +import org.elasticsearch.core.Releasables; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.List; +import java.util.function.Function; + +/** + * Creates a source operator that takes advantage of the natural sorting of segments in a tsdb index. + *

    + * This source operator loads the _tsid and @timestamp fields, which are used for emitting documents in the correct order. These field values + * are included in the page as separate blocks and downstream operators can make use of these loaded time series ids and timestamps. +

    + * The source operator includes all documents of a time series within the same page, so the same time series never exists in multiple pages. + * Downstream operators can make use of this implementation detail. +

    + * This operator currently only supports shard level concurrency. A new concurrency mechanism should be introduced at the time serie level + * in order to read tsdb indices in parallel. + */ +public record TimeSeriesSortedSourceOperatorFactory(int limit, int maxPageSize, int taskConcurrency, LuceneSliceQueue sliceQueue) + implements + LuceneOperator.Factory { + + @Override + public SourceOperator get(DriverContext driverContext) { + return new Impl(driverContext.blockFactory(), sliceQueue, maxPageSize, limit); + } + + @Override + public int taskConcurrency() { + return taskConcurrency; + } + + @Override + public String describe() { + return "TimeSeriesSortedSourceOperator[maxPageSize = " + maxPageSize + ", limit = " + limit + "]"; + } + + public static TimeSeriesSortedSourceOperatorFactory create( + int limit, + int maxPageSize, + int taskConcurrency, + List searchContexts, + Function queryFunction + ) { + var weightFunction = LuceneOperator.weightFunction(queryFunction, ScoreMode.COMPLETE_NO_SCORES); + var sliceQueue = LuceneSliceQueue.create(searchContexts, weightFunction, DataPartitioning.SHARD, taskConcurrency); + taskConcurrency = Math.min(sliceQueue.totalSlices(), taskConcurrency); + return new TimeSeriesSortedSourceOperatorFactory(limit, maxPageSize, taskConcurrency, sliceQueue); + } + + static final class Impl extends SourceOperator { + + private final int maxPageSize; + private final BlockFactory blockFactory; + private final LuceneSliceQueue sliceQueue; + private int currentPagePos = 0; + private int remainingDocs; + private boolean doneCollecting; + private IntVector.Builder docsBuilder; + private IntVector.Builder segmentsBuilder; + private LongVector.Builder timestampIntervalBuilder; + // TODO: handle when a time series spans across backing indices + // In that case we need to bytes representation of the tsid + private IntVector.Builder tsOrdBuilder; + private TimeSeriesIterator iterator; + + Impl(BlockFactory blockFactory, LuceneSliceQueue sliceQueue, int maxPageSize, int limit) { + this.maxPageSize = maxPageSize; + this.blockFactory = blockFactory; + this.remainingDocs = limit; + this.docsBuilder = blockFactory.newIntVectorBuilder(Math.min(limit, maxPageSize)); + this.segmentsBuilder = null; + this.timestampIntervalBuilder = blockFactory.newLongVectorBuilder(Math.min(limit, maxPageSize)); + this.tsOrdBuilder = blockFactory.newIntVectorBuilder(Math.min(limit, maxPageSize)); + this.sliceQueue = sliceQueue; + } + + @Override + public void finish() { + this.doneCollecting = true; + } + + @Override + public boolean isFinished() { + return doneCollecting; + } + + @Override + public Page getOutput() { + if (isFinished()) { + return null; + } + + if (remainingDocs <= 0) { + doneCollecting = true; + return null; + } + + Page page = null; + IntBlock shard = null; + IntVector leaf = null; + IntVector docs = null; + LongVector timestampIntervals = null; + IntVector tsids = null; + try { + if (iterator == null) { + var slice = sliceQueue.nextSlice(); + if (slice == null) { + doneCollecting = true; + return null; + } + if (segmentsBuilder == null && slice.numLeaves() > 1) { + segmentsBuilder = blockFactory.newIntVectorBuilder(Math.min(remainingDocs, maxPageSize)); + } + iterator = new TimeSeriesIterator(slice); + } + iterator.consume(); + shard = blockFactory.newConstantIntBlockWith(iterator.slice.shardContext().index(), currentPagePos); + boolean singleSegmentNonDecreasing; + if (iterator.slice.numLeaves() == 1) { + singleSegmentNonDecreasing = true; + int segmentOrd = 
iterator.slice.getLeaf(0).leafReaderContext().ord; + leaf = blockFactory.newConstantIntBlockWith(segmentOrd, currentPagePos).asVector(); + } else { + // Due to the multi segment nature of time series source operator singleSegmentNonDecreasing must be false + singleSegmentNonDecreasing = false; + leaf = segmentsBuilder.build(); + segmentsBuilder = blockFactory.newIntVectorBuilder(Math.min(remainingDocs, maxPageSize)); + } + docs = docsBuilder.build(); + docsBuilder = blockFactory.newIntVectorBuilder(Math.min(remainingDocs, maxPageSize)); + + timestampIntervals = timestampIntervalBuilder.build(); + timestampIntervalBuilder = blockFactory.newLongVectorBuilder(Math.min(remainingDocs, maxPageSize)); + tsids = tsOrdBuilder.build(); + tsOrdBuilder = blockFactory.newIntVectorBuilder(Math.min(remainingDocs, maxPageSize)); + + page = new Page( + currentPagePos, + new DocVector(shard.asVector(), leaf, docs, singleSegmentNonDecreasing).asBlock(), + tsids.asBlock(), + timestampIntervals.asBlock() + ); + + currentPagePos = 0; + if (iterator.completed()) { + iterator = null; + } + } catch (IOException e) { + throw new UncheckedIOException(e); + } finally { + if (page == null) { + Releasables.closeExpectNoException(shard, leaf, docs, timestampIntervals, tsids); + } + } + return page; + } + + @Override + public void close() { + Releasables.closeExpectNoException(docsBuilder, segmentsBuilder, timestampIntervalBuilder, tsOrdBuilder); + } + + class TimeSeriesIterator { + + final LuceneSlice slice; + final Leaf leaf; + final PriorityQueue queue; + int globalTsidOrd; + BytesRef currentTsid = new BytesRef(); + + TimeSeriesIterator(LuceneSlice slice) throws IOException { + this.slice = slice; + Weight weight = slice.weight().get(); + if (slice.numLeaves() == 1) { + queue = null; + leaf = new Leaf(weight, slice.getLeaf(0).leafReaderContext()); + } else { + queue = new PriorityQueue<>(slice.numLeaves()) { + @Override + protected boolean lessThan(Leaf a, Leaf b) { + // tsid hash in ascending order: + int cmp = a.timeSeriesHash.compareTo(b.timeSeriesHash); + if (cmp == 0) { + // timestamp in descending order: + cmp = -Long.compare(a.timestamp, b.timestamp); + } + return cmp < 0; + } + }; + leaf = null; + for (var leafReaderContext : slice.leaves()) { + Leaf leaf = new Leaf(weight, leafReaderContext.leafReaderContext()); + if (leaf.nextDoc()) { + queue.add(leaf); + } + } + } + } + + void consume() throws IOException { + if (queue != null) { + currentTsid = BytesRef.deepCopyOf(queue.top().timeSeriesHash); + boolean breakOnNextTsidChange = false; + while (queue.size() > 0) { + if (remainingDocs <= 0) { + break; + } + if (currentPagePos > maxPageSize) { + breakOnNextTsidChange = true; + } + + currentPagePos++; + remainingDocs--; + Leaf leaf = queue.top(); + segmentsBuilder.appendInt(leaf.segmentOrd); + docsBuilder.appendInt(leaf.iterator.docID()); + timestampIntervalBuilder.appendLong(leaf.timestamp); + tsOrdBuilder.appendInt(globalTsidOrd); + if (leaf.nextDoc()) { + // TODO: updating the top is one of the most expensive parts of this operation. + // Ideally we would do this a less as possible. Maybe the top can be updated every N docs? 
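As an aside, the merge this iterator performs can be distilled into a few lines with java.util.PriorityQueue: per-segment streams already sorted by (tsid ascending, timestamp descending) are interleaved, and a page is only cut on a tsid boundary once the soft page-size limit has been exceeded, so a single time series never spans pages. The sketch below uses toy data and invented names; it mirrors the idea, not the actual Lucene-backed implementation.

```java
import java.util.ArrayDeque;
import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;

// Toy sketch of the merge: each "segment" yields docs sorted by tsid ascending then
// timestamp descending, a priority queue interleaves them, and a page is cut on a tsid
// boundary once the soft limit is exceeded. All names and data are invented.
public class TsidMergeSketch {
    record Doc(String tsid, long timestamp) {}

    public static void main(String[] args) {
        List<ArrayDeque<Doc>> segments = List.of(
            new ArrayDeque<>(List.of(new Doc("a", 30), new Doc("a", 10), new Doc("b", 20))),
            new ArrayDeque<>(List.of(new Doc("a", 20), new Doc("b", 40), new Doc("b", 30)))
        );
        // tsid ascending, then timestamp descending - the tsdb index sort order
        PriorityQueue<ArrayDeque<Doc>> queue = new PriorityQueue<>(
            Comparator.comparing((ArrayDeque<Doc> s) -> s.peek().tsid()).thenComparing(s -> -s.peek().timestamp())
        );
        for (ArrayDeque<Doc> segment : segments) {
            if (segment.isEmpty() == false) {
                queue.add(segment);
            }
        }
        int maxPageSize = 2; // soft limit: a page may grow past it so a tsid is never split
        int currentPagePos = 0;
        String currentTsid = queue.peek().peek().tsid();
        while (queue.isEmpty() == false) {
            ArrayDeque<Doc> top = queue.poll();
            System.out.println(top.poll());
            currentPagePos++;
            if (top.isEmpty() == false) {
                queue.add(top); // re-insert so ordering is recomputed for its next doc
            }
            String nextTsid = queue.isEmpty() ? null : queue.peek().peek().tsid();
            if (nextTsid == null || nextTsid.equals(currentTsid) == false) {
                if (nextTsid == null || currentPagePos > maxPageSize) {
                    System.out.println("-- emit page with " + currentPagePos + " docs --");
                    currentPagePos = 0;
                }
                currentTsid = nextTsid; // several small tsids may share a page; one tsid never spans pages
            }
        }
    }
}
```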
+ Leaf newTop = queue.updateTop(); + if (newTop.timeSeriesHash.equals(currentTsid) == false) { + globalTsidOrd++; + currentTsid = BytesRef.deepCopyOf(newTop.timeSeriesHash); + if (breakOnNextTsidChange) { + break; + } + } + } else { + queue.pop(); + } + } + } else { + int previousTsidOrd = leaf.timeSeriesHashOrd; + boolean breakOnNextTsidChange = false; + // Only one segment, so no need to use priority queue and use segment ordinals as tsid ord. + while (leaf.nextDoc()) { + if (remainingDocs <= 0) { + break; + } + if (currentPagePos > maxPageSize) { + breakOnNextTsidChange = true; + } + if (breakOnNextTsidChange) { + if (previousTsidOrd != leaf.timeSeriesHashOrd) { + break; + } + } + + currentPagePos++; + remainingDocs--; + + tsOrdBuilder.appendInt(leaf.timeSeriesHashOrd); + timestampIntervalBuilder.appendLong(leaf.timestamp); + // Don't append segment ord, because there is only one segment. + docsBuilder.appendInt(leaf.iterator.docID()); + previousTsidOrd = leaf.timeSeriesHashOrd; + } + } + } + + boolean completed() { + if (queue != null) { + return iterator.queue.size() == 0; + } else { + return leaf.iterator.docID() == DocIdSetIterator.NO_MORE_DOCS; + } + } + + static class Leaf { + + private final int segmentOrd; + private final SortedDocValues tsids; + private final SortedNumericDocValues timestamps; + private final DocIdSetIterator iterator; + + private long timestamp; + private int timeSeriesHashOrd; + private BytesRef timeSeriesHash; + + Leaf(Weight weight, LeafReaderContext leaf) throws IOException { + this.segmentOrd = leaf.ord; + tsids = leaf.reader().getSortedDocValues("_tsid"); + timestamps = leaf.reader().getSortedNumericDocValues("@timestamp"); + iterator = weight.scorer(leaf).iterator(); + } + + boolean nextDoc() throws IOException { + int docID = iterator.nextDoc(); + if (docID == DocIdSetIterator.NO_MORE_DOCS) { + return false; + } + + boolean advanced = tsids.advanceExact(iterator.docID()); + assert advanced; + timeSeriesHashOrd = tsids.ordValue(); + timeSeriesHash = tsids.lookupOrd(timeSeriesHashOrd); + advanced = timestamps.advanceExact(iterator.docID()); + assert advanced; + timestamp = timestamps.nextValue(); + return true; + } + + } + + } + + @Override + public String toString() { + return this.getClass().getSimpleName() + "[" + "maxPageSize=" + maxPageSize + ", remainingDocs=" + remainingDocs + "]"; + } + + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java index b9be899cec4f3..08be21f95786f 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java @@ -475,8 +475,8 @@ public String toString() { } @Override - protected Status status(int pagesProcessed) { - return new Status(new TreeMap<>(readersBuilt), pagesProcessed); + protected Status status(long processNanos, int pagesProcessed) { + return new Status(new TreeMap<>(readersBuilt), processNanos, pagesProcessed); } public static class Status extends AbstractPageMappingOperator.Status { @@ -488,8 +488,8 @@ public static class Status extends AbstractPageMappingOperator.Status { private final Map readersBuilt; - Status(Map readersBuilt, int pagesProcessed) { - super(pagesProcessed); + Status(Map readersBuilt, long processNanos, int pagesProcessed) { + super(processNanos, 
pagesProcessed); this.readersBuilt = readersBuilt; } @@ -521,21 +521,20 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(e.getKey(), e.getValue()); } builder.endObject(); - builder.field("pages_processed", pagesProcessed()); + innerToXContent(builder); return builder.endObject(); } @Override public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + if (super.equals(o) == false) return false; Status status = (Status) o; - return pagesProcessed() == status.pagesProcessed() && readersBuilt.equals(status.readersBuilt); + return readersBuilt.equals(status.readersBuilt); } @Override public int hashCode() { - return Objects.hash(readersBuilt, pagesProcessed()); + return Objects.hash(super.hashCode(), readersBuilt); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AbstractPageMappingOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AbstractPageMappingOperator.java index 5924e4086c743..800b648711f26 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AbstractPageMappingOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AbstractPageMappingOperator.java @@ -7,12 +7,15 @@ package org.elasticsearch.compute.operator; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.data.Page; import org.elasticsearch.core.Releasables; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; @@ -25,6 +28,11 @@ public abstract class AbstractPageMappingOperator implements Operator { private Page prev; private boolean finished = false; + /** + * Number of milliseconds this operation has run. + */ + private long processNanos; + /** * Count of pages that have been processed by this operator. */ @@ -64,19 +72,21 @@ public final Page getOutput() { if (prev.getPositionCount() == 0) { return prev; } - pagesProcessed++; + long start = System.nanoTime(); Page p = process(prev); + pagesProcessed++; + processNanos += System.nanoTime() - start; prev = null; return p; } @Override public final Status status() { - return status(pagesProcessed); + return status(processNanos, pagesProcessed); } - protected Status status(int pagesProcessed) { - return new Status(pagesProcessed); + protected Status status(long processNanos, int pagesProcessed) { + return new Status(processNanos, pagesProcessed); } @Override @@ -93,18 +103,24 @@ public static class Status implements Operator.Status { Status::new ); + private final long processNanos; private final int pagesProcessed; - public Status(int pagesProcessed) { + public Status(long processNanos, int pagesProcessed) { + this.processNanos = processNanos; this.pagesProcessed = pagesProcessed; } protected Status(StreamInput in) throws IOException { + processNanos = in.getTransportVersion().onOrAfter(TransportVersions.ESQL_TIMINGS) ? 
in.readVLong() : 0; pagesProcessed = in.readVInt(); } @Override public void writeTo(StreamOutput out) throws IOException { + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_TIMINGS)) { + out.writeVLong(processNanos); + } out.writeVInt(pagesProcessed); } @@ -117,29 +133,50 @@ public int pagesProcessed() { return pagesProcessed; } + public long processNanos() { + return processNanos; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field("pages_processed", pagesProcessed); + innerToXContent(builder); return builder.endObject(); } + /** + * Render the body of the object for this status. Protected so subclasses + * can call it to render the "default" body. + */ + protected final XContentBuilder innerToXContent(XContentBuilder builder) throws IOException { + builder.field("process_nanos", processNanos); + if (builder.humanReadable()) { + builder.field("process_time", TimeValue.timeValueNanos(processNanos)); + } + return builder.field("pages_processed", pagesProcessed); + } + @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Status status = (Status) o; - return pagesProcessed == status.pagesProcessed; + return processNanos == status.processNanos && pagesProcessed == status.pagesProcessed; } @Override public int hashCode() { - return Objects.hash(pagesProcessed); + return Objects.hash(processNanos, pagesProcessed); } @Override public String toString() { return Strings.toString(this); } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.V_8_11_X; + } } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AggregationOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AggregationOperator.java index 07d1809262c9b..20d3f0166f1cb 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AggregationOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AggregationOperator.java @@ -7,13 +7,22 @@ package org.elasticsearch.compute.operator; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.aggregation.Aggregator; import org.elasticsearch.compute.aggregation.Aggregator.Factory; import org.elasticsearch.compute.aggregation.AggregatorMode; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.Page; import org.elasticsearch.core.Releasables; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.xcontent.XContentBuilder; +import java.io.IOException; import java.util.Arrays; import java.util.List; import java.util.Objects; @@ -36,6 +45,15 @@ public class AggregationOperator implements Operator { private final List aggregators; private final DriverContext driverContext; + /** + * Nanoseconds this operator has spent running the aggregations. + */ + private long aggregationNanos; + /** + * Count of pages this operator has processed. 
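The read/write pair above is the usual pattern for adding a field to an existing wire format: the new counter is written only to peers whose transport version includes it, and readers substitute a default when the sender predates it. A minimal sketch follows, with invented version constants standing in for the real TransportVersions.ESQL_TIMINGS gate.

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Illustrative compatibility idiom: write new fields only to new peers, default them on read.
public class StatusWireCompat {
    static final int V_OLD = 1;
    static final int V_TIMINGS = 2; // stands in for a "timings supported" transport version

    static void write(long processNanos, int pagesProcessed, int peerVersion, DataOutputStream out) throws IOException {
        if (peerVersion >= V_TIMINGS) {
            out.writeLong(processNanos);      // only peers that know the field receive it
        }
        out.writeInt(pagesProcessed);
    }

    static long[] read(int senderVersion, DataInputStream in) throws IOException {
        long processNanos = senderVersion >= V_TIMINGS ? in.readLong() : 0; // default for old senders
        int pagesProcessed = in.readInt();
        return new long[] { processNanos, pagesProcessed };
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        write(123_456L, 7, V_OLD, new DataOutputStream(bytes)); // old peer: timing omitted
        long[] decoded = read(V_OLD, new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(decoded[0] + " " + decoded[1]); // 0 7
    }
}
```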
+ */ + private int pagesProcessed; + public record AggregationOperatorFactory(List aggregators, AggregatorMode mode) implements OperatorFactory { @Override @@ -72,6 +90,7 @@ public boolean needsInput() { @Override public void addInput(Page page) { + long start = System.nanoTime(); checkState(needsInput(), "Operator is already finishing"); requireNonNull(page, "page is null"); try { @@ -80,6 +99,8 @@ public void addInput(Page page) { } } finally { page.releaseBlocks(); + aggregationNanos += System.nanoTime() - start; + pagesProcessed++; } } @@ -150,4 +171,101 @@ public String toString() { sb.append("aggregators=").append(aggregators).append("]"); return sb.toString(); } + + @Override + public Operator.Status status() { + return new Status(aggregationNanos, pagesProcessed); + } + + public static class Status implements Operator.Status { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + Operator.Status.class, + "agg", + Status::new + ); + + /** + * Nanoseconds this operator has spent running the aggregations. + */ + private final long aggregationNanos; + /** + * Count of pages this operator has processed. + */ + private final int pagesProcessed; + + /** + * Build. + * @param aggregationNanos Nanoseconds this operator has spent running the aggregations. + * @param pagesProcessed Count of pages this operator has processed. + */ + public Status(long aggregationNanos, int pagesProcessed) { + this.aggregationNanos = aggregationNanos; + this.pagesProcessed = pagesProcessed; + } + + protected Status(StreamInput in) throws IOException { + aggregationNanos = in.readVLong(); + pagesProcessed = in.readVInt(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(aggregationNanos); + out.writeVInt(pagesProcessed); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + + /** + * Nanoseconds this operator has spent running the aggregations. + */ + public long aggregationNanos() { + return aggregationNanos; + } + + /** + * Count of pages this operator has processed. 
+ */ + public int pagesProcessed() { + return pagesProcessed; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("aggregation_nanos", aggregationNanos); + if (builder.humanReadable()) { + builder.field("aggregation_time", TimeValue.timeValueNanos(aggregationNanos)); + } + builder.field("pages_processed", pagesProcessed); + return builder.endObject(); + + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Status status = (Status) o; + return aggregationNanos == status.aggregationNanos && pagesProcessed == status.pagesProcessed; + } + + @Override + public int hashCode() { + return Objects.hash(aggregationNanos, pagesProcessed); + } + + @Override + public String toString() { + return Strings.toString(this); + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.ESQL_TIMINGS; + } + } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java index bcab6a39496fd..061cefc86bed0 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java @@ -8,16 +8,27 @@ package org.elasticsearch.compute.operator; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.SubscribableListener; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.seqno.LocalCheckpointTracker; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.tasks.TaskCancelledException; +import org.elasticsearch.xcontent.XContentBuilder; +import java.io.IOException; import java.util.Map; +import java.util.Objects; import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.atomic.LongAdder; /** * {@link AsyncOperator} performs an external computation specified in {@link #performAsync(Page, ActionListener)}. 
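The aggregation timing added above follows the same idiom as the Lucene source operators and AbstractPageMappingOperator: capture System.nanoTime() before the work and accumulate the delta in a finally block so the time is counted even when the work throws. A stripped-down sketch, with stand-in types rather than the real Operator and Page interfaces:

```java
// Stripped-down sketch of the timing idiom used by the operators in this change.
public class TimedWorker {
    private long processNanos;
    private int pagesProcessed;

    void addInput(Runnable pageWork) {
        long start = System.nanoTime();
        try {
            pageWork.run();
        } finally {
            processNanos += System.nanoTime() - start; // counted even on failure
            pagesProcessed++;
        }
    }

    public static void main(String[] args) {
        TimedWorker worker = new TimedWorker();
        worker.addInput(() -> {
            long sum = 0;                      // simulate some per-page work
            for (int i = 0; i < 1_000_000; i++) {
                sum += i;
            }
        });
        System.out.println("pages=" + worker.pagesProcessed + " nanos=" + worker.processNanos);
    }
}
```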
@@ -33,6 +44,7 @@ public abstract class AsyncOperator implements Operator { private final DriverContext driverContext; private final int maxOutstandingRequests; + private final LongAdder totalTimeInNanos = new LongAdder(); private boolean finished = false; private volatile boolean closed = false; @@ -81,7 +93,11 @@ public void addInput(Page input) { onFailure(e); onSeqNoCompleted(seqNo); }); - performAsync(input, ActionListener.runAfter(listener, driverContext::removeAsyncAction)); + final long startNanos = System.nanoTime(); + performAsync(input, ActionListener.runAfter(listener, () -> { + driverContext.removeAsyncAction(); + totalTimeInNanos.add(System.nanoTime() - startNanos); + })); success = true; } finally { if (success == false) { @@ -224,4 +240,107 @@ public SubscribableListener isBlocked() { return blockedFuture; } } + + @Override + public final Operator.Status status() { + return status( + Math.max(0L, checkpoint.getMaxSeqNo()), + Math.max(0L, checkpoint.getProcessedCheckpoint()), + TimeValue.timeValueNanos(totalTimeInNanos.sum()).millis() + ); + } + + protected Operator.Status status(long receivedPages, long completedPages, long totalTimeInMillis) { + return new Status(receivedPages, completedPages, totalTimeInMillis); + } + + public static class Status implements Operator.Status { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + Operator.Status.class, + "async_operator", + Status::new + ); + + final long receivedPages; + final long completedPages; + final long totalTimeInMillis; + + protected Status(long receivedPages, long completedPages, long totalTimeInMillis) { + this.receivedPages = receivedPages; + this.completedPages = completedPages; + this.totalTimeInMillis = totalTimeInMillis; + } + + protected Status(StreamInput in) throws IOException { + this.receivedPages = in.readVLong(); + this.completedPages = in.readVLong(); + this.totalTimeInMillis = in.readVLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(receivedPages); + out.writeVLong(completedPages); + out.writeVLong(totalTimeInMillis); + } + + public long receivedPages() { + return receivedPages; + } + + public long completedPages() { + return completedPages; + } + + public long totalTimeInMillis() { + return totalTimeInMillis; + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + innerToXContent(builder); + return builder.endObject(); + } + + protected final XContentBuilder innerToXContent(XContentBuilder builder) throws IOException { + builder.field("received_pages", receivedPages); + builder.field("completed_pages", completedPages); + builder.field("total_time_in_millis", totalTimeInMillis); + if (totalTimeInMillis >= 0) { + builder.field("total_time", TimeValue.timeValueMillis(totalTimeInMillis)); + } + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Status status = (Status) o; + return receivedPages == status.receivedPages + && completedPages == status.completedPages + && totalTimeInMillis == status.totalTimeInMillis; + } + + @Override + public int hashCode() { + return Objects.hash(receivedPages, completedPages, totalTimeInMillis); + } + + @Override + public String toString() { + return Strings.toString(this); + } + + @Override + public 
TransportVersion getMinimalSupportedVersion() { + return TransportVersions.ESQL_ENRICH_OPERATOR_STATUS; + } + } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Driver.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Driver.java index 3e9793ef87b2a..2537809fbd8ec 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Driver.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Driver.java @@ -26,6 +26,7 @@ import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.LongSupplier; import java.util.function.Supplier; import java.util.stream.Collectors; @@ -49,6 +50,22 @@ public class Driver implements Releasable, Describable { public static final TimeValue DEFAULT_STATUS_INTERVAL = TimeValue.timeValueSeconds(1); private final String sessionId; + + /** + * The wall clock time when this driver was created in milliseconds since epoch. + * Compared to {@link #startNanos} this is less accurate and is measured by a + * timer that can go backwards. This is only useful for presenting times to a + * user, like over the status API. + */ + private final long startTime; + + /** + * The time when this driver was created in nanos. This time is relative to + * some arbitrary point - imagine its program startup. The timer that generates + * this is monotonically increasing so even if NTP or something changes the + * clock it won't change. As such, this is only useful for measuring durations. + */ + private final long startNanos; private final DriverContext driverContext; private final Supplier description; private final List activeOperators; @@ -69,6 +86,13 @@ public class Driver implements Releasable, Describable { */ private final AtomicReference status; + /** + * The time this driver finished. Only set once the driver is finished, defaults to 0 + * which is *possibly* a valid value, so always use the driver status to check + * if the driver is actually finished. + */ + private long finishNanos; + /** * Creates a new driver with a chain of operators. 
* @param sessionId session Id @@ -81,6 +105,8 @@ public class Driver implements Releasable, Describable { */ public Driver( String sessionId, + long startTime, + long startNanos, DriverContext driverContext, Supplier description, SourceOperator source, @@ -90,6 +116,8 @@ public Driver( Releasable releasable ) { this.sessionId = sessionId; + this.startTime = startTime; + this.startNanos = startNanos; this.driverContext = driverContext; this.description = description; this.activeOperators = new ArrayList<>(); @@ -99,7 +127,7 @@ public Driver( this.statusNanos = statusInterval.nanos(); this.releasable = releasable; this.status = new AtomicReference<>( - new DriverStatus(sessionId, System.currentTimeMillis(), DriverStatus.Status.QUEUED, List.of(), List.of()) + new DriverStatus(sessionId, startTime, System.currentTimeMillis(), 0, 0, DriverStatus.Status.QUEUED, List.of(), List.of()) ); } @@ -118,7 +146,18 @@ public Driver( SinkOperator sink, Releasable releasable ) { - this("unset", driverContext, () -> null, source, intermediateOperators, sink, DEFAULT_STATUS_INTERVAL, releasable); + this( + "unset", + System.currentTimeMillis(), + System.nanoTime(), + driverContext, + () -> null, + source, + intermediateOperators, + sink, + DEFAULT_STATUS_INTERVAL, + releasable + ); } public DriverContext driverContext() { @@ -130,38 +169,39 @@ public DriverContext driverContext() { * Returns a blocked future when the chain of operators is blocked, allowing the caller * thread to do other work instead of blocking or busy-spinning on the blocked operator. */ - private SubscribableListener run(TimeValue maxTime, int maxIterations) { + SubscribableListener run(TimeValue maxTime, int maxIterations, LongSupplier nowSupplier) { long maxTimeNanos = maxTime.nanos(); - long startTime = System.nanoTime(); + long startTime = nowSupplier.getAsLong(); long nextStatus = startTime + statusNanos; int iter = 0; - while (isFinished() == false) { + while (true) { SubscribableListener fut = runSingleLoopIteration(); + iter++; if (fut.isDone() == false) { - status.set(updateStatus(DriverStatus.Status.ASYNC)); + updateStatus(nowSupplier.getAsLong() - startTime, iter, DriverStatus.Status.ASYNC); return fut; } + if (isFinished()) { + finishNanos = nowSupplier.getAsLong(); + updateStatus(finishNanos - startTime, iter, DriverStatus.Status.DONE); + driverContext.finish(); + Releasables.close(releasable, driverContext.getSnapshot()); + return Operator.NOT_BLOCKED; + } + long now = nowSupplier.getAsLong(); if (iter >= maxIterations) { - break; + updateStatus(now - startTime, iter, DriverStatus.Status.WAITING); + return Operator.NOT_BLOCKED; + } + if (now - startTime >= maxTimeNanos) { + updateStatus(now - startTime, iter, DriverStatus.Status.WAITING); + return Operator.NOT_BLOCKED; } - long now = System.nanoTime(); if (now > nextStatus) { - status.set(updateStatus(DriverStatus.Status.RUNNING)); + updateStatus(now - startTime, iter, DriverStatus.Status.RUNNING); nextStatus = now + statusNanos; } - iter++; - if (now - startTime > maxTimeNanos) { - break; - } - } - if (isFinished()) { - status.set(updateStatus(DriverStatus.Status.DONE)); - driverContext.finish(); - Releasables.close(releasable, driverContext.getSnapshot()); - } else { - status.set(updateStatus(DriverStatus.Status.WAITING)); } - return Operator.NOT_BLOCKED; } /** @@ -180,6 +220,7 @@ public void close() { * Abort the driver and wait for it to finish */ public void abort(Exception reason, ActionListener listener) { + finishNanos = System.nanoTime(); 
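The two Driver fields documented above separate wall-clock time, used only for presenting a start timestamp, from the monotonic nanoTime clock, used only for measuring durations, because the wall clock can jump backwards under NTP adjustments. A minimal illustration of that split, with invented names:

```java
// Minimal illustration of the two clocks: wall-clock millis for display, nanoTime for durations.
public class TwoClocks {
    final long startMillis = System.currentTimeMillis(); // display only
    final long startNanos = System.nanoTime();           // durations only

    long elapsedNanos() {
        return System.nanoTime() - startNanos;           // unaffected by wall-clock adjustments
    }

    public static void main(String[] args) throws InterruptedException {
        TwoClocks clocks = new TwoClocks();
        Thread.sleep(50);
        System.out.println("started at epoch millis " + clocks.startMillis);
        System.out.println("elapsed nanos " + clocks.elapsedNanos());
    }
}
```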
completionListener.addListener(listener); if (started.compareAndSet(false, true)) { drainAndCloseOperators(reason); @@ -286,7 +327,7 @@ public static void start( ) { driver.completionListener.addListener(listener); if (driver.started.compareAndSet(false, true)) { - driver.status.set(driver.updateStatus(DriverStatus.Status.STARTING)); + driver.updateStatus(0, 0, DriverStatus.Status.STARTING); schedule(DEFAULT_TIME_BEFORE_YIELDING, maxIterations, threadContext, executor, driver, driver.completionListener); } } @@ -324,7 +365,7 @@ protected void doRun() { onComplete(listener); return; } - SubscribableListener fut = driver.run(maxTime, maxIterations); + SubscribableListener fut = driver.run(maxTime, maxIterations, System::nanoTime); if (fut.isDone()) { schedule(maxTime, maxIterations, threadContext, executor, driver, listener); } else { @@ -384,23 +425,42 @@ public String sessionId() { /** * Get the last status update from the driver. These updates are made * when the driver is queued and after every - * processing {@link #run(TimeValue, int) batch}. + * processing {@link #run batch}. */ public DriverStatus status() { return status.get(); } + /** + * Build a "profile" of this driver's operations after it's been completed. + * This doesn't make sense to call before the driver is done. + */ + public DriverProfile profile() { + DriverStatus status = status(); + if (status.status() != DriverStatus.Status.DONE) { + throw new IllegalStateException("can only get profile from finished driver"); + } + return new DriverProfile(finishNanos - startNanos, status.cpuNanos(), status.iterations(), status.completedOperators()); + } + /** * Update the status. + * @param extraCpuNanos how many cpu nanoseconds to add to the previous status + * @param extraIterations how many iterations to add to the previous status * @param status the status of the overall driver request */ - private DriverStatus updateStatus(DriverStatus.Status status) { - return new DriverStatus( - sessionId, - System.currentTimeMillis(), - status, - statusOfCompletedOperators, - activeOperators.stream().map(op -> new DriverStatus.OperatorStatus(op.toString(), op.status())).toList() - ); + private void updateStatus(long extraCpuNanos, int extraIterations, DriverStatus.Status status) { + this.status.getAndUpdate(prev -> { + return new DriverStatus( + sessionId, + startTime, + System.currentTimeMillis(), + prev.cpuNanos() + extraCpuNanos, + prev.iterations() + extraIterations, + status, + statusOfCompletedOperators, + activeOperators.stream().map(op -> new DriverStatus.OperatorStatus(op.toString(), op.status())).toList() + ); + }); } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverProfile.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverProfile.java index d82ddc1899b1c..00c3771540867 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverProfile.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverProfile.java @@ -7,12 +7,15 @@ package org.elasticsearch.compute.operator; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import 
org.elasticsearch.common.xcontent.ChunkedToXContentObject; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.xcontent.ToXContent; import java.io.IOException; @@ -24,35 +27,99 @@ * Profile results from a single {@link Driver}. */ public class DriverProfile implements Writeable, ChunkedToXContentObject { + /** + * Nanos between creation and completion of the {@link Driver}. + */ + private final long tookNanos; + + /** + * Nanos this {@link Driver} has been running on the cpu. Does not + * include async or waiting time. + */ + private final long cpuNanos; + + /** + * The number of times the driver has moved a single page up the + * chain of operators as far as it'll go. + */ + private final long iterations; + /** * Status of each {@link Operator} in the driver when it finishes. */ private final List operators; - public DriverProfile(List operators) { + public DriverProfile(long tookNanos, long cpuNanos, long iterations, List operators) { + this.tookNanos = tookNanos; + this.cpuNanos = cpuNanos; + this.iterations = iterations; this.operators = operators; } public DriverProfile(StreamInput in) throws IOException { + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_TIMINGS)) { + this.tookNanos = in.readVLong(); + this.cpuNanos = in.readVLong(); + this.iterations = in.readVLong(); + } else { + this.tookNanos = 0; + this.cpuNanos = 0; + this.iterations = 0; + } this.operators = in.readCollectionAsImmutableList(DriverStatus.OperatorStatus::new); } @Override public void writeTo(StreamOutput out) throws IOException { + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_TIMINGS)) { + out.writeVLong(tookNanos); + out.writeVLong(cpuNanos); + out.writeVLong(iterations); + } out.writeCollection(operators); } - List operators() { + /** + * Nanos between creation and completion of the {@link Driver}. + */ + public long tookNanos() { + return tookNanos; + } + + /** + * Nanos this {@link Driver} has been running on the cpu. Does not + * include async or waiting time. + */ + public long cpuNanos() { + return cpuNanos; + } + + /** + * The number of times the driver has moved a single page up the + * chain of operators as far as it'll go. 
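DriverProfile, like the operator statuses earlier in this change, always reports the raw nanosecond counters and adds a derived human-readable duration only when human-readable output was requested. The sketch below shows that dual-field rendering without the XContent machinery; the field names mirror the diff, but the Map-based "builder" is purely illustrative.

```java
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

// Sketch of dual-field rendering: raw counter always, derived human field on request.
public class HumanReadableFields {
    static Map<String, Object> render(long cpuNanos, boolean humanReadable) {
        Map<String, Object> fields = new LinkedHashMap<>();
        fields.put("cpu_nanos", cpuNanos);
        if (humanReadable) {
            fields.put("cpu_time", TimeUnit.NANOSECONDS.toMillis(cpuNanos) + "ms");
        }
        return fields;
    }

    public static void main(String[] args) {
        System.out.println(render(1_500_000L, false)); // {cpu_nanos=1500000}
        System.out.println(render(1_500_000L, true));  // {cpu_nanos=1500000, cpu_time=1ms}
    }
}
```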
+ */ + public long iterations() { + return iterations; + } + + public List operators() { return operators; } @Override public Iterator toXContentChunked(ToXContent.Params params) { - return Iterators.concat( - ChunkedToXContentHelper.startObject(), - ChunkedToXContentHelper.array("operators", operators.iterator()), - ChunkedToXContentHelper.endObject() - ); + return Iterators.concat(ChunkedToXContentHelper.startObject(), Iterators.single((b, p) -> { + b.field("took_nanos", tookNanos); + if (b.humanReadable()) { + b.field("took_time", TimeValue.timeValueNanos(tookNanos)); + } + b.field("cpu_nanos", cpuNanos); + if (b.humanReadable()) { + b.field("cpu_time", TimeValue.timeValueNanos(cpuNanos)); + } + b.field("iterations", iterations); + return b; + }), ChunkedToXContentHelper.array("operators", operators.iterator()), ChunkedToXContentHelper.endObject()); } @Override @@ -64,11 +131,19 @@ public boolean equals(Object o) { return false; } DriverProfile that = (DriverProfile) o; - return Objects.equals(operators, that.operators); + return tookNanos == that.tookNanos + && cpuNanos == that.cpuNanos + && iterations == that.iterations + && Objects.equals(operators, that.operators); } @Override public int hashCode() { - return Objects.hash(operators); + return Objects.hash(tookNanos, cpuNanos, iterations, operators); + } + + @Override + public String toString() { + return Strings.toString(this); } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverStatus.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverStatus.java index 90713381deb07..f143216303d35 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverStatus.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverStatus.java @@ -12,8 +12,10 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.VersionedNamedWriteable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.tasks.Task; import org.elasticsearch.xcontent.ToXContentFragment; @@ -39,10 +41,29 @@ public class DriverStatus implements Task.Status { * The session for this driver. */ private final String sessionId; + + /** + * Milliseconds since epoch when this driver started. + */ + private final long started; + /** * When this status was generated. */ private final long lastUpdated; + + /** + * Nanos this {@link Driver} has been running on the cpu. Does not + * include async or waiting time. + */ + private final long cpuNanos; + + /** + * The number of times the driver has moved a single page up the + * chain of operators as far as it'll go. + */ + private final long iterations; + /** * The state of the overall driver - queue, starting, running, finished. 
*/ @@ -60,13 +81,19 @@ public class DriverStatus implements Task.Status { DriverStatus( String sessionId, + long started, long lastUpdated, + long cpuTime, + long iterations, Status status, List completedOperators, List activeOperators ) { this.sessionId = sessionId; + this.started = started; this.lastUpdated = lastUpdated; + this.cpuNanos = cpuTime; + this.iterations = iterations; this.status = status; this.completedOperators = completedOperators; this.activeOperators = activeOperators; @@ -74,7 +101,10 @@ public class DriverStatus implements Task.Status { public DriverStatus(StreamInput in) throws IOException { this.sessionId = in.readString(); + this.started = in.getTransportVersion().onOrAfter(TransportVersions.ESQL_TIMINGS) ? in.readLong() : 0; this.lastUpdated = in.readLong(); + this.cpuNanos = in.getTransportVersion().onOrAfter(TransportVersions.ESQL_TIMINGS) ? in.readVLong() : 0; + this.iterations = in.getTransportVersion().onOrAfter(TransportVersions.ESQL_TIMINGS) ? in.readVLong() : 0; this.status = Status.valueOf(in.readString()); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { this.completedOperators = in.readCollectionAsImmutableList(OperatorStatus::new); @@ -87,7 +117,14 @@ public DriverStatus(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(sessionId); + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_TIMINGS)) { + out.writeLong(started); + } out.writeLong(lastUpdated); + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_TIMINGS)) { + out.writeVLong(cpuNanos); + out.writeVLong(iterations); + } out.writeString(status.toString()); if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { out.writeCollection(completedOperators); @@ -107,6 +144,13 @@ public String sessionId() { return sessionId; } + /** + * When this {@link Driver} was started. + */ + public long started() { + return started; + } + /** * When this status was generated. */ @@ -114,6 +158,22 @@ public long lastUpdated() { return lastUpdated; } + /** + * Nanos this {@link Driver} has been running on the cpu. Does not + * include async or waiting time. + */ + public long cpuNanos() { + return cpuNanos; + } + + /** + * The number of times the driver has moved a single page up the + * chain of operators as far as it'll go. + */ + public long iterations() { + return iterations; + } + /** * The state of the overall driver - queue, starting, running, finished. 
*/ @@ -139,7 +199,13 @@ public List activeOperators() { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field("sessionId", sessionId); + builder.field("started", DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(started)); builder.field("last_updated", DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(lastUpdated)); + builder.field("cpu_nanos", cpuNanos); + if (builder.humanReadable()) { + builder.field("cpu_time", TimeValue.timeValueNanos(cpuNanos)); + } + builder.field("iterations", iterations); builder.field("status", status.toString().toLowerCase(Locale.ROOT)); builder.startArray("completed_operators"); for (OperatorStatus completed : completedOperators) { @@ -160,7 +226,10 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) return false; DriverStatus that = (DriverStatus) o; return sessionId.equals(that.sessionId) + && started == that.started && lastUpdated == that.lastUpdated + && cpuNanos == that.cpuNanos + && iterations == that.iterations && status == that.status && completedOperators.equals(that.completedOperators) && activeOperators.equals(that.activeOperators); @@ -168,7 +237,7 @@ public boolean equals(Object o) { @Override public int hashCode() { - return Objects.hash(sessionId, lastUpdated, status, completedOperators, activeOperators); + return Objects.hash(sessionId, started, lastUpdated, cpuNanos, iterations, status, completedOperators, activeOperators); } @Override @@ -204,7 +273,7 @@ public OperatorStatus(String operator, Operator.Status status) { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(operator); - out.writeOptionalNamedWriteable(status); + out.writeOptionalNamedWriteable(status != null && VersionedNamedWriteable.shouldSerialize(out, status) ? 
status : null); } public String operator() { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashAggregationOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashAggregationOperator.java index ad3dce98e34d9..6dcdd15fd1d1c 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashAggregationOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashAggregationOperator.java @@ -7,6 +7,12 @@ package org.elasticsearch.compute.operator; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.Describable; import org.elasticsearch.compute.aggregation.GroupingAggregator; import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; @@ -17,10 +23,14 @@ import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.Page; import org.elasticsearch.core.Releasables; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.xcontent.XContentBuilder; +import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Objects; import java.util.function.Supplier; import static java.util.Objects.requireNonNull; @@ -61,6 +71,19 @@ public String describe() { private final DriverContext driverContext; + /** + * Nanoseconds this operator has spent hashing grouping keys. + */ + private long hashNanos; + /** + * Nanoseconds this operator has spent running the aggregations. + */ + private long aggregationNanos; + /** + * Count of pages this operator has processed. 
+ */ + private int pagesProcessed; + @SuppressWarnings("this-escape") public HashAggregationOperator( List aggregators, @@ -91,36 +114,58 @@ public boolean needsInput() { @Override public void addInput(Page page) { try { - checkState(needsInput(), "Operator is already finishing"); - requireNonNull(page, "page is null"); - GroupingAggregatorFunction.AddInput[] prepared = new GroupingAggregatorFunction.AddInput[aggregators.size()]; - for (int i = 0; i < prepared.length; i++) { - prepared[i] = aggregators.get(i).prepareProcessPage(blockHash, page); - } + class AddInput implements GroupingAggregatorFunction.AddInput { + long hashStart = System.nanoTime(); + long aggStart; - blockHash.add(wrapPage(page), new GroupingAggregatorFunction.AddInput() { @Override public void add(int positionOffset, IntBlock groupIds) { IntVector groupIdsVector = groupIds.asVector(); if (groupIdsVector != null) { add(positionOffset, groupIdsVector); } else { + startAggEndHash(); for (GroupingAggregatorFunction.AddInput p : prepared) { p.add(positionOffset, groupIds); } + end(); } } @Override public void add(int positionOffset, IntVector groupIds) { + startAggEndHash(); for (GroupingAggregatorFunction.AddInput p : prepared) { p.add(positionOffset, groupIds); } + end(); } - }); + + private void startAggEndHash() { + aggStart = System.nanoTime(); + hashNanos += aggStart - hashStart; + } + + private void end() { + hashStart = System.nanoTime(); + aggregationNanos += hashStart - aggStart; + } + } + AddInput add = new AddInput(); + + checkState(needsInput(), "Operator is already finishing"); + requireNonNull(page, "page is null"); + + for (int i = 0; i < prepared.length; i++) { + prepared[i] = aggregators.get(i).prepareProcessPage(blockHash, page); + } + + blockHash.add(wrapPage(page), add); + hashNanos += System.nanoTime() - add.hashStart; } finally { page.releaseBlocks(); + pagesProcessed++; } } @@ -178,6 +223,11 @@ public void close() { Releasables.close(blockHash, () -> Releasables.close(aggregators)); } + @Override + public Operator.Status status() { + return new Status(hashNanos, aggregationNanos, pagesProcessed); + } + protected static void checkState(boolean condition, String msg) { if (condition == false) { throw new IllegalArgumentException(msg); @@ -197,4 +247,115 @@ public String toString() { sb.append("]"); return sb.toString(); } + + public static class Status implements Operator.Status { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + Operator.Status.class, + "hashagg", + Status::new + ); + + /** + * Nanoseconds this operator has spent hashing grouping keys. + */ + private final long hashNanos; + /** + * Nanoseconds this operator has spent running the aggregations. + */ + private final long aggregationNanos; + /** + * Count of pages this operator has processed. + */ + private final int pagesProcessed; + + /** + * Build. + * @param hashNanos Nanoseconds this operator has spent hashing grouping keys. + * @param aggregationNanos Nanoseconds this operator has spent running the aggregations. + * @param pagesProcessed Count of pages this operator has processed. 
+ */ + public Status(long hashNanos, long aggregationNanos, int pagesProcessed) { + this.hashNanos = hashNanos; + this.aggregationNanos = aggregationNanos; + this.pagesProcessed = pagesProcessed; + } + + protected Status(StreamInput in) throws IOException { + hashNanos = in.readVLong(); + aggregationNanos = in.readVLong(); + pagesProcessed = in.readVInt(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(hashNanos); + out.writeVLong(aggregationNanos); + out.writeVInt(pagesProcessed); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + + /** + * Nanoseconds this operator has spent hashing grouping keys. + */ + public long hashNanos() { + return hashNanos; + } + + /** + * Nanoseconds this operator has spent running the aggregations. + */ + public long aggregationNanos() { + return aggregationNanos; + } + + /** + * Count of pages this operator has processed. + */ + public int pagesProcessed() { + return pagesProcessed; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("hash_nanos", hashNanos); + if (builder.humanReadable()) { + builder.field("hash_time", TimeValue.timeValueNanos(hashNanos)); + } + builder.field("aggregation_nanos", aggregationNanos); + if (builder.humanReadable()) { + builder.field("aggregation_time", TimeValue.timeValueNanos(aggregationNanos)); + } + builder.field("pages_processed", pagesProcessed); + return builder.endObject(); + + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Status status = (Status) o; + return hashNanos == status.hashNanos && aggregationNanos == status.aggregationNanos && pagesProcessed == status.pagesProcessed; + } + + @Override + public int hashCode() { + return Objects.hash(hashNanos, aggregationNanos, pagesProcessed); + } + + @Override + public String toString() { + return Strings.toString(this); + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.ESQL_TIMINGS; + } + } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/LimitOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/LimitOperator.java index bcd2ffa1f3855..34e37031e6f11 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/LimitOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/LimitOperator.java @@ -7,6 +7,8 @@ package org.elasticsearch.compute.operator; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; @@ -229,5 +231,10 @@ public int hashCode() { public String toString() { return Strings.toString(this); } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.V_8_11_X; + } } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MvExpandOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MvExpandOperator.java index 629cacb82a97f..e87329a907054 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MvExpandOperator.java +++ 
b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MvExpandOperator.java @@ -7,6 +7,8 @@ package org.elasticsearch.compute.operator; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; @@ -322,5 +324,10 @@ public int hashCode() { public String toString() { return Strings.toString(this); } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.V_8_11_X; + } } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Operator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Operator.java index fd6589bf5a913..1038277c39fe1 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Operator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Operator.java @@ -8,7 +8,7 @@ package org.elasticsearch.compute.operator; import org.elasticsearch.action.support.SubscribableListener; -import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.io.stream.VersionedNamedWriteable; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.compute.Describable; import org.elasticsearch.compute.data.Block; @@ -105,5 +105,5 @@ interface OperatorFactory extends Describable { /** * Status of an {@link Operator} to be returned by the tasks API. */ - interface Status extends ToXContentObject, NamedWriteable {} + interface Status extends ToXContentObject, VersionedNamedWriteable {} } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ProjectOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ProjectOperator.java index 2e61150061e1a..d318639625034 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ProjectOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ProjectOperator.java @@ -30,7 +30,7 @@ public String describe() { } private final int[] projection; - private Block[] blocks; + private final Block[] blocks; /** * Creates an operator that applies the given projection, encoded as an integer list where @@ -41,6 +41,7 @@ public String describe() { */ public ProjectOperator(List projection) { this.projection = projection.stream().mapToInt(Integer::intValue).toArray(); + this.blocks = new Block[projection.size()]; } @Override @@ -49,11 +50,6 @@ protected Page process(Page page) { if (blockCount == 0) { return page; } - if (blocks == null) { - blocks = new Block[projection.length]; - } - - Arrays.fill(blocks, null); int b = 0; for (int source : projection) { if (source >= blockCount) { @@ -69,7 +65,9 @@ protected Page process(Page page) { page.releaseBlocks(); // Use positionCount explicitly to avoid re-computing - also, if the projection is empty, there may be // no more blocks left to determine the positionCount from. 
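+ // blocks is reused across pages; its slots are cleared once the new Page takes ownership of the block references.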
- return new Page(positionCount, blocks); + Page output = new Page(positionCount, blocks); + Arrays.fill(blocks, null); + return output; } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java index d1a2b8710cd23..945fdff50d31c 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java @@ -49,10 +49,10 @@ public ExchangeSinkHandler(BlockFactory blockFactory, int maxBufferSize, LongSup this.lastUpdatedInMillis = new AtomicLong(nowInMillis.getAsLong()); } - private class LocalExchangeSink implements ExchangeSink { + private class ExchangeSinkImpl implements ExchangeSink { boolean finished; - LocalExchangeSink() { + ExchangeSinkImpl() { onChanged(); outstandingSinks.incrementAndGet(); } @@ -155,7 +155,7 @@ private void notifyListeners() { * @see ExchangeSinkOperator */ public ExchangeSink createExchangeSink() { - return new LocalExchangeSink(); + return new ExchangeSinkImpl(); } /** diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkOperator.java index fed0b2de4454b..01354d681017a 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkOperator.java @@ -7,6 +7,8 @@ package org.elasticsearch.compute.operator.exchange; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -148,5 +150,10 @@ public int hashCode() { public String toString() { return Strings.toString(this); } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.V_8_11_X; + } } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java index 859b1fc73c3e1..7492fa8c19385 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java @@ -44,10 +44,10 @@ public ExchangeSourceHandler(int maxBufferSize, Executor fetchExecutor) { this.outstandingSources = new PendingInstances(() -> buffer.finish(true)); } - private class LocalExchangeSource implements ExchangeSource { + private class ExchangeSourceImpl implements ExchangeSource { private boolean finished; - LocalExchangeSource() { + ExchangeSourceImpl() { outstandingSources.trackNewInstance(); } @@ -95,7 +95,7 @@ public int bufferSize() { * @see ExchangeSinkOperator */ public ExchangeSource createExchangeSource() { - return new LocalExchangeSource(); + return new ExchangeSourceImpl(); } /** diff --git 
a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceOperator.java index 8719ed6ab90ea..1efba31bd831b 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceOperator.java @@ -7,6 +7,8 @@ package org.elasticsearch.compute.operator.exchange; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -157,5 +159,10 @@ public int hashCode() { public String toString() { return Strings.toString(this); } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.V_8_11_X; + } } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/TopNOperatorStatus.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/TopNOperatorStatus.java index 1261332ea1423..1617a546be2cc 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/TopNOperatorStatus.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/TopNOperatorStatus.java @@ -7,6 +7,8 @@ package org.elasticsearch.compute.operator.topn; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -77,4 +79,9 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(occupiedRows, ramBytesUsed); } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.V_8_11_X; + } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorStatusTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorStatusTests.java index 6c787052a8ae7..def0710644d22 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorStatusTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorStatusTests.java @@ -20,7 +20,7 @@ public class LuceneSourceOperatorStatusTests extends AbstractWireSerializingTestCase { public static LuceneSourceOperator.Status simple() { - return new LuceneSourceOperator.Status(2, Set.of("*:*"), new TreeSet<>(List.of("a:0", "a:1")), 0, 1, 5, 123, 99990, 8000); + return new LuceneSourceOperator.Status(2, Set.of("*:*"), new TreeSet<>(List.of("a:0", "a:1")), 1002, 0, 1, 5, 123, 99990, 8000); } public static String simpleToJson() { @@ -34,6 +34,8 @@ public static String simpleToJson() { "a:0", "a:1" ], + "processing_nanos" : 1002, + "processing_time" : "1micros", "slice_index" : 0, "total_slices" : 1, "pages_emitted" : 5, @@ -58,6 +60,7 @@ public LuceneSourceOperator.Status createTestInstance() { randomNonNegativeInt(), randomProcessedQueries(), randomProcessedShards(), + randomNonNegativeLong(), randomNonNegativeInt(), randomNonNegativeInt(), randomNonNegativeInt(), @@ -90,29 +93,31 @@ 
protected LuceneSourceOperator.Status mutateInstance(LuceneSourceOperator.Status int processedSlices = instance.processedSlices(); Set processedQueries = instance.processedQueries(); Set processedShards = instance.processedShards(); + long processNanos = instance.processNanos(); int sliceIndex = instance.sliceIndex(); int totalSlices = instance.totalSlices(); int pagesEmitted = instance.pagesEmitted(); int sliceMin = instance.sliceMin(); int sliceMax = instance.sliceMax(); int current = instance.current(); - switch (between(0, 8)) { + switch (between(0, 9)) { case 0 -> processedSlices = randomValueOtherThan(processedSlices, ESTestCase::randomNonNegativeInt); case 1 -> processedQueries = randomValueOtherThan(processedQueries, LuceneSourceOperatorStatusTests::randomProcessedQueries); case 2 -> processedShards = randomValueOtherThan(processedShards, LuceneSourceOperatorStatusTests::randomProcessedShards); - case 3 -> sliceIndex = randomValueOtherThan(sliceIndex, ESTestCase::randomNonNegativeInt); - case 4 -> totalSlices = randomValueOtherThan(totalSlices, ESTestCase::randomNonNegativeInt); - case 5 -> pagesEmitted = randomValueOtherThan(pagesEmitted, ESTestCase::randomNonNegativeInt); - case 6 -> sliceMin = randomValueOtherThan(sliceMin, ESTestCase::randomNonNegativeInt); - case 7 -> sliceMax = randomValueOtherThan(sliceMax, ESTestCase::randomNonNegativeInt); - case 8 -> current = randomValueOtherThan(current, ESTestCase::randomNonNegativeInt); + case 3 -> processNanos = randomValueOtherThan(processNanos, ESTestCase::randomNonNegativeLong); + case 4 -> sliceIndex = randomValueOtherThan(sliceIndex, ESTestCase::randomNonNegativeInt); + case 5 -> totalSlices = randomValueOtherThan(totalSlices, ESTestCase::randomNonNegativeInt); + case 6 -> pagesEmitted = randomValueOtherThan(pagesEmitted, ESTestCase::randomNonNegativeInt); + case 7 -> sliceMin = randomValueOtherThan(sliceMin, ESTestCase::randomNonNegativeInt); + case 8 -> sliceMax = randomValueOtherThan(sliceMax, ESTestCase::randomNonNegativeInt); + case 9 -> current = randomValueOtherThan(current, ESTestCase::randomNonNegativeInt); default -> throw new UnsupportedOperationException(); } - ; return new LuceneSourceOperator.Status( processedSlices, processedQueries, processedShards, + processNanos, sliceIndex, totalSlices, pagesEmitted, diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java new file mode 100644 index 0000000000000..3b47597d6ea2f --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java @@ -0,0 +1,359 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.lucene; + +import org.apache.lucene.document.DoubleDocValuesField; +import org.apache.lucene.document.FloatDocValuesField; +import org.apache.lucene.document.LongPoint; +import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.document.SortedDocValuesField; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.document.SortedSetDocValuesField; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.SortedNumericSortField; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.DocVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.AnyOperatorTestCase; +import org.elasticsearch.compute.operator.Driver; +import org.elasticsearch.compute.operator.Operator; +import org.elasticsearch.compute.operator.OperatorTestCase; +import org.elasticsearch.compute.operator.TestResultPageSinkOperator; +import org.elasticsearch.core.CheckedFunction; +import org.elasticsearch.core.IOUtils; +import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.mapper.KeywordFieldMapper; +import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; +import org.junit.After; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; +import java.util.function.Function; + +import static org.hamcrest.Matchers.either; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +public class TimeSeriesSortedSourceOperatorTests extends AnyOperatorTestCase { + + private IndexReader reader; + private final Directory directory = newDirectory(); + + @After + public void cleanup() throws IOException { + IOUtils.close(reader, directory); + } + + public void testSimple() { + int numTimeSeries = 3; + int numSamplesPerTS = 10; + long timestampStart = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2024-01-01T00:00:00Z"); + List results = runDriver(1024, 1024, randomBoolean(), numTimeSeries, numSamplesPerTS, timestampStart); + assertThat(results, hasSize(1)); + Page page = results.get(0); + assertThat(page.getBlockCount(), equalTo(5)); + + DocVector docVector = (DocVector) page.getBlock(0).asVector(); + assertThat(docVector.getPositionCount(), equalTo(numTimeSeries * numSamplesPerTS)); + + IntVector tsidVector = (IntVector) page.getBlock(1).asVector(); + assertThat(tsidVector.getPositionCount(), equalTo(numTimeSeries * numSamplesPerTS)); + + LongVector timestampVector = (LongVector) page.getBlock(2).asVector(); + 
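+ // Rows arrive grouped by _tsid (ascending) and, within each series, by @timestamp descending, matching the index sort configured in createTimeSeriesSourceOperator.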
assertThat(timestampVector.getPositionCount(), equalTo(numTimeSeries * numSamplesPerTS)); + + LongVector voltageVector = (LongVector) page.getBlock(3).asVector(); + assertThat(voltageVector.getPositionCount(), equalTo(numTimeSeries * numSamplesPerTS)); + + BytesRefVector hostnameVector = (BytesRefVector) page.getBlock(4).asVector(); + assertThat(hostnameVector.getPositionCount(), equalTo(numTimeSeries * numSamplesPerTS)); + + int offset = 0; + for (int expectedTsidOrd = 0; expectedTsidOrd < numTimeSeries; expectedTsidOrd++) { + String expectedHostname = String.format(Locale.ROOT, "host-%02d", expectedTsidOrd); + long expectedVoltage = 5L + expectedTsidOrd; + for (int j = 0; j < numSamplesPerTS; j++) { + long expectedTimestamp = timestampStart + ((numSamplesPerTS - j - 1) * 10_000L); + + assertThat(docVector.shards().getInt(offset), equalTo(0)); + assertThat(voltageVector.getLong(offset), equalTo(expectedVoltage)); + assertThat(hostnameVector.getBytesRef(offset, new BytesRef()).utf8ToString(), equalTo(expectedHostname)); + assertThat(tsidVector.getInt(offset), equalTo(expectedTsidOrd)); + assertThat(timestampVector.getLong(offset), equalTo(expectedTimestamp)); + offset++; + } + } + } + + public void testMaxPageSize() { + int numTimeSeries = 3; + int numSamplesPerTS = 10; + long timestampStart = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2024-01-01T00:00:00Z"); + List results = runDriver(1024, 1, randomBoolean(), numTimeSeries, numSamplesPerTS, timestampStart); + // A time series shouldn't be split over multiple pages. + assertThat(results, hasSize(numTimeSeries)); + for (int i = 0; i < numTimeSeries; i++) { + Page page = results.get(i); + assertThat(page.getBlockCount(), equalTo(5)); + + DocVector docVector = (DocVector) page.getBlock(0).asVector(); + assertThat(docVector.getPositionCount(), equalTo(numSamplesPerTS)); + + IntVector tsidVector = (IntVector) page.getBlock(1).asVector(); + assertThat(tsidVector.getPositionCount(), equalTo(numSamplesPerTS)); + + LongVector timestampVector = (LongVector) page.getBlock(2).asVector(); + assertThat(timestampVector.getPositionCount(), equalTo(numSamplesPerTS)); + + LongVector voltageVector = (LongVector) page.getBlock(3).asVector(); + assertThat(voltageVector.getPositionCount(), equalTo(numSamplesPerTS)); + + BytesRefVector hostnameVector = (BytesRefVector) page.getBlock(4).asVector(); + assertThat(hostnameVector.getPositionCount(), equalTo(numSamplesPerTS)); + + int offset = 0; + int expectedTsidOrd = i; + String expectedHostname = String.format(Locale.ROOT, "host-%02d", expectedTsidOrd); + long expectedVoltage = 5L + expectedTsidOrd; + for (int j = 0; j < numSamplesPerTS; j++) { + long expectedTimestamp = timestampStart + ((numSamplesPerTS - j - 1) * 10_000L); + + assertThat(docVector.shards().getInt(offset), equalTo(0)); + assertThat(voltageVector.getLong(offset), equalTo(expectedVoltage)); + assertThat(hostnameVector.getBytesRef(offset, new BytesRef()).utf8ToString(), equalTo(expectedHostname)); + assertThat(tsidVector.getInt(offset), equalTo(expectedTsidOrd)); + assertThat(timestampVector.getLong(offset), equalTo(expectedTimestamp)); + offset++; + } + } + } + + public void testLimit() { + int numTimeSeries = 3; + int numSamplesPerTS = 10; + int limit = 1; + long timestampStart = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2024-01-01T00:00:00Z"); + List results = runDriver(limit, 1024, randomBoolean(), numTimeSeries, numSamplesPerTS, timestampStart); + assertThat(results, hasSize(1)); + Page page = results.get(0); + 
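+ // With limit = 1 only the newest sample of the first series (tsid ordinal 0, host-00, voltage 5) is emitted.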
assertThat(page.getBlockCount(), equalTo(5)); + + DocVector docVector = (DocVector) page.getBlock(0).asVector(); + assertThat(docVector.getPositionCount(), equalTo(limit)); + + IntVector tsidVector = (IntVector) page.getBlock(1).asVector(); + assertThat(tsidVector.getPositionCount(), equalTo(limit)); + + LongVector timestampVector = (LongVector) page.getBlock(2).asVector(); + assertThat(timestampVector.getPositionCount(), equalTo(limit)); + + LongVector voltageVector = (LongVector) page.getBlock(3).asVector(); + assertThat(voltageVector.getPositionCount(), equalTo(limit)); + + BytesRefVector hostnameVector = (BytesRefVector) page.getBlock(4).asVector(); + assertThat(hostnameVector.getPositionCount(), equalTo(limit)); + + assertThat(docVector.shards().getInt(0), equalTo(0)); + assertThat(voltageVector.getLong(0), equalTo(5L)); + assertThat(hostnameVector.getBytesRef(0, new BytesRef()).utf8ToString(), equalTo("host-00")); + assertThat(tsidVector.getInt(0), equalTo(0)); + assertThat(timestampVector.getLong(0), equalTo(timestampStart + ((numSamplesPerTS - 1) * 10_000L))); + } + + public void testRandom() { + int numDocs = 1024; + var ctx = driverContext(); + long timestampStart = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2024-01-01T00:00:00Z"); + var timeSeriesFactory = createTimeSeriesSourceOperator(Integer.MAX_VALUE, Integer.MAX_VALUE, randomBoolean(), writer -> { + int commitEvery = 64; + long timestamp = timestampStart; + for (int i = 0; i < numDocs; i++) { + String hostname = String.format(Locale.ROOT, "host-%02d", i % 20); + int voltage = i % 5; + writeTS(writer, timestamp, new Object[] { "hostname", hostname }, new Object[] { "voltage", voltage }); + if (i % commitEvery == 0) { + writer.commit(); + } + timestamp += 10_000; + } + return numDocs; + }); + List results = new ArrayList<>(); + + var voltageField = new NumberFieldMapper.NumberFieldType("voltage", NumberFieldMapper.NumberType.LONG); + OperatorTestCase.runDriver( + new Driver( + ctx, + timeSeriesFactory.get(ctx), + List.of(ValuesSourceReaderOperatorTests.factory(reader, voltageField, ElementType.LONG).get(ctx)), + new TestResultPageSinkOperator(results::add), + () -> {} + ) + ); + OperatorTestCase.assertDriverContext(ctx); + assertThat(results, hasSize(1)); + Page page = results.get(0); + assertThat(page.getBlockCount(), equalTo(4)); + + DocVector docVector = (DocVector) page.getBlock(0).asVector(); + assertThat(docVector.getPositionCount(), equalTo(numDocs)); + + IntVector tsidVector = (IntVector) page.getBlock(1).asVector(); + assertThat(tsidVector.getPositionCount(), equalTo(numDocs)); + + LongVector timestampVector = (LongVector) page.getBlock(2).asVector(); + assertThat(timestampVector.getPositionCount(), equalTo(numDocs)); + + LongVector voltageVector = (LongVector) page.getBlock(3).asVector(); + assertThat(voltageVector.getPositionCount(), equalTo(numDocs)); + for (int i = 0; i < page.getBlockCount(); i++) { + assertThat(docVector.shards().getInt(0), equalTo(0)); + assertThat(voltageVector.getLong(i), either(greaterThanOrEqualTo(0L)).or(lessThanOrEqualTo(4L))); + assertThat(tsidVector.getInt(i), either(greaterThanOrEqualTo(0)).or(lessThan(20))); + assertThat(timestampVector.getLong(i), greaterThanOrEqualTo(timestampStart)); + } + } + + @Override + protected Operator.OperatorFactory simple() { + return createTimeSeriesSourceOperator(1, 1, false, writer -> { + long timestamp = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2024-01-01T00:00:00Z"); + writeTS(writer, timestamp, new Object[] { 
"hostname", "host-01" }, new Object[] { "voltage", 2 }); + return 1; + }); + } + + @Override + protected String expectedDescriptionOfSimple() { + return "TimeSeriesSortedSourceOperator[maxPageSize = 1, limit = 1]"; + } + + @Override + protected String expectedToStringOfSimple() { + return "Impl[maxPageSize=1, remainingDocs=1]"; + } + + List runDriver(int limit, int maxPageSize, boolean forceMerge, int numTimeSeries, int numSamplesPerTS, long timestampStart) { + var ctx = driverContext(); + var timeSeriesFactory = createTimeSeriesSourceOperator(limit, maxPageSize, forceMerge, writer -> { + long timestamp = timestampStart; + for (int i = 0; i < numSamplesPerTS; i++) { + for (int j = 0; j < numTimeSeries; j++) { + String hostname = String.format(Locale.ROOT, "host-%02d", j); + writeTS(writer, timestamp, new Object[] { "hostname", hostname }, new Object[] { "voltage", j + 5 }); + } + timestamp += 10_000; + writer.commit(); + } + return numTimeSeries * numSamplesPerTS; + }); + + List results = new ArrayList<>(); + var voltageField = new NumberFieldMapper.NumberFieldType("voltage", NumberFieldMapper.NumberType.LONG); + var hostnameField = new KeywordFieldMapper.KeywordFieldType("hostname"); + OperatorTestCase.runDriver( + new Driver( + ctx, + timeSeriesFactory.get(ctx), + List.of( + ValuesSourceReaderOperatorTests.factory(reader, voltageField, ElementType.LONG).get(ctx), + ValuesSourceReaderOperatorTests.factory(reader, hostnameField, ElementType.BYTES_REF).get(ctx) + ), + new TestResultPageSinkOperator(results::add), + () -> {} + ) + ); + OperatorTestCase.assertDriverContext(ctx); + return results; + } + + TimeSeriesSortedSourceOperatorFactory createTimeSeriesSourceOperator( + int limit, + int maxPageSize, + boolean forceMerge, + CheckedFunction indexingLogic + ) { + int numDocs; + Sort sort = new Sort( + new SortField(TimeSeriesIdFieldMapper.NAME, SortField.Type.STRING, false), + new SortedNumericSortField(DataStreamTimestampFieldMapper.DEFAULT_PATH, SortField.Type.LONG, true) + ); + try ( + RandomIndexWriter writer = new RandomIndexWriter( + random(), + directory, + newIndexWriterConfig().setIndexSort(sort).setMergePolicy(NoMergePolicy.INSTANCE) + ) + ) { + + numDocs = indexingLogic.apply(writer); + if (forceMerge) { + writer.forceMerge(1); + } + reader = writer.getReader(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + var ctx = new LuceneSourceOperatorTests.MockShardContext(reader, 0); + Function queryFunction = c -> new MatchAllDocsQuery(); + return TimeSeriesSortedSourceOperatorFactory.create( + Math.min(numDocs, limit), + Math.min(numDocs, maxPageSize), + 1, + List.of(ctx), + queryFunction + ); + } + + static void writeTS(RandomIndexWriter iw, long timestamp, Object[] dimensions, Object[] metrics) throws IOException { + final List fields = new ArrayList<>(); + fields.add(new SortedNumericDocValuesField(DataStreamTimestampFieldMapper.DEFAULT_PATH, timestamp)); + fields.add(new LongPoint(DataStreamTimestampFieldMapper.DEFAULT_PATH, timestamp)); + final TimeSeriesIdFieldMapper.TimeSeriesIdBuilder builder = new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(null); + for (int i = 0; i < dimensions.length; i += 2) { + if (dimensions[i + 1] instanceof Number n) { + builder.addLong(dimensions[i].toString(), n.longValue()); + } else { + builder.addString(dimensions[i].toString(), dimensions[i + 1].toString()); + fields.add(new SortedSetDocValuesField(dimensions[i].toString(), new BytesRef(dimensions[i + 1].toString()))); + } + } + for (int i = 0; i < metrics.length; i += 2) 
{ + if (metrics[i + 1] instanceof Integer || metrics[i + 1] instanceof Long) { + fields.add(new NumericDocValuesField(metrics[i].toString(), ((Number) metrics[i + 1]).longValue())); + } else if (metrics[i + 1] instanceof Float) { + fields.add(new FloatDocValuesField(metrics[i].toString(), (float) metrics[i + 1])); + } else if (metrics[i + 1] instanceof Double) { + fields.add(new DoubleDocValuesField(metrics[i].toString(), (double) metrics[i + 1])); + } + } + // Use legacy tsid to make tests easier to understand: + fields.add(new SortedDocValuesField(TimeSeriesIdFieldMapper.NAME, builder.buildLegacyTsid().toBytesRef())); + iw.addDocument(fields); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorStatusTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorStatusTests.java index 1851f7ac948cc..5887da0bc466b 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorStatusTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorStatusTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.test.ESTestCase; import java.io.IOException; import java.util.Map; @@ -19,7 +20,7 @@ public class ValuesSourceReaderOperatorStatusTests extends AbstractWireSerializingTestCase { public static ValuesSourceReaderOperator.Status simple() { - return new ValuesSourceReaderOperator.Status(Map.of("ReaderType", 3), 123); + return new ValuesSourceReaderOperator.Status(Map.of("ReaderType", 3), 1022323, 123); } public static String simpleToJson() { @@ -28,6 +29,8 @@ public static String simpleToJson() { "readers_built" : { "ReaderType" : 3 }, + "process_nanos" : 1022323, + "process_time" : "1ms", "pages_processed" : 123 }"""; } @@ -43,7 +46,7 @@ protected Writeable.Reader instanceReader() { @Override public ValuesSourceReaderOperator.Status createTestInstance() { - return new ValuesSourceReaderOperator.Status(randomReadersBuilt(), between(0, Integer.MAX_VALUE)); + return new ValuesSourceReaderOperator.Status(randomReadersBuilt(), randomNonNegativeLong(), randomNonNegativeInt()); } private Map randomReadersBuilt() { @@ -57,19 +60,15 @@ private Map randomReadersBuilt() { @Override protected ValuesSourceReaderOperator.Status mutateInstance(ValuesSourceReaderOperator.Status instance) throws IOException { - switch (between(0, 1)) { - case 0: - return new ValuesSourceReaderOperator.Status( - randomValueOtherThan(instance.readersBuilt(), this::randomReadersBuilt), - instance.pagesProcessed() - ); - case 1: - return new ValuesSourceReaderOperator.Status( - instance.readersBuilt(), - randomValueOtherThan(instance.pagesProcessed(), () -> between(0, Integer.MAX_VALUE)) - ); - default: - throw new UnsupportedOperationException(); + Map readersBuilt = instance.readersBuilt(); + long processNanos = instance.processNanos(); + int pagesProcessed = instance.pagesProcessed(); + switch (between(0, 2)) { + case 0 -> readersBuilt = randomValueOtherThan(readersBuilt, this::randomReadersBuilt); + case 1 -> processNanos = randomValueOtherThan(processNanos, ESTestCase::randomNonNegativeLong); + case 2 -> pagesProcessed = randomValueOtherThan(pagesProcessed, ESTestCase::randomNonNegativeInt); + default -> throw new UnsupportedOperationException(); } + 
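+ // Rebuild the status with exactly one randomly chosen field changed so equals/hashCode and wire round-trips are tested against a near-identical instance.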
return new ValuesSourceReaderOperator.Status(readersBuilt, processNanos, pagesProcessed); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AbstractPageMappingOperatorStatusTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AbstractPageMappingOperatorStatusTests.java index c72e87bb96a81..3c04e6e5a9f57 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AbstractPageMappingOperatorStatusTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AbstractPageMappingOperatorStatusTests.java @@ -16,16 +16,20 @@ public class AbstractPageMappingOperatorStatusTests extends AbstractWireSerializingTestCase { public static AbstractPageMappingOperator.Status simple() { - return new AbstractPageMappingOperator.Status(123); + return new AbstractPageMappingOperator.Status(200012, 123); } public static String simpleToJson() { return """ - {"pages_processed":123}"""; + { + "process_nanos" : 200012, + "process_time" : "200micros", + "pages_processed" : 123 + }"""; } public void testToXContent() { - assertThat(Strings.toString(simple()), equalTo(simpleToJson())); + assertThat(Strings.toString(simple(), true, true), equalTo(simpleToJson())); } @Override @@ -35,11 +39,18 @@ protected Writeable.Reader instanceReader() @Override public AbstractPageMappingOperator.Status createTestInstance() { - return new AbstractPageMappingOperator.Status(randomNonNegativeInt()); + return new AbstractPageMappingOperator.Status(randomNonNegativeLong(), randomNonNegativeInt()); } @Override protected AbstractPageMappingOperator.Status mutateInstance(AbstractPageMappingOperator.Status instance) { - return new AbstractPageMappingOperator.Status(randomValueOtherThan(instance.pagesProcessed(), ESTestCase::randomNonNegativeInt)); + long processNanos = instance.processNanos(); + int pagesProcessed = instance.pagesProcessed(); + switch (between(0, 1)) { + case 0 -> processNanos = randomValueOtherThan(processNanos, ESTestCase::randomNonNegativeLong); + case 1 -> pagesProcessed = randomValueOtherThan(pagesProcessed, ESTestCase::randomNonNegativeInt); + default -> throw new UnsupportedOperationException(); + } + return new AbstractPageMappingOperator.Status(processNanos, pagesProcessed); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AggregationOperatorStatusTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AggregationOperatorStatusTests.java new file mode 100644 index 0000000000000..5d17538ee85ae --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AggregationOperatorStatusTests.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.operator; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.equalTo; + +public class AggregationOperatorStatusTests extends AbstractWireSerializingTestCase { + public static AggregationOperator.Status simple() { + return new AggregationOperator.Status(200012, 123); + } + + public static String simpleToJson() { + return """ + { + "aggregation_nanos" : 200012, + "aggregation_time" : "200micros", + "pages_processed" : 123 + }"""; + } + + public void testToXContent() { + assertThat(Strings.toString(simple(), true, true), equalTo(simpleToJson())); + } + + @Override + protected Writeable.Reader instanceReader() { + return AggregationOperator.Status::new; + } + + @Override + public AggregationOperator.Status createTestInstance() { + return new AggregationOperator.Status(randomNonNegativeLong(), randomNonNegativeInt()); + } + + @Override + protected AggregationOperator.Status mutateInstance(AggregationOperator.Status instance) { + long aggregationNanos = instance.aggregationNanos(); + int pagesProcessed = instance.pagesProcessed(); + switch (between(0, 1)) { + case 0 -> aggregationNanos = randomValueOtherThan(aggregationNanos, ESTestCase::randomNonNegativeLong); + case 1 -> pagesProcessed = randomValueOtherThan(pagesProcessed, ESTestCase::randomNonNegativeInt); + default -> throw new UnsupportedOperationException(); + } + return new AggregationOperator.Status(aggregationNanos, pagesProcessed); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorStatusTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorStatusTests.java new file mode 100644 index 0000000000000..ab2dcc5e6c443 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorStatusTests.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.operator; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; + +public class AsyncOperatorStatusTests extends AbstractWireSerializingTestCase { + @Override + protected Writeable.Reader instanceReader() { + return AsyncOperator.Status::new; + } + + @Override + protected AsyncOperator.Status createTestInstance() { + return new AsyncOperator.Status( + randomNonNegativeLong(), + randomNonNegativeLong(), + randomLongBetween(1, TimeValue.timeValueHours(1).millis()) + ); + } + + @Override + protected AsyncOperator.Status mutateInstance(AsyncOperator.Status in) throws IOException { + int field = randomIntBetween(0, 2); + return switch (field) { + case 0 -> new AsyncOperator.Status( + randomValueOtherThan(in.receivedPages(), ESTestCase::randomNonNegativeLong), + in.completedPages(), + in.totalTimeInMillis() + ); + case 1 -> new AsyncOperator.Status( + in.receivedPages(), + randomValueOtherThan(in.completedPages(), ESTestCase::randomNonNegativeLong), + in.totalTimeInMillis() + ); + case 2 -> new AsyncOperator.Status( + in.receivedPages(), + in.completedPages(), + randomValueOtherThan(in.totalTimeInMillis(), ESTestCase::randomNonNegativeLong) + ); + default -> throw new AssertionError("unknown "); + }; + } + + public void testToXContent() { + var status = new AsyncOperator.Status(100, 50, TimeValue.timeValueSeconds(10).millis()); + String json = Strings.toString(status, true, true); + assertThat(json, equalTo(""" + { + "received_pages" : 100, + "completed_pages" : 50, + "total_time_in_millis" : 10000, + "total_time" : "10s" + }""")); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverProfileTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverProfileTests.java index ec9952cdce022..86655bd3b7f73 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverProfileTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverProfileTests.java @@ -16,6 +16,7 @@ import org.elasticsearch.compute.lucene.ValuesSourceReaderOperatorStatusTests; import org.elasticsearch.compute.operator.exchange.ExchangeSinkOperator; import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.test.ESTestCase; import java.io.IOException; import java.util.List; @@ -25,6 +26,9 @@ public class DriverProfileTests extends AbstractWireSerializingTestCase { public void testToXContent() { DriverProfile status = new DriverProfile( + 10012, + 10000, + 12, List.of( new DriverStatus.OperatorStatus("LuceneSource", LuceneSourceOperatorStatusTests.simple()), new DriverStatus.OperatorStatus("ValuesSourceReader", ValuesSourceReaderOperatorStatusTests.simple()) @@ -32,6 +36,11 @@ public void testToXContent() { ); assertThat(Strings.toString(status, true, true), equalTo(""" { + "took_nanos" : 10012, + "took_time" : "10micros", + "cpu_nanos" : 10000, + "cpu_time" : "10micros", + "iterations" : 12, "operators" : [ { "operator" : "LuceneSource", @@ -56,13 +65,28 @@ protected Writeable.Reader instanceReader() { @Override protected DriverProfile createTestInstance() { - return new DriverProfile(DriverStatusTests.randomOperatorStatuses()); + return new 
DriverProfile( + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + DriverStatusTests.randomOperatorStatuses() + ); } @Override protected DriverProfile mutateInstance(DriverProfile instance) throws IOException { - var operators = randomValueOtherThan(instance.operators(), DriverStatusTests::randomOperatorStatuses); - return new DriverProfile(operators); + long tookNanos = instance.tookNanos(); + long cpuNanos = instance.cpuNanos(); + long iterations = instance.iterations(); + var operators = instance.operators(); + switch (between(0, 3)) { + case 0 -> tookNanos = randomValueOtherThan(tookNanos, ESTestCase::randomNonNegativeLong); + case 1 -> cpuNanos = randomValueOtherThan(cpuNanos, ESTestCase::randomNonNegativeLong); + case 2 -> iterations = randomValueOtherThan(iterations, ESTestCase::randomNonNegativeLong); + case 3 -> operators = randomValueOtherThan(operators, DriverStatusTests::randomOperatorStatuses); + default -> throw new UnsupportedOperationException(); + } + return new DriverProfile(tookNanos, cpuNanos, iterations, operators); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverStatusTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverStatusTests.java index c10bcf8d49ca4..e82cbb831cff2 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverStatusTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverStatusTests.java @@ -31,7 +31,10 @@ public class DriverStatusTests extends AbstractWireSerializingTestCase instanceReader() { @Override protected DriverStatus createTestInstance() { - return new DriverStatus(randomSessionId(), randomLong(), randomStatus(), randomOperatorStatuses(), randomOperatorStatuses()); + return new DriverStatus( + randomSessionId(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomStatus(), + randomOperatorStatuses(), + randomOperatorStatuses() + ); } private String randomSessionId() { @@ -104,30 +120,25 @@ private static DriverStatus.OperatorStatus randomOperatorStatus() { @Override protected DriverStatus mutateInstance(DriverStatus instance) throws IOException { var sessionId = instance.sessionId(); + long started = instance.started(); long lastUpdated = instance.lastUpdated(); + long cpuNanos = instance.cpuNanos(); + long iterations = instance.iterations(); var status = instance.status(); var completedOperators = instance.completedOperators(); var activeOperators = instance.activeOperators(); - switch (between(0, 4)) { - case 0: - sessionId = randomValueOtherThan(sessionId, this::randomSessionId); - break; - case 1: - lastUpdated = randomValueOtherThan(lastUpdated, ESTestCase::randomLong); - break; - case 2: - status = randomValueOtherThan(status, this::randomStatus); - break; - case 3: - completedOperators = randomValueOtherThan(completedOperators, DriverStatusTests::randomOperatorStatuses); - break; - case 4: - activeOperators = randomValueOtherThan(activeOperators, DriverStatusTests::randomOperatorStatuses); - break; - default: - throw new UnsupportedOperationException(); + switch (between(0, 7)) { + case 0 -> sessionId = randomValueOtherThan(sessionId, this::randomSessionId); + case 1 -> started = randomValueOtherThan(started, ESTestCase::randomNonNegativeLong); + case 2 -> lastUpdated = randomValueOtherThan(lastUpdated, ESTestCase::randomNonNegativeLong); + case 3 -> cpuNanos = 
randomValueOtherThan(cpuNanos, ESTestCase::randomNonNegativeLong); + case 4 -> iterations = randomValueOtherThan(iterations, ESTestCase::randomNonNegativeLong); + case 5 -> status = randomValueOtherThan(status, this::randomStatus); + case 6 -> completedOperators = randomValueOtherThan(completedOperators, DriverStatusTests::randomOperatorStatuses); + case 7 -> activeOperators = randomValueOtherThan(activeOperators, DriverStatusTests::randomOperatorStatuses); + default -> throw new UnsupportedOperationException(); } - return new DriverStatus(sessionId, lastUpdated, status, completedOperators, activeOperators); + return new DriverStatus(sessionId, started, lastUpdated, cpuNanos, iterations, status, completedOperators, activeOperators); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverTests.java index ba45db3c48299..694aaba4bd85e 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverTests.java @@ -35,10 +35,177 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.TimeUnit; +import java.util.function.LongSupplier; import static org.hamcrest.Matchers.equalTo; public class DriverTests extends ESTestCase { + /** + * Runs a driver to completion in a single call and asserts that the + * status and profile returned makes sense. + */ + public void testProfileAndStatusFinishInOneRound() { + DriverContext driverContext = driverContext(); + List inPages = randomList(1, 100, DriverTests::randomPage); + List outPages = new ArrayList<>(); + + long startEpoch = randomNonNegativeLong(); + long startNanos = randomLong(); + long waitTime = randomLongBetween(1000, 100000); + long tickTime = randomLongBetween(1, 10000); + + Driver driver = new Driver( + "unset", + startEpoch, + startNanos, + driverContext, + () -> "unset", + new CannedSourceOperator(inPages.iterator()), + List.of(), + new TestResultPageSinkOperator(outPages::add), + TimeValue.timeValueDays(10), + () -> {} + ); + + NowSupplier nowSupplier = new NowSupplier(startNanos, waitTime, tickTime); + + logger.info("status {}", driver.status()); + assertThat(driver.status().status(), equalTo(DriverStatus.Status.QUEUED)); + assertThat(driver.status().started(), equalTo(startEpoch)); + assertThat(driver.status().cpuNanos(), equalTo(0L)); + assertThat(driver.status().iterations(), equalTo(0L)); + driver.run(TimeValue.timeValueSeconds(Long.MAX_VALUE), Integer.MAX_VALUE, nowSupplier); + logger.info("status {}", driver.status()); + assertThat(driver.status().status(), equalTo(DriverStatus.Status.DONE)); + assertThat(driver.status().started(), equalTo(startEpoch)); + long sumRunningTime = tickTime * (nowSupplier.callCount - 1); + assertThat(driver.status().cpuNanos(), equalTo(sumRunningTime)); + assertThat(driver.status().iterations(), equalTo((long) inPages.size())); + + logger.info("profile {}", driver.profile()); + assertThat(driver.profile().tookNanos(), equalTo(waitTime + sumRunningTime)); + assertThat(driver.profile().cpuNanos(), equalTo(sumRunningTime)); + assertThat(driver.profile().iterations(), equalTo((long) inPages.size())); + } + + /** + * Runs the driver processing a single page at a time and asserting that + * the status reported between each call is sane. 
And that the profile + * returned after completion is sane. + */ + public void testProfileAndStatusOneIterationAtATime() { + DriverContext driverContext = driverContext(); + List inPages = randomList(2, 100, DriverTests::randomPage); + List outPages = new ArrayList<>(); + + long startEpoch = randomNonNegativeLong(); + long startNanos = randomLong(); + long waitTime = randomLongBetween(1000, 100000); + long tickTime = randomLongBetween(1, 10000); + + Driver driver = new Driver( + "unset", + startEpoch, + startNanos, + driverContext, + () -> "unset", + new CannedSourceOperator(inPages.iterator()), + List.of(), + new TestResultPageSinkOperator(outPages::add), + TimeValue.timeValueDays(10), + () -> {} + ); + + NowSupplier nowSupplier = new NowSupplier(startNanos, waitTime, tickTime); + for (int i = 0; i < inPages.size(); i++) { + logger.info("status {} {}", i, driver.status()); + assertThat(driver.status().status(), equalTo(i == 0 ? DriverStatus.Status.QUEUED : DriverStatus.Status.WAITING)); + assertThat(driver.status().started(), equalTo(startEpoch)); + assertThat(driver.status().iterations(), equalTo((long) i)); + assertThat(driver.status().cpuNanos(), equalTo(tickTime * i)); + driver.run(TimeValue.timeValueSeconds(Long.MAX_VALUE), 1, nowSupplier); + } + + logger.info("status {}", driver.status()); + assertThat(driver.status().status(), equalTo(DriverStatus.Status.DONE)); + assertThat(driver.status().started(), equalTo(startEpoch)); + assertThat(driver.status().iterations(), equalTo((long) inPages.size())); + assertThat(driver.status().cpuNanos(), equalTo(tickTime * inPages.size())); + + logger.info("profile {}", driver.profile()); + assertThat(driver.profile().tookNanos(), equalTo(waitTime + tickTime * (nowSupplier.callCount - 1))); + assertThat(driver.profile().cpuNanos(), equalTo(tickTime * inPages.size())); + assertThat(driver.profile().iterations(), equalTo((long) inPages.size())); + } + + /** + * Runs the driver processing a single page at a time via a synthetic timeout + * and asserting that the status reported between each call is sane. And that + * the profile returned after completion is sane. + */ + public void testProfileAndStatusTimeout() { + DriverContext driverContext = driverContext(); + List inPages = randomList(2, 100, DriverTests::randomPage); + List outPages = new ArrayList<>(); + + long startEpoch = randomNonNegativeLong(); + long startNanos = randomLong(); + long waitTime = randomLongBetween(1000, 100000); + long tickTime = randomLongBetween(1, 10000); + + Driver driver = new Driver( + "unset", + startEpoch, + startNanos, + driverContext, + () -> "unset", + new CannedSourceOperator(inPages.iterator()), + List.of(), + new TestResultPageSinkOperator(outPages::add), + TimeValue.timeValueNanos(tickTime), + () -> {} + ); + + NowSupplier nowSupplier = new NowSupplier(startNanos, waitTime, tickTime); + for (int i = 0; i < inPages.size(); i++) { + logger.info("status {} {}", i, driver.status()); + assertThat(driver.status().status(), equalTo(i == 0 ? 
DriverStatus.Status.QUEUED : DriverStatus.Status.WAITING)); + assertThat(driver.status().started(), equalTo(startEpoch)); + assertThat(driver.status().iterations(), equalTo((long) i)); + assertThat(driver.status().cpuNanos(), equalTo(tickTime * i)); + driver.run(TimeValue.timeValueNanos(tickTime), Integer.MAX_VALUE, nowSupplier); + } + + logger.info("status {}", driver.status()); + assertThat(driver.status().status(), equalTo(DriverStatus.Status.DONE)); + assertThat(driver.status().started(), equalTo(startEpoch)); + assertThat(driver.status().iterations(), equalTo((long) inPages.size())); + assertThat(driver.status().cpuNanos(), equalTo(tickTime * inPages.size())); + + logger.info("profile {}", driver.profile()); + assertThat(driver.profile().tookNanos(), equalTo(waitTime + tickTime * (nowSupplier.callCount - 1))); + assertThat(driver.profile().cpuNanos(), equalTo(tickTime * inPages.size())); + assertThat(driver.profile().iterations(), equalTo((long) inPages.size())); + } + + class NowSupplier implements LongSupplier { + private final long startNanos; + private final long waitTime; + private final long tickTime; + + private int callCount; + + NowSupplier(long startNanos, long waitTime, long tickTime) { + this.startNanos = startNanos; + this.waitTime = waitTime; + this.tickTime = tickTime; + } + + @Override + public long getAsLong() { + return startNanos + waitTime + tickTime * callCount++; + } + } public void testThreadContext() throws Exception { DriverContext driverContext = driverContext(); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashAggregationOperatorStatusTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashAggregationOperatorStatusTests.java new file mode 100644 index 0000000000000..245ae171c630b --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashAggregationOperatorStatusTests.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.operator; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.equalTo; + +public class HashAggregationOperatorStatusTests extends AbstractWireSerializingTestCase { + public static HashAggregationOperator.Status simple() { + return new HashAggregationOperator.Status(500012, 200012, 123); + } + + public static String simpleToJson() { + return """ + { + "hash_nanos" : 500012, + "hash_time" : "500micros", + "aggregation_nanos" : 200012, + "aggregation_time" : "200micros", + "pages_processed" : 123 + }"""; + } + + public void testToXContent() { + assertThat(Strings.toString(simple(), true, true), equalTo(simpleToJson())); + } + + @Override + protected Writeable.Reader instanceReader() { + return HashAggregationOperator.Status::new; + } + + @Override + public HashAggregationOperator.Status createTestInstance() { + return new HashAggregationOperator.Status(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeInt()); + } + + @Override + protected HashAggregationOperator.Status mutateInstance(HashAggregationOperator.Status instance) { + long hashNanos = instance.hashNanos(); + long aggregationNanos = instance.aggregationNanos(); + int pagesProcessed = instance.pagesProcessed(); + switch (between(0, 2)) { + case 0 -> hashNanos = randomValueOtherThan(hashNanos, ESTestCase::randomNonNegativeLong); + case 1 -> aggregationNanos = randomValueOtherThan(aggregationNanos, ESTestCase::randomNonNegativeLong); + case 2 -> pagesProcessed = randomValueOtherThan(pagesProcessed, ESTestCase::randomNonNegativeInt); + default -> throw new UnsupportedOperationException(); + } + return new HashAggregationOperator.Status(hashNanos, aggregationNanos, pagesProcessed); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java index 68a2bde0c2f6c..f8b53a9bcd3c0 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java @@ -284,6 +284,8 @@ public static void runDriver(List drivers) { drivers.add( new Driver( "dummy-session", + 0, + 0, new DriverContext(BigArrays.NON_RECYCLING_INSTANCE, TestBlockFactory.getNonBreakingInstance()), () -> "dummy-driver", new SequenceLongBlockSourceOperator( diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java index f45bda077da05..bdaa045633dc0 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java @@ -275,6 +275,8 @@ void runConcurrentTest( DriverContext dc = driverContext(); Driver d = new Driver( "test-session:1", + 0, + 0, dc, () -> description, seqNoGenerator.get(dc), @@ -291,6 +293,8 @@ void runConcurrentTest( DriverContext dc = driverContext(); Driver d = new Driver( "test-session:2", + 0, + 0, dc, () -> description, sourceOperator, diff --git 
a/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/FieldExtractorIT.java b/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/FieldExtractorIT.java new file mode 100644 index 0000000000000..8c1e47c29670a --- /dev/null +++ b/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/FieldExtractorIT.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.qa.mixed; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.elasticsearch.test.TestClustersThreadFilter; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.xpack.esql.qa.rest.FieldExtractorTestCase; +import org.junit.ClassRule; + +@ThreadLeakFilters(filters = TestClustersThreadFilter.class) +public class FieldExtractorIT extends FieldExtractorTestCase { + @ClassRule + public static ElasticsearchCluster cluster = Clusters.mixedVersionCluster(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } +} diff --git a/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/Clusters.java b/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/Clusters.java new file mode 100644 index 0000000000000..4aa17801fa217 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/Clusters.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.qa.multi_node; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; + +public class Clusters { + public static ElasticsearchCluster testCluster() { + return ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .nodes(2) + .setting("xpack.security.enabled", "false") + .setting("xpack.license.self_generated.type", "trial") + .build(); + } +} diff --git a/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlSpecIT.java b/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlSpecIT.java index d73f66ab00107..67b916a815819 100644 --- a/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlSpecIT.java +++ b/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlSpecIT.java @@ -8,19 +8,13 @@ package org.elasticsearch.xpack.esql.qa.multi_node; import org.elasticsearch.test.cluster.ElasticsearchCluster; -import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase; import org.elasticsearch.xpack.ql.CsvSpecReader.CsvTestCase; import org.junit.ClassRule; public class EsqlSpecIT extends EsqlSpecTestCase { @ClassRule - public static ElasticsearchCluster cluster = ElasticsearchCluster.local() - .distribution(DistributionType.DEFAULT) - .nodes(2) - .setting("xpack.security.enabled", "false") - .setting("xpack.license.self_generated.type", "trial") - .build(); + public static ElasticsearchCluster cluster = Clusters.testCluster(); @Override protected String getTestRestCluster() { diff --git a/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/FieldExtractorIT.java b/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/FieldExtractorIT.java new file mode 100644 index 0000000000000..bcb83a31f7641 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/FieldExtractorIT.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.qa.multi_node; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.elasticsearch.test.TestClustersThreadFilter; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.xpack.esql.qa.rest.FieldExtractorTestCase; +import org.junit.ClassRule; + +@ThreadLeakFilters(filters = TestClustersThreadFilter.class) +public class FieldExtractorIT extends FieldExtractorTestCase { + @ClassRule + public static ElasticsearchCluster cluster = Clusters.testCluster(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } +} diff --git a/x-pack/plugin/esql/qa/server/multi-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlClientYamlIT.java b/x-pack/plugin/esql/qa/server/multi-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlClientYamlIT.java index 5a615def1186f..a90cce0a566e7 100644 --- a/x-pack/plugin/esql/qa/server/multi-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlClientYamlIT.java +++ b/x-pack/plugin/esql/qa/server/multi-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlClientYamlIT.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.qa.mixed; +package org.elasticsearch.xpack.esql.qa.multi_node; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/FieldExtractorIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/FieldExtractorIT.java new file mode 100644 index 0000000000000..695db7ddf4c3d --- /dev/null +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/FieldExtractorIT.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.qa.single_node; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.elasticsearch.test.TestClustersThreadFilter; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.xpack.esql.qa.rest.FieldExtractorTestCase; +import org.junit.ClassRule; + +@ThreadLeakFilters(filters = TestClustersThreadFilter.class) +public class FieldExtractorIT extends FieldExtractorTestCase { + @ClassRule + public static ElasticsearchCluster cluster = Clusters.testCluster(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } +} diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java index c7727d40d25f2..6743657e86874 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java @@ -185,8 +185,7 @@ public void testIncompatibleMappingsErrors() throws IOException { assertException("from test_alias | where _size is not null | limit 1", "Unknown column [_size]"); assertException( "from test_alias | where message.hash is not null | limit 1", - "Cannot use field [message.hash] due to ambiguities", - "incompatible types: [integer] in [index2], [murmur3] in [index1]" + "Cannot use field [message.hash] with unsupported type [murmur3]" ); assertException( "from index1 | where message.hash is not null | limit 1", diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/TSDBRestEsqlIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/TSDBRestEsqlIT.java new file mode 100644 index 0000000000000..b7ab7b623d460 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/TSDBRestEsqlIT.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.esql.qa.single_node; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.apache.http.util.EntityUtils; +import org.elasticsearch.Build; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.TestClustersThreadFilter; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.esql.CsvTestsDataLoader; +import org.elasticsearch.xpack.esql.qa.rest.RestEsqlTestCase; +import org.junit.ClassRule; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xpack.esql.qa.rest.RestEsqlTestCase.runEsqlSync; + +/** + * A dedicated test suite for testing time series esql functionality. + * This while the functionality is gated behind a query pragma. 
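+ * The test body below therefore begins with an assumeTrue on Build.current().isSnapshot(), since query pragmas are only accepted on snapshot builds.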
+ */ +@ThreadLeakFilters(filters = TestClustersThreadFilter.class) +public class TSDBRestEsqlIT extends ESRestTestCase { + @ClassRule + public static ElasticsearchCluster cluster = Clusters.testCluster(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + public void testTimeSeriesQuerying() throws IOException { + assumeTrue("time series querying relies on query pragma", Build.current().isSnapshot()); + var settings = Settings.builder() + .loadFromStream("tsdb-settings.json", TSDBRestEsqlIT.class.getResourceAsStream("/tsdb-settings.json"), false) + .build(); + String mapping = CsvTestsDataLoader.readTextFile(TSDBRestEsqlIT.class.getResource("/tsdb-mapping.json")); + createIndex("k8s", settings, mapping); + + Request bulk = new Request("POST", "/k8s/_bulk"); + bulk.addParameter("refresh", "true"); + bulk.addParameter("filter_path", "errors"); + + String bulkBody = new String( + TSDBRestEsqlIT.class.getResourceAsStream("/tsdb-bulk-request.txt").readAllBytes(), + StandardCharsets.UTF_8 + ); + bulk.setJsonEntity(bulkBody); + Response response = client().performRequest(bulk); + assertEquals("{\"errors\":false}", EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8)); + + RestEsqlTestCase.RequestObjectBuilder builder = new RestEsqlTestCase.RequestObjectBuilder().query( + "FROM k8s | KEEP k8s.pod.name, @timestamp" + ); + builder.pragmas(Settings.builder().put("time_series", true).build()); + Map result = runEsqlSync(builder); + @SuppressWarnings("unchecked") + List> columns = (List>) result.get("columns"); + assertEquals(2, columns.size()); + assertEquals("k8s.pod.name", columns.get(0).get("name")); + assertEquals("@timestamp", columns.get(1).get("name")); + + // Note that _tsid is a hashed value, so tsid no longer is sorted lexicographically. 
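+ // The expected inter-pod order below (hamster, rat, cow, cat) follows that hashed _tsid order rather than the pod names, and the two rows for each pod are sorted by descending @timestamp.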
+ @SuppressWarnings("unchecked") + List> values = (List>) result.get("values"); + assertEquals(8, values.size()); + assertEquals("hamster", values.get(0).get(0)); + assertEquals("2021-04-29T17:29:22.470Z", values.get(0).get(1)); + assertEquals("hamster", values.get(1).get(0)); + assertEquals("2021-04-29T17:29:12.470Z", values.get(1).get(1)); + + assertEquals("rat", values.get(2).get(0)); + assertEquals("2021-04-29T17:29:22.470Z", values.get(2).get(1)); + assertEquals("rat", values.get(3).get(0)); + assertEquals("2021-04-29T17:29:12.470Z", values.get(3).get(1)); + + assertEquals("cow", values.get(4).get(0)); + assertEquals("2021-04-29T17:29:22.470Z", values.get(4).get(1)); + assertEquals("cow", values.get(5).get(0)); + assertEquals("2021-04-29T17:29:12.470Z", values.get(5).get(1)); + + assertEquals("cat", values.get(6).get(0)); + assertEquals("2021-04-29T17:29:22.470Z", values.get(6).get(1)); + assertEquals("cat", values.get(7).get(0)); + assertEquals("2021-04-29T17:29:12.470Z", values.get(7).get(1)); + } +} diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/resources/tsdb-bulk-request.txt b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/resources/tsdb-bulk-request.txt new file mode 100644 index 0000000000000..e7ec37c14a072 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/resources/tsdb-bulk-request.txt @@ -0,0 +1,17 @@ +{"create": {}} +{"@timestamp": "2021-04-29T17:29:12.470Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "network": {"tx": 2001818691, "rx": 802133794},"cpu": {"limit": 0.3787411612903226, "nanocores": 35222928, "node": 0.048845732}}}} +{"create": {}} +{"@timestamp": "2021-04-29T17:29:12.470Z", "metricset": "pod", "k8s": {"pod": {"name": "hamster", "uid":"947e4ced-1786-4e53-9e0c-5c447e959508", "network": {"tx": 2005177954, "rx": 801479970},"cpu": {"limit": 0.5786461612903226, "nanocores": 25222928, "node": 0.505805732}}}} +{"create": {}} +{"@timestamp": "2021-04-29T17:29:12.470Z", "metricset": "pod", "k8s": {"pod": {"name": "cow", "uid":"947e4ced-1786-4e53-9e0c-5c447e959509", "network": {"tx": 2006223737, "rx": 802337279},"cpu": {"limit": 0.5787451612903226, "nanocores": 55252928, "node": 0.606805732}}}} +{"create": {}} +{"@timestamp": "2021-04-29T17:29:12.470Z", "metricset": "pod", "k8s": {"pod": {"name": "rat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959510", "network": {"tx": 2012916202, "rx": 803685721},"cpu": {"limit": 0.6786461612903226, "nanocores": 75227928, "node": 0.058855732}}}} +{"create": {}} +{"@timestamp": "2021-04-29T17:29:22.470Z", "metricset": "pod", "k8s": {"pod": {"name": "rat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959510", "network": {"tx": 1434521831, "rx": 530575198},"cpu": {"limit": 0.7787411712903226, "nanocores": 75727928, "node": 0.068865732}}}} +{"create": {}} +{"@timestamp": "2021-04-29T17:29:22.470Z", "metricset": "pod", "k8s": {"pod": {"name": "cow", "uid":"947e4ced-1786-4e53-9e0c-5c447e959509", "network": {"tx": 1434577921, "rx": 530600088},"cpu": {"limit": 0.2782412612903226, "nanocores": 25222228, "node": 0.078875732}}}} +{"create": {}} +{"@timestamp": "2021-04-29T17:29:22.470Z", "metricset": "pod", "k8s": {"pod": {"name": "hamster", "uid":"947e4ced-1786-4e53-9e0c-5c447e959508", "network": {"tx": 1434587694, "rx": 530604797},"cpu": {"limit": 0.1717411612903226, "nanocores": 15121928, "node": 0.808805732}}}} +{"create": {}} +{"@timestamp": "2021-04-29T17:29:22.470Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", 
"uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "network": {"tx": 1434595272, "rx": 530605511},"cpu": {"limit": 0.8787481682903226, "nanocores": 95292928, "node": 0.908905732}}}} + diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/resources/tsdb-mapping.json b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/resources/tsdb-mapping.json new file mode 100644 index 0000000000000..78af243bac610 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/resources/tsdb-mapping.json @@ -0,0 +1,56 @@ +{ + "properties": { + "@timestamp": { + "type": "date" + }, + "metricset": { + "type": "keyword", + "time_series_dimension": true + }, + "k8s": { + "properties": { + "pod": { + "properties": { + "uid": { + "type": "keyword", + "time_series_dimension": true + }, + "name": { + "type": "keyword" + }, + "cpu": { + "properties": { + "limit": { + "type": "scaled_float", + "scaling_factor": 1000.0, + "time_series_metric": "gauge" + }, + "nanocores": { + "type": "long", + "time_series_metric": "gauge" + }, + "node": { + "type": "scaled_float", + "scaling_factor": 1000.0, + "time_series_metric": "gauge" + } + } + }, + "network": { + "properties": { + "rx": { + "type": "long", + "time_series_metric": "gauge" + }, + "tx": { + "type": "long", + "time_series_metric": "gauge" + } + } + } + } + } + } + } + } +} diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/resources/tsdb-settings.json b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/resources/tsdb-settings.json new file mode 100644 index 0000000000000..f84b1bc2a9dd1 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/resources/tsdb-settings.json @@ -0,0 +1,9 @@ +{ + "index": { + "mode": "time_series", + "routing_path": [ + "metricset", + "k8s.pod.uid" + ] + } +} diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/FieldExtractorTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/FieldExtractorTestCase.java new file mode 100644 index 0000000000000..39c21651a7e02 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/FieldExtractorTestCase.java @@ -0,0 +1,1465 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.qa.rest; + +import org.apache.http.util.EntityUtils; +import org.elasticsearch.Version; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.geo.GeometryTestUtils; +import org.elasticsearch.index.mapper.BlockLoader; +import org.elasticsearch.index.mapper.SourceFieldMapper; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.ListMatcher; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xcontent.json.JsonXContent; +import org.hamcrest.Matcher; +import org.junit.Before; + +import java.io.IOException; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.TreeMap; +import java.util.function.Function; + +import static org.elasticsearch.test.ListMatcher.matchesList; +import static org.elasticsearch.test.MapMatcher.assertMap; +import static org.elasticsearch.test.MapMatcher.matchesMap; +import static org.elasticsearch.xpack.esql.qa.rest.RestEsqlTestCase.entityToMap; +import static org.elasticsearch.xpack.esql.qa.rest.RestEsqlTestCase.runEsqlSync; +import static org.hamcrest.Matchers.closeTo; +import static org.hamcrest.Matchers.containsString; + +/** + * Creates indices with many different mappings and fetches values from them to make sure + * we can do it. Think of this as an integration test for {@link BlockLoader} + * implementations and an integration test for field resolution. + * This is a port of a test with the same name on the SQL side. + */ +public abstract class FieldExtractorTestCase extends ESRestTestCase { + private static final Logger logger = LogManager.getLogger(FieldExtractorTestCase.class); + + @Before + public void notOld() { + assumeTrue( + "support changed pretty radically in 8.12 so we don't test against 8.11", + getCachedNodesVersions().stream().allMatch(v -> Version.fromString(v).onOrAfter(Version.V_8_12_0)) + ); + } + + public void testTextField() throws IOException { + textTest().test(randomAlphaOfLength(20)); + } + + private Test textTest() { + return new Test("text").randomStoreUnlessSynthetic(); + } + + public void testKeywordField() throws IOException { + Integer ignoreAbove = randomBoolean() ? null : between(10, 50); + int length = between(10, 50); + + String value = randomAlphaOfLength(length); + keywordTest().ignoreAbove(ignoreAbove).test(value, ignoredByIgnoreAbove(ignoreAbove, length) ? null : value); + } + + private Test keywordTest() { + return new Test("keyword").randomDocValuesAndStoreUnlessSynthetic(); + } + + public void testConstantKeywordField() throws IOException { + boolean specifyInMapping = randomBoolean(); + boolean specifyInDocument = randomBoolean(); + + String value = randomAlphaOfLength(20); + new Test("constant_keyword").expectedType("keyword") + .value(specifyInMapping ? value : null) + .test(specifyInDocument ? value : null, specifyInMapping || specifyInDocument ? 
value : null); + } + + public void testWildcardField() throws IOException { + Integer ignoreAbove = randomBoolean() ? null : between(10, 50); + int length = between(10, 50); + + String value = randomAlphaOfLength(length); + new Test("wildcard").expectedType("keyword") + .ignoreAbove(ignoreAbove) + .test(value, ignoredByIgnoreAbove(ignoreAbove, length) ? null : value); + } + + public void testLong() throws IOException { + long value = randomLong(); + longTest().test(randomBoolean() ? Long.toString(value) : value, value); + } + + public void testLongWithDecimalParts() throws IOException { + long value = randomLong(); + int decimalPart = between(1, 99); + BigDecimal withDecimals = new BigDecimal(value + "." + decimalPart); + /* + * It's possible to pass the BigDecimal here without converting to a string + * but that rounds in a different way, and I'm not quite able to reproduce it + * at the time. + */ + longTest().test(withDecimals.toString(), value); + } + + public void testLongMalformed() throws IOException { + longTest().forceIgnoreMalformed().test(randomAlphaOfLength(5), null); + } + + private Test longTest() { + return new Test("long").randomIgnoreMalformedUnlessSynthetic().randomDocValuesUnlessSynthetic(); + } + + public void testInt() throws IOException { + int value = randomInt(); + intTest().test(randomBoolean() ? Integer.toString(value) : value, value); + } + + public void testIntWithDecimalParts() throws IOException { + double value = randomDoubleBetween(Integer.MIN_VALUE, Integer.MAX_VALUE, true); + intTest().test(randomBoolean() ? Double.toString(value) : value, (int) value); + } + + public void testIntMalformed() throws IOException { + intTest().forceIgnoreMalformed().test(randomAlphaOfLength(5), null); + } + + private Test intTest() { + return new Test("integer").randomIgnoreMalformedUnlessSynthetic().randomDocValuesUnlessSynthetic(); + } + + public void testShort() throws IOException { + short value = randomShort(); + shortTest().test(randomBoolean() ? Short.toString(value) : value, (int) value); + } + + public void testShortWithDecimalParts() throws IOException { + double value = randomDoubleBetween(Short.MIN_VALUE, Short.MAX_VALUE, true); + shortTest().test(randomBoolean() ? Double.toString(value) : value, (int) value); + } + + public void testShortMalformed() throws IOException { + shortTest().forceIgnoreMalformed().test(randomAlphaOfLength(5), null); + } + + private Test shortTest() { + return new Test("short").expectedType("integer").randomIgnoreMalformedUnlessSynthetic().randomDocValuesUnlessSynthetic(); + } + + public void testByte() throws IOException { + byte value = randomByte(); + byteTest().test(Byte.toString(value), (int) value); + } + + public void testByteWithDecimalParts() throws IOException { + double value = randomDoubleBetween(Byte.MIN_VALUE, Byte.MAX_VALUE, true); + byteTest().test(randomBoolean() ? 
Double.toString(value) : value, (int) value); + } + + public void testByteMalformed() throws IOException { + byteTest().forceIgnoreMalformed().test(randomAlphaOfLength(5), null); + } + + private Test byteTest() { + return new Test("byte").expectedType("integer").randomIgnoreMalformedUnlessSynthetic().randomDocValuesUnlessSynthetic(); + } + + public void testUnsignedLong() throws IOException { + assumeTrue( + "order of fields in error message inconsistent before 8.14", + getCachedNodesVersions().stream().allMatch(v -> Version.fromString(v).onOrAfter(Version.V_8_14_0)) + ); + BigInteger value = randomUnsignedLong(); + new Test("unsigned_long").randomIgnoreMalformedUnlessSynthetic() + .randomDocValuesUnlessSynthetic() + .test( + randomBoolean() ? value.toString() : value, + value.compareTo(BigInteger.valueOf(Long.MAX_VALUE)) <= 0 ? value.longValue() : value + ); + } + + public void testUnsignedLongMalformed() throws IOException { + new Test("unsigned_long").forceIgnoreMalformed().randomDocValuesUnlessSynthetic().test(randomAlphaOfLength(5), null); + } + + public void testDouble() throws IOException { + double value = randomDouble(); + new Test("double").randomIgnoreMalformedUnlessSynthetic() + .randomDocValuesUnlessSynthetic() + .test(randomBoolean() ? Double.toString(value) : value, value); + } + + public void testFloat() throws IOException { + float value = randomFloat(); + new Test("float").expectedType("double") + .randomIgnoreMalformedUnlessSynthetic() + .randomDocValuesUnlessSynthetic() + .test(randomBoolean() ? Float.toString(value) : value, (double) value); + } + + public void testScaledFloat() throws IOException { + double value = randomBoolean() ? randomDoubleBetween(-Double.MAX_VALUE, Double.MAX_VALUE, true) : randomFloat(); + double scalingFactor = randomDoubleBetween(0, Double.MAX_VALUE, false); + new Test("scaled_float").expectedType("double") + .randomIgnoreMalformedUnlessSynthetic() + .randomDocValuesUnlessSynthetic() + .scalingFactor(scalingFactor) + .test(randomBoolean() ? Double.toString(value) : value, scaledFloatMatcher(scalingFactor, value)); + } + + private Matcher scaledFloatMatcher(double scalingFactor, double d) { + long encoded = Math.round(d * scalingFactor); + double decoded = encoded / scalingFactor; + return closeTo(decoded, Math.ulp(decoded)); + } + + public void testBoolean() throws IOException { + boolean value = randomBoolean(); + new Test("boolean").ignoreMalformed(randomBoolean()) + .randomDocValuesUnlessSynthetic() + .test(randomBoolean() ? 
Boolean.toString(value) : value, value); + } + + public void testIp() throws IOException { + ipTest().test(NetworkAddress.format(randomIp(randomBoolean()))); + } + + private Test ipTest() { + return new Test("ip").ignoreMalformed(randomBoolean()); + } + + public void testVersionField() throws IOException { + new Test("version").test(randomVersionString()); + } + + public void testGeoPoint() throws IOException { + assumeTrue( + "not supported until 8.13", + getCachedNodesVersions().stream().allMatch(v -> Version.fromString(v).onOrAfter(Version.V_8_13_0)) + ); + new Test("geo_point") + // TODO we should support loading geo_point from doc values if source isn't enabled + .sourceMode(randomValueOtherThanMany(s -> s.stored() == false, () -> randomFrom(SourceMode.values()))) + .ignoreMalformed(randomBoolean()) + .storeAndDocValues(randomBoolean(), randomBoolean()) + .test(GeometryTestUtils.randomPoint(false).toString()); + } + + public void testGeoShape() throws IOException { + assumeTrue( + "not supported until 8.13", + getCachedNodesVersions().stream().allMatch(v -> Version.fromString(v).onOrAfter(Version.V_8_13_0)) + ); + new Test("geo_shape") + // TODO if source isn't enabled how can we load *something*? It's just triangles, right? + .sourceMode(randomValueOtherThanMany(s -> s.stored() == false, () -> randomFrom(SourceMode.values()))) + .ignoreMalformed(randomBoolean()) + .storeAndDocValues(randomBoolean(), randomBoolean()) + // TODO pick supported random shapes + .test(GeometryTestUtils.randomPoint(false).toString()); + } + + public void testAliasToKeyword() throws IOException { + keywordTest().createAlias().test(randomAlphaOfLength(20)); + } + + public void testAliasToText() throws IOException { + textTest().createAlias().test(randomAlphaOfLength(20)); + } + + public void testAliasToInt() throws IOException { + intTest().createAlias().test(randomInt()); + } + + public void testFlattenedUnsupported() throws IOException { + new Test("flattened").createIndex("test", "flattened"); + index("test", """ + {"flattened": {"a": "foo"}}"""); + Map result = runEsqlSync(new RestEsqlTestCase.RequestObjectBuilder().query("FROM test* | LIMIT 2")); + + assertMap( + result, + matchesMap().entry("columns", List.of(columnInfo("flattened", "unsupported"))) + .entry("values", List.of(matchesList().item(null))) + ); + } + + public void testEmptyMapping() throws IOException { + createIndex("test", index -> {}); + index("test", """ + {}"""); + + ResponseException e = expectThrows( + ResponseException.class, + () -> runEsqlSync(new RestEsqlTestCase.RequestObjectBuilder().query("FROM test* | SORT missing | LIMIT 3")) + ); + String err = EntityUtils.toString(e.getResponse().getEntity()); + assertThat(err, containsString("Unknown column [missing]")); + + // TODO this is broken in main too + // Map result = runEsqlSync(new RestEsqlTestCase.RequestObjectBuilder().query("FROM test* | LIMIT 2")); + // assertMap( + // result, + // matchesMap().entry("columns", List.of(columnInfo("f", "unsupported"), columnInfo("f.raw", "unsupported"))) + // .entry("values", List.of(matchesList().item(null).item(null))) + // ); + } + + /** + *

    +     * "text_field": {
    +     *   "type": "text",
    +     *   "fields": {
    +     *     "raw": {
    +     *       "type": "keyword"
    +     *     }
    +     *   }
    +     * }
    +     * 
    + */ + public void testTextFieldWithKeywordSubfield() throws IOException { + String value = randomAlphaOfLength(20); + Map result = new Test("text").storeAndDocValues(randomBoolean(), null).sub("raw", keywordTest()).roundTrip(value); + + assertMap( + result, + matchesMap().entry("columns", List.of(columnInfo("text_field", "text"), columnInfo("text_field.raw", "keyword"))) + .entry("values", List.of(matchesList().item(value).item(value))) + ); + } + + /** + *
    +     * "text_field": {
    +     *   "type": "text",
    +     *   "fields": {
    +     *     "int": {
    +     *       "type": "integer",
    +     *       "ignore_malformed": true/false
    +     *     }
    +     *   }
    +     * }
    +     * 
    + */ + public void testTextFieldWithIntegerSubfield() throws IOException { + int value = randomInt(); + Map result = textTest().sub("int", intTest()).roundTrip(value); + + assertMap( + result, + matchesMap().entry("columns", List.of(columnInfo("text_field", "text"), columnInfo("text_field.int", "integer"))) + .entry("values", List.of(matchesList().item(Integer.toString(value)).item(value))) + ); + } + + /** + *
    +     * "text_field": {
    +     *   "type": "text",
    +     *   "fields": {
    +     *     "int": {
    +     *       "type": "integer",
    +     *       "ignore_malformed": true
    +     *     }
    +     *   }
    +     * }
    +     * 
    + */ + public void testTextFieldWithIntegerSubfieldMalformed() throws IOException { + String value = randomAlphaOfLength(5); + Map result = textTest().sourceMode(SourceMode.DEFAULT).sub("int", intTest().ignoreMalformed(true)).roundTrip(value); + + assertMap( + result, + matchesMap().entry("columns", List.of(columnInfo("text_field", "text"), columnInfo("text_field.int", "integer"))) + .entry("values", List.of(matchesList().item(value).item(null))) + ); + } + + /** + *
    +     * "text_field": {
    +     *   "type": "text",
    +     *   "fields": {
    +     *     "ip": {
    +     *       "type": "ip",
    +     *       "ignore_malformed": true/false
    +     *     }
    +     *   }
    +     * }
    +     * 
    + */ + public void testTextFieldWithIpSubfield() throws IOException { + String value = NetworkAddress.format(randomIp(randomBoolean())); + Map result = textTest().sub("ip", ipTest()).roundTrip(value); + + assertMap( + result, + matchesMap().entry("columns", List.of(columnInfo("text_field", "text"), columnInfo("text_field.ip", "ip"))) + .entry("values", List.of(matchesList().item(value).item(value))) + ); + } + + /** + *
    +     * "text_field": {
    +     *   "type": "text",
    +     *   "fields": {
    +     *     "ip": {
    +     *       "type": "ip",
    +     *       "ignore_malformed": true
    +     *     }
    +     *   }
    +     * }
    +     * 
    + */ + public void testTextFieldWithIpSubfieldMalformed() throws IOException { + String value = randomAlphaOfLength(10); + Map result = textTest().sourceMode(SourceMode.DEFAULT).sub("ip", ipTest().ignoreMalformed(true)).roundTrip(value); + + assertMap( + result, + matchesMap().entry("columns", List.of(columnInfo("text_field", "text"), columnInfo("text_field.ip", "ip"))) + .entry("values", List.of(matchesList().item(value).item(null))) + ); + } + + /** + *
    +     * "integer_field": {
    +     *   "type": "integer",
    +     *   "ignore_malformed": true/false,
    +     *   "fields": {
    +     *     "str": {
    +     *       "type": "text/keyword"
    +     *     }
    +     *   }
    +     * }
    +     * 
    + */ + public void testIntFieldWithTextOrKeywordSubfield() throws IOException { + int value = randomInt(); + boolean text = randomBoolean(); + Map result = intTest().sub("str", text ? textTest() : keywordTest()).roundTrip(value); + + assertMap( + result, + matchesMap().entry( + "columns", + List.of(columnInfo("integer_field", "integer"), columnInfo("integer_field.str", text ? "text" : "keyword")) + ).entry("values", List.of(matchesList().item(value).item(Integer.toString(value)))) + ); + } + + /** + *
    +     * "integer_field": {
    +     *   "type": "integer",
    +     *   "ignore_malformed": true,
    +     *   "fields": {
    +     *     "str": {
    +     *       "type": "text/keyword"
    +     *     }
    +     *   }
    +     * }
    +     * 
    + */ + public void testIntFieldWithTextOrKeywordSubfieldMalformed() throws IOException { + String value = randomAlphaOfLength(5); + boolean text = randomBoolean(); + Map result = intTest().forceIgnoreMalformed().sub("str", text ? textTest() : keywordTest()).roundTrip(value); + + assertMap( + result, + matchesMap().entry( + "columns", + List.of(columnInfo("integer_field", "integer"), columnInfo("integer_field.str", text ? "text" : "keyword")) + ).entry("values", List.of(matchesList().item(null).item(value))) + ); + } + + /** + *
    +     * "ip_field": {
    +     *   "type": "ip",
    +     *   "ignore_malformed": true/false,
    +     *   "fields": {
    +     *     "str": {
    +     *       "type": "text/keyword"
    +     *     }
    +     *   }
    +     * }
    +     * 
    + */ + public void testIpFieldWithTextOrKeywordSubfield() throws IOException { + String value = NetworkAddress.format(randomIp(randomBoolean())); + boolean text = randomBoolean(); + Map result = ipTest().sub("str", text ? textTest() : keywordTest()).roundTrip(value); + + assertMap( + result, + matchesMap().entry("columns", List.of(columnInfo("ip_field", "ip"), columnInfo("ip_field.str", text ? "text" : "keyword"))) + .entry("values", List.of(matchesList().item(value).item(value))) + ); + } + + /** + *
    +     * "ip_field": {
    +     *   "type": "ip",
    +     *   "ignore_malformed": true,
    +     *   "fields": {
    +     *     "str": {
    +     *       "type": "text/keyword"
    +     *     }
    +     *   }
    +     * }
    +     * 
    + */ + public void testIpFieldWithTextOrKeywordSubfieldMalformed() throws IOException { + String value = randomAlphaOfLength(5); + boolean text = randomBoolean(); + Map result = ipTest().forceIgnoreMalformed().sub("str", text ? textTest() : keywordTest()).roundTrip(value); + + assertMap( + result, + matchesMap().entry("columns", List.of(columnInfo("ip_field", "ip"), columnInfo("ip_field.str", text ? "text" : "keyword"))) + .entry("values", List.of(matchesList().item(null).item(value))) + ); + } + + /** + *
    +     * "integer_field": {
    +     *   "type": "integer",
    +     *   "ignore_malformed": true/false,
    +     *   "fields": {
    +     *     "byte": {
    +     *       "type": "byte",
    +     *       "ignore_malformed": true/false
    +     *     }
    +     *   }
    +     * }
    +     * 
    + */ + public void testIntFieldWithByteSubfield() throws IOException { + byte value = randomByte(); + Map result = intTest().sub("byte", byteTest()).roundTrip(value); + + assertMap( + result, + matchesMap().entry("columns", List.of(columnInfo("integer_field", "integer"), columnInfo("integer_field.byte", "integer"))) + .entry("values", List.of(matchesList().item((int) value).item((int) value))) + ); + } + + /** + *
    +     * "integer_field": {
    +     *   "type": "integer",
    +     *   "ignore_malformed": true/false,
    +     *   "fields": {
    +     *     "byte": {
    +     *       "type": "byte",
    +     *       "ignore_malformed": true
    +     *     }
    +     *   }
    +     * }
    +     * 
    + */ + public void testIntFieldWithByteSubfieldTooBig() throws IOException { + int value = randomValueOtherThanMany((Integer v) -> (Byte.MIN_VALUE <= v) && (v <= Byte.MAX_VALUE), ESTestCase::randomInt); + Map result = intTest().sourceMode(SourceMode.DEFAULT) + .sub("byte", byteTest().ignoreMalformed(true)) + .roundTrip(value); + + assertMap( + result, + matchesMap().entry("columns", List.of(columnInfo("integer_field", "integer"), columnInfo("integer_field.byte", "integer"))) + .entry("values", List.of(matchesList().item(value).item(null))) + ); + } + + /** + *
    +     * "byte_field": {
    +     *   "type": "byte",
    +     *   "ignore_malformed": true/false,
    +     *   "fields": {
    +     *     "int": {
    +     *       "type": "integer",
    +     *       "ignore_malformed": true/false
    +     *     }
    +     *   }
    +     * }
    +     * 
    + */ + public void testByteFieldWithIntSubfield() throws IOException { + byte value = randomByte(); + Map result = byteTest().sub("int", intTest()).roundTrip(value); + + assertMap( + result, + matchesMap().entry("columns", List.of(columnInfo("byte_field", "integer"), columnInfo("byte_field.int", "integer"))) + .entry("values", List.of(matchesList().item((int) value).item((int) value))) + ); + } + + /** + *
    +     * "byte_field": {
    +     *   "type": "byte",
    +     *   "ignore_malformed": true,
    +     *   "fields": {
    +     *     "int": {
    +     *       "type": "integer",
    +     *       "ignore_malformed": true/false
    +     *     }
    +     *   }
    +     * }
    +     * 
    + */ + public void testByteFieldWithIntSubfieldTooBig() throws IOException { + int value = randomValueOtherThanMany((Integer v) -> (Byte.MIN_VALUE <= v) && (v <= Byte.MAX_VALUE), ESTestCase::randomInt); + Map result = byteTest().forceIgnoreMalformed().sub("int", intTest()).roundTrip(value); + + assertMap( + result, + matchesMap().entry("columns", List.of(columnInfo("byte_field", "integer"), columnInfo("byte_field.int", "integer"))) + .entry("values", List.of(matchesList().item(null).item(value))) + ); + } + + /** + * Two indices, one with: + *
    +     * "f": {
    +     *     "type": "keyword"
    +     * }
    +     * 
    + * and the other with + *
    +     * "f": {
    +     *     "type": "long"
    +     * }
    +     * 
    . + */ + public void testIncompatibleTypes() throws IOException { + keywordTest().createIndex("test1", "f"); + index("test1", """ + {"f": "f1"}"""); + longTest().createIndex("test2", "f"); + index("test2", """ + {"f": 1}"""); + + Map result = runEsqlSync(new RestEsqlTestCase.RequestObjectBuilder().query("FROM test*")); + assertMap( + result, + matchesMap().entry("columns", List.of(columnInfo("f", "unsupported"))) + .entry("values", List.of(matchesList().item(null), matchesList().item(null))) + ); + ResponseException e = expectThrows( + ResponseException.class, + () -> runEsqlSync(new RestEsqlTestCase.RequestObjectBuilder().query("FROM test* | SORT f | LIMIT 3")) + ); + String err = EntityUtils.toString(e.getResponse().getEntity()); + assertThat( + deyaml(err), + containsString( + "Cannot use field [f] due to ambiguities being mapped as [2] incompatible types: [keyword] in [test1], [long] in [test2]" + ) + ); + } + + /** + * Two indices, one with: + *
    +     * "file": {
    +     *     "type": "keyword"
    +     * }
    +     * 
    + * and the other with + *
    +     * "other": {
    +     *     "type": "keyword"
    +     * }
    +     * 
    . + */ + public void testDistinctInEachIndex() throws IOException { + keywordTest().createIndex("test1", "file"); + index("test1", """ + {"file": "f1"}"""); + keywordTest().createIndex("test2", "other"); + index("test2", """ + {"other": "o2"}"""); + + Map result = runEsqlSync(new RestEsqlTestCase.RequestObjectBuilder().query("FROM test* | SORT file, other")); + assertMap( + result, + matchesMap().entry("columns", List.of(columnInfo("file", "keyword"), columnInfo("other", "keyword"))) + .entry("values", List.of(matchesList().item("f1").item(null), matchesList().item(null).item("o2"))) + ); + } + + /** + * Two indices, one with: + *
    +     * "file": {
    +     *    "type": "keyword"
    +     * }
    +     * 
    + * and the other with + *
    +     * "file": {
    +     *    "type": "object",
    +     *    "properties": {
    +     *       "raw": {
    +     *          "type": "keyword"
    +     *       }
    +     *    }
    +     * }
    +     * 
    . + */ + public void testMergeKeywordAndObject() throws IOException { + assumeTrue( + "order of fields in error message inconsistent before 8.14", + getCachedNodesVersions().stream().allMatch(v -> Version.fromString(v).onOrAfter(Version.V_8_14_0)) + ); + keywordTest().createIndex("test1", "file"); + index("test1", """ + {"file": "f1"}"""); + createIndex("test2", index -> { + index.startObject("properties"); + { + index.startObject("file"); + { + index.field("type", "object"); + index.startObject("properties"); + { + index.startObject("raw").field("type", "keyword").endObject(); + } + index.endObject(); + } + index.endObject(); + } + index.endObject(); + }); + index("test2", """ + {"file": {"raw": "o2"}}"""); + + ResponseException e = expectThrows( + ResponseException.class, + () -> runEsqlSync(new RestEsqlTestCase.RequestObjectBuilder().query("FROM test* | SORT file, file.raw | LIMIT 3")) + ); + String err = EntityUtils.toString(e.getResponse().getEntity()); + assertThat( + deyaml(err), + containsString( + "Cannot use field [file] due to ambiguities" + + " being mapped as [2] incompatible types: [keyword] in [test1], [object] in [test2]" + ) + ); + + Map result = runEsqlSync(new RestEsqlTestCase.RequestObjectBuilder().query("FROM test* | SORT file.raw | LIMIT 2")); + assertMap( + result, + matchesMap().entry("columns", List.of(columnInfo("file", "unsupported"), columnInfo("file.raw", "keyword"))) + .entry("values", List.of(matchesList().item(null).item("o2"), matchesList().item(null).item(null))) + ); + } + + /** + * One index with an unsupported field and a supported sub-field. The supported sub-field + * is marked as unsupported because the parent is unsupported. Mapping: + *
    +     * "f": {
    +     *    "type": "ip_range"  ----- The type here doesn't matter, but it has to be one we don't support
    +     *    "fields": {
    +     *       "raw": {
    +     *          "type": "keyword"
    +     *       }
    +     *    }
    +     * }
    +     * 
    . + */ + public void testPropagateUnsupportedToSubFields() throws IOException { + createIndex("test", index -> { + index.startObject("properties"); + index.startObject("f"); + { + index.field("type", "ip_range"); + index.startObject("fields"); + { + index.startObject("raw").field("type", "keyword").endObject(); + } + index.endObject(); + } + index.endObject(); + index.endObject(); + }); + index("test", """ + {"f": "192.168.0.1/24"}"""); + + ResponseException e = expectThrows( + ResponseException.class, + () -> runEsqlSync(new RestEsqlTestCase.RequestObjectBuilder().query("FROM test* | SORT f, f.raw | LIMIT 3")) + ); + String err = EntityUtils.toString(e.getResponse().getEntity()); + assertThat(err, containsString("Cannot use field [f] with unsupported type [ip_range]")); + assertThat(err, containsString("Cannot use field [f.raw] with unsupported type [ip_range]")); + + Map result = runEsqlSync(new RestEsqlTestCase.RequestObjectBuilder().query("FROM test* | LIMIT 2")); + assertMap( + result, + matchesMap().entry("columns", List.of(columnInfo("f", "unsupported"), columnInfo("f.raw", "unsupported"))) + .entry("values", List.of(matchesList().item(null).item(null))) + ); + } + + /** + * Two indices, one with: + *
    +     * "f": {
    +     *    "type": "ip_range"  ----- The type here doesn't matter, but it has to be one we don't support
    +     * }
    +     * 
    + * and the other with + *
    +     * "f": {
    +     *    "type": "object",
    +     *    "properties": {
    +     *       "raw": {
    +     *          "type": "keyword"
    +     *       }
    +     *    }
    +     * }
    +     * 
    . + */ + public void testMergeUnsupportedAndObject() throws IOException { + assumeTrue( + "order of fields in error message inconsistent before 8.14", + getCachedNodesVersions().stream().allMatch(v -> Version.fromString(v).onOrAfter(Version.V_8_14_0)) + ); + createIndex("test1", index -> { + index.startObject("properties"); + index.startObject("f").field("type", "ip_range").endObject(); + index.endObject(); + }); + index("test1", """ + {"f": "192.168.0.1/24"}"""); + createIndex("test2", index -> { + index.startObject("properties"); + { + index.startObject("f"); + { + index.field("type", "object"); + index.startObject("properties"); + { + index.startObject("raw").field("type", "keyword").endObject(); + } + index.endObject(); + } + index.endObject(); + } + index.endObject(); + }); + index("test2", """ + {"f": {"raw": "o2"}}"""); + + ResponseException e = expectThrows( + ResponseException.class, + () -> runEsqlSync(new RestEsqlTestCase.RequestObjectBuilder().query("FROM test* | SORT f, f.raw | LIMIT 3")) + ); + String err = EntityUtils.toString(e.getResponse().getEntity()); + assertThat(err, containsString("Cannot use field [f] with unsupported type [ip_range]")); + assertThat(err, containsString("Cannot use field [f.raw] with unsupported type [ip_range]")); + + Map result = runEsqlSync(new RestEsqlTestCase.RequestObjectBuilder().query("FROM test* | LIMIT 2")); + assertMap( + result, + matchesMap().entry("columns", List.of(columnInfo("f", "unsupported"), columnInfo("f.raw", "unsupported"))) + .entry("values", List.of(matchesList().item(null).item(null), matchesList().item(null).item(null))) + ); + } + + /** + * Two indices, one with: + *
    +     * "emp_no": {
    +     *     "type": "integer"
    +     * }
    +     * 
    + * and the other with + *
    +     * "emp_no": {
    +     *     "type": "integer",
    +     *     "doc_values": false
    +     * }
    +     * 
    . + */ + public void testIntegerDocValuesConflict() throws IOException { + assumeTrue( + "order of fields in error message inconsistent before 8.14", + getCachedNodesVersions().stream().allMatch(v -> Version.fromString(v).onOrAfter(Version.V_8_14_0)) + ); + intTest().sourceMode(SourceMode.DEFAULT).storeAndDocValues(null, true).createIndex("test1", "emp_no"); + index("test1", """ + {"emp_no": 1}"""); + intTest().sourceMode(SourceMode.DEFAULT).storeAndDocValues(null, false).createIndex("test2", "emp_no"); + index("test2", """ + {"emp_no": 2}"""); + + Map result = runEsqlSync(new RestEsqlTestCase.RequestObjectBuilder().query("FROM test* | SORT emp_no | LIMIT 2")); + assertMap( + result, + matchesMap().entry("columns", List.of(columnInfo("emp_no", "integer"))) + .entry("values", List.of(matchesList().item(1), matchesList().item(2))) + ); + } + + /** + * Two indices, one with: + *
    +     * "emp_no": {
    +     *     "type": "long"
    +     * }
    +     * </pre>
    +     * and the other with
    +     * <pre>
    +     * "emp_no": {
    +     *     "type": "integer"
    +     * }
    +     * </pre>
    +     *
    +     * In an ideal world we'd promote the {@code integer} to a {@code long} and just go.
    +     */
    +    public void testLongIntegerConflict() throws IOException {
    +        assumeTrue(
    +            "order of fields in error message inconsistent before 8.14",
    +            getCachedNodesVersions().stream().allMatch(v -> Version.fromString(v).onOrAfter(Version.V_8_14_0))
    +        );
    +        longTest().sourceMode(SourceMode.DEFAULT).createIndex("test1", "emp_no");
    +        index("test1", """
    +            {"emp_no": 1}""");
    +        intTest().sourceMode(SourceMode.DEFAULT).createIndex("test2", "emp_no");
    +        index("test2", """
    +            {"emp_no": 2}""");
    +
    +        ResponseException e = expectThrows(
    +            ResponseException.class,
    +            () -> runEsqlSync(new RestEsqlTestCase.RequestObjectBuilder().query("FROM test* | SORT emp_no | LIMIT 3"))
    +        );
    +        String err = EntityUtils.toString(e.getResponse().getEntity());
    +        assertThat(
    +            deyaml(err),
    +            containsString(
    +                "Cannot use field [emp_no] due to ambiguities being "
    +                    + "mapped as [2] incompatible types: [integer] in [test2], [long] in [test1]"
    +            )
    +        );
    +
    +        Map<String, Object> result = runEsqlSync(new RestEsqlTestCase.RequestObjectBuilder().query("FROM test* | LIMIT 2"));
    +        assertMap(
    +            result,
    +            matchesMap().entry("columns", List.of(columnInfo("emp_no", "unsupported")))
    +                .entry("values", List.of(matchesList().item(null), matchesList().item(null)))
    +        );
    +    }
    +
    +    /**
    +     * Two indices, one with:
    +     * <pre>
    +     * "emp_no": {
    +     *     "type": "integer"
    +     * }
    +     * </pre>
    +     * and the other with
    +     * <pre>
    +     * "emp_no": {
    +     *     "type": "short"
    +     * }
    +     * </pre>
    +     *
    +     * In an ideal world we'd promote the {@code short} to an {@code integer} and just go.
    +     */
    +    public void testIntegerShortConflict() throws IOException {
    +        assumeTrue(
    +            "order of fields in error message inconsistent before 8.14",
    +            getCachedNodesVersions().stream().allMatch(v -> Version.fromString(v).onOrAfter(Version.V_8_14_0))
    +        );
    +        intTest().sourceMode(SourceMode.DEFAULT).createIndex("test1", "emp_no");
    +        index("test1", """
    +            {"emp_no": 1}""");
    +        shortTest().sourceMode(SourceMode.DEFAULT).createIndex("test2", "emp_no");
    +        index("test2", """
    +            {"emp_no": 2}""");
    +
    +        ResponseException e = expectThrows(
    +            ResponseException.class,
    +            () -> runEsqlSync(new RestEsqlTestCase.RequestObjectBuilder().query("FROM test* | SORT emp_no | LIMIT 3"))
    +        );
    +        String err = EntityUtils.toString(e.getResponse().getEntity());
    +        assertThat(
    +            deyaml(err),
    +            containsString(
    +                "Cannot use field [emp_no] due to ambiguities being "
    +                    + "mapped as [2] incompatible types: [integer] in [test1], [short] in [test2]"
    +            )
    +        );
    +
    +        Map<String, Object> result = runEsqlSync(new RestEsqlTestCase.RequestObjectBuilder().query("FROM test* | LIMIT 2"));
    +        assertMap(
    +            result,
    +            matchesMap().entry("columns", List.of(columnInfo("emp_no", "unsupported")))
    +                .entry("values", List.of(matchesList().item(null), matchesList().item(null)))
    +        );
    +    }
    +
    +    /**
    +     * Two indices, one with:
    +     * <pre>
    +     * "foo": {
    +     *   "type": "object",
    +     *   "properties": {
    +     *     "emp_no": {
    +     *       "type": "integer"
    +     *     }
    +     * }
    +     * </pre>
    +     * and the other with
    +     * <pre>
    +     * "foo": {
    +     *   "type": "object",
    +     *   "properties": {
    +     *     "emp_no": {
    +     *       "type": "keyword"
    +     *     }
    +     * }
    +     * </pre>
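The object/object conflict on foo.emp_no is reported the same way as the top-level conflicts above. For reference while reading the flattened hunk that follows, a sketch of the assertion it makes; the message text is taken verbatim from that hunk, while the wrapper method name is invented here and the Hamcrest assertThat/containsString static imports are assumed to be the ones this test class already uses:

    private void assertFooEmpNoAmbiguityError(String err) {
        // err is the body of the failed ES|QL response; deyaml(...) strips YAML line folding first.
        assertThat(
            deyaml(err),
            containsString(
                "Cannot use field [foo.emp_no] due to ambiguities being "
                    + "mapped as [2] incompatible types: [integer] in [test1], [keyword] in [test2]"
            )
        );
    }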
    . + */ + public void testTypeConflictInObject() throws IOException { + assumeTrue( + "order of fields in error message inconsistent before 8.14", + getCachedNodesVersions().stream().allMatch(v -> Version.fromString(v).onOrAfter(Version.V_8_14_0)) + ); + createIndex("test1", empNoInObject("integer")); + index("test1", """ + {"foo": {"emp_no": 1}}"""); + createIndex("test2", empNoInObject("keyword")); + index("test2", """ + {"foo": {"emp_no": "cat"}}"""); + + Map result = runEsqlSync(new RestEsqlTestCase.RequestObjectBuilder().query("FROM test* | LIMIT 3")); + assertMap(result, matchesMap().entry("columns", List.of(columnInfo("foo.emp_no", "unsupported"))).extraOk()); + + ResponseException e = expectThrows( + ResponseException.class, + () -> runEsqlSync(new RestEsqlTestCase.RequestObjectBuilder().query("FROM test* | SORT foo.emp_no | LIMIT 3")) + ); + String err = EntityUtils.toString(e.getResponse().getEntity()); + assertThat( + deyaml(err), + containsString( + "Cannot use field [foo.emp_no] due to ambiguities being " + + "mapped as [2] incompatible types: [integer] in [test1], [keyword] in [test2]" + ) + ); + } + + private CheckedConsumer empNoInObject(String empNoType) { + return index -> { + index.startObject("properties"); + { + index.startObject("foo"); + { + index.field("type", "object"); + index.startObject("properties"); + { + index.startObject("emp_no").field("type", empNoType).endObject(); + } + index.endObject(); + } + index.endObject(); + } + index.endObject(); + }; + } + + private enum SourceMode { + DEFAULT { + @Override + void sourceMapping(XContentBuilder builder) {} + + @Override + boolean stored() { + return true; + } + }, + STORED { + @Override + void sourceMapping(XContentBuilder builder) throws IOException { + builder.startObject(SourceFieldMapper.NAME).field("mode", "stored").endObject(); + } + + @Override + boolean stored() { + return true; + } + }, + /* TODO add support to this test for disabling _source + DISABLED { + @Override + void sourceMapping(XContentBuilder builder) throws IOException { + builder.startObject(SourceFieldMapper.NAME).field("mode", "disabled").endObject(); + } + + @Override + boolean stored() { + return false; + } + }, + */ + SYNTHETIC { + @Override + void sourceMapping(XContentBuilder builder) throws IOException { + builder.startObject(SourceFieldMapper.NAME).field("mode", "synthetic").endObject(); + } + + @Override + boolean stored() { + return false; + } + }; + + abstract void sourceMapping(XContentBuilder builder) throws IOException; + + abstract boolean stored(); + } + + private boolean ignoredByIgnoreAbove(Integer ignoreAbove, int length) { + return ignoreAbove != null && length > ignoreAbove; + } + + private BigInteger randomUnsignedLong() { + BigInteger big = BigInteger.valueOf(randomNonNegativeLong()).shiftLeft(1); + return big.add(randomBoolean() ? BigInteger.ONE : BigInteger.ZERO); + } + + private static String randomVersionString() { + return randomVersionNumber() + (randomBoolean() ? "" : randomPrerelease()); + } + + private static String randomVersionNumber() { + int numbers = between(1, 3); + String v = Integer.toString(between(0, 100)); + for (int i = 1; i < numbers; i++) { + v += "." 
+ between(0, 100); + } + return v; + } + + private static String randomPrerelease() { + if (rarely()) { + return randomFrom("alpha", "beta", "prerelease", "whatever"); + } + return randomFrom("alpha", "beta", "") + randomVersionNumber(); + } + + private record StoreAndDocValues(Boolean store, Boolean docValues) {} + + private static class Test { + private final String type; + private final Map subFields = new TreeMap<>(); + + private SourceMode sourceMode; + private String expectedType; + private Function ignoreMalformed; + private Function storeAndDocValues = s -> new StoreAndDocValues(null, null); + private Double scalingFactor; + private Integer ignoreAbove; + private Object value; + private boolean createAlias; + + Test(String type) { + this.type = type; + // Default the expected return type to the field type. + this.expectedType = type; + } + + Test sourceMode(SourceMode sourceMode) { + this.sourceMode = sourceMode; + return this; + } + + Test expectedType(String expectedType) { + this.expectedType = expectedType; + return this; + } + + Test ignoreMalformed(boolean ignoreMalformed) { + this.ignoreMalformed = s -> ignoreMalformed; + return this; + } + + /** + * Enable {@code ignore_malformed} and disable synthetic _source because + * most fields don't support ignore_malformed and synthetic _source. + */ + Test forceIgnoreMalformed() { + return this.sourceMode(randomValueOtherThan(SourceMode.SYNTHETIC, () -> randomFrom(SourceMode.values()))).ignoreMalformed(true); + } + + Test randomIgnoreMalformedUnlessSynthetic() { + this.ignoreMalformed = s -> s == SourceMode.SYNTHETIC ? false : randomBoolean(); + return this; + } + + Test storeAndDocValues(Boolean store, Boolean docValues) { + this.storeAndDocValues = s -> new StoreAndDocValues(store, docValues); + return this; + } + + Test randomStoreUnlessSynthetic() { + this.storeAndDocValues = s -> new StoreAndDocValues(s == SourceMode.SYNTHETIC ? 
true : randomBoolean(), null); + return this; + } + + Test randomDocValuesAndStoreUnlessSynthetic() { + this.storeAndDocValues = s -> { + if (s == SourceMode.SYNTHETIC) { + boolean store = randomBoolean(); + return new StoreAndDocValues(store, store == false || randomBoolean()); + } + return new StoreAndDocValues(randomBoolean(), randomBoolean()); + }; + return this; + } + + Test randomDocValuesUnlessSynthetic() { + this.storeAndDocValues = s -> new StoreAndDocValues(null, s == SourceMode.SYNTHETIC || randomBoolean()); + return this; + } + + Test scalingFactor(double scalingFactor) { + this.scalingFactor = scalingFactor; + return this; + } + + Test ignoreAbove(Integer ignoreAbove) { + this.ignoreAbove = ignoreAbove; + return this; + } + + Test value(Object value) { + this.value = value; + return this; + } + + Test createAlias() { + this.createAlias = true; + return this; + } + + Test sub(String name, Test sub) { + this.subFields.put(name, sub); + return this; + } + + Map roundTrip(Object value) throws IOException { + String fieldName = type + "_field"; + createIndex("test", fieldName); + if (randomBoolean()) { + createIndex("test2", fieldName); + } + + if (value == null) { + logger.info("indexing empty doc"); + index("test", "{}"); + } else { + logger.info("indexing {}::{}", value, value.getClass().getName()); + index("test", Strings.toString(JsonXContent.contentBuilder().startObject().field(fieldName, value).endObject())); + } + + return fetchAll(); + } + + void test(Object value) throws IOException { + test(value, value); + } + + /** + * Round trip the value through and index configured by the parameters + * of this test and assert that it matches the {@code expectedValues} + * which can be either the expected value or a subclass of {@link Matcher}. + */ + void test(Object value, Object expectedValue) throws IOException { + Map result = roundTrip(value); + + logger.info("expecting {}", expectedValue == null ? null : expectedValue + "::" + expectedValue.getClass().getName()); + + List> columns = new ArrayList<>(); + columns.add(columnInfo(type + "_field", expectedType)); + if (createAlias) { + columns.add(columnInfo("a.b.c." + type + "_field_alias", expectedType)); + columns.add(columnInfo(type + "_field_alias", expectedType)); + } + Collections.sort(columns, Comparator.comparing(m -> (String) m.get("name"))); + + ListMatcher values = matchesList(); + values = values.item(expectedValue); + if (createAlias) { + values = values.item(expectedValue); + values = values.item(expectedValue); + } + + assertMap(result, matchesMap().entry("columns", columns).entry("values", List.of(values))); + } + + void createIndex(String name, String fieldName) throws IOException { + if (sourceMode == null) { + sourceMode(randomFrom(SourceMode.values())); + } + logger.info("source_mode: {}", sourceMode); + + FieldExtractorTestCase.createIndex(name, index -> { + sourceMode.sourceMapping(index); + index.startObject("properties"); + { + index.startObject(fieldName); + fieldMapping(index); + index.endObject(); + + if (createAlias) { + // create two aliases - one within a hierarchy, the other just a simple field w/o hierarchy + index.startObject(fieldName + "_alias"); + { + index.field("type", "alias"); + index.field("path", fieldName); + } + index.endObject(); + index.startObject("a.b.c." 
+ fieldName + "_alias"); + { + index.field("type", "alias"); + index.field("path", fieldName); + } + index.endObject(); + } + } + index.endObject(); + }); + } + + private void fieldMapping(XContentBuilder builder) throws IOException { + builder.field("type", type); + if (ignoreMalformed != null) { + boolean v = ignoreMalformed.apply(sourceMode); + builder.field("ignore_malformed", v); + ignoreMalformed = m -> v; + } + StoreAndDocValues sd = storeAndDocValues.apply(sourceMode); + storeAndDocValues = m -> sd; + if (sd.docValues != null) { + builder.field("doc_values", sd.docValues); + } + if (sd.store != null) { + builder.field("store", sd.store); + } + if (scalingFactor != null) { + builder.field("scaling_factor", scalingFactor); + } + if (ignoreAbove != null) { + builder.field("ignore_above", ignoreAbove); + } + if (value != null) { + builder.field("value", value); + } + + if (subFields.isEmpty() == false) { + builder.startObject("fields"); + for (Map.Entry sub : subFields.entrySet()) { + builder.startObject(sub.getKey()); + if (sub.getValue().sourceMode == null) { + sub.getValue().sourceMode = sourceMode; + } else if (sub.getValue().sourceMode != sourceMode) { + throw new IllegalStateException("source_mode can't be configured on sub-fields"); + } + sub.getValue().fieldMapping(builder); + builder.endObject(); + } + builder.endObject(); + } + } + + private Map fetchAll() throws IOException { + return runEsqlSync(new RestEsqlTestCase.RequestObjectBuilder().query("FROM test* | LIMIT 10")); + } + } + + private static Map columnInfo(String name, String type) { + return Map.of("name", name, "type", type); + } + + private static void index(String name, String... docs) throws IOException { + Request request = new Request("POST", "/" + name + "/_bulk"); + request.addParameter("refresh", "true"); + StringBuilder bulk = new StringBuilder(); + for (String doc : docs) { + bulk.append(String.format(Locale.ROOT, """ + {"index":{}} + %s + """, doc)); + } + request.setJsonEntity(bulk.toString()); + Response response = client().performRequest(request); + Map result = entityToMap(response.getEntity(), XContentType.JSON); + assertMap(result, matchesMap().extraOk().entry("errors", false)); + } + + private static void createIndex(String name, CheckedConsumer mapping) throws IOException { + Request request = new Request("PUT", "/" + name); + XContentBuilder index = JsonXContent.contentBuilder().prettyPrint().startObject(); + index.startObject("settings"); + { + index.field("index.number_of_replicas", 0); + index.field("index.number_of_shards", 1); + } + index.endObject(); + index.startObject("mappings"); + mapping.accept(index); + index.endObject(); + index.endObject(); + String configStr = Strings.toString(index); + logger.info("index: {} {}", name, configStr); + request.setJsonEntity(configStr); + client().performRequest(request); + } + + /** + * Yaml adds newlines and some indentation which we don't want to match. 
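The deyaml helper that follows exists because the REST error body may come back as YAML, which folds long strings across lines with a trailing backslash, indentation, and a leading backslash. A small illustration of what the regex removes; the folded string here is made up, and the pattern is the escaped-for-Java equivalent of the helper's:

    String folded = "Cannot use field [emp_no] due to ambiguities being \\\n        \\mapped as [2] incompatible types";
    String joined = folded.replaceAll("\\\\\n\\s+\\\\", "");
    // joined == "Cannot use field [emp_no] due to ambiguities being mapped as [2] incompatible types"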
+ */ + private String deyaml(String err) { + return err.replaceAll("\\\\\n\s+\\\\", ""); + } +} diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java index 302fda9b331e3..9763c362c9b4b 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java @@ -48,6 +48,7 @@ import static org.elasticsearch.xpack.esql.CsvTestUtils.multiValuesAwareCsvToStringArray; public class CsvTestsDataLoader { + private static final int BULK_DATA_SIZE = 100_000; private static final TestsDataset EMPLOYEES = new TestsDataset("employees", "mapping-default.json", "employees.csv"); private static final TestsDataset HOSTS = new TestsDataset("hosts", "mapping-hosts.json", "hosts.csv"); private static final TestsDataset APPS = new TestsDataset("apps", "mapping-apps.json", "apps.csv"); @@ -243,8 +244,6 @@ private static void loadCsvData( CheckedBiFunction p, Logger logger ) throws IOException { - // The indexName is optional for a bulk request, but we use it for routing in MultiClusterSpecIT. - Request request = new Request("POST", "/" + indexName + "/_bulk"); StringBuilder builder = new StringBuilder(); try (BufferedReader reader = org.elasticsearch.xpack.ql.TestUtils.reader(resource)) { String line; @@ -359,10 +358,22 @@ private static void loadCsvData( } } lineNumber++; + if (builder.length() > BULK_DATA_SIZE) { + sendBulkRequest(indexName, builder, client, logger); + builder.setLength(0); + } } - builder.append("\n"); } + if (builder.length() > 0) { + sendBulkRequest(indexName, builder, client, logger); + } + } + private static void sendBulkRequest(String indexName, StringBuilder builder, RestClient client, Logger logger) throws IOException { + // The indexName is optional for a bulk request, but we use it for routing in MultiClusterSpecIT. 
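The surrounding CsvTestsDataLoader change stops accumulating one giant bulk body and instead flushes whenever the buffer grows past BULK_DATA_SIZE (100_000 characters). A minimal sketch of that flush-by-size loop; BULK_DATA_SIZE and the sendBulkRequest(...) signature match the patch, while the loadAll wrapper, its arguments, and the assumption that each action string already ends in a newline are illustrative only:

    // Sketch only; it lives conceptually inside CsvTestsDataLoader, so the RestClient, Logger
    // and IOException imports come from the enclosing class.
    static void loadAll(String indexName, Iterable<String> bulkActions, RestClient client, Logger logger) throws IOException {
        StringBuilder builder = new StringBuilder();
        for (String action : bulkActions) {
            builder.append(action);                                    // an index/doc NDJSON pair ending in '\n'
            if (builder.length() > BULK_DATA_SIZE) {
                sendBulkRequest(indexName, builder, client, logger);   // ship the current chunk
                builder.setLength(0);                                  // and start the next one
            }
        }
        if (builder.length() > 0) {
            sendBulkRequest(indexName, builder, client, logger);       // flush the remainder
        }
    }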
+ builder.append("\n"); + logger.debug("Sending bulk request of [{}] bytes for [{}]", builder.length(), indexName); + Request request = new Request("POST", "/" + indexName + "/_bulk"); request.setJsonEntity(builder.toString()); request.addParameter("refresh", "false"); // will be _forcemerge'd next Response response = client.performRequest(request); @@ -373,7 +384,7 @@ private static void loadCsvData( Map result = XContentHelper.convertToMap(xContentType.xContent(), content, false); Object errors = result.get("errors"); if (Boolean.FALSE.equals(errors)) { - logger.info("Data loading of [{}] OK", indexName); + logger.info("Data loading of [{}] bytes into [{}] OK", builder.length(), indexName); } else { throw new IOException("Data loading of [" + indexName + "] failed with errors: " + errors); } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec index 3d9f9aa6e1c27..bda103080adc0 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec @@ -232,3 +232,26 @@ emp_no:integer |languages:integer |byte2bool:boolean |short2bool:boolean 10020 |null |null |null 10030 |3 |true |true ; + +mvSlice#[skip:-8.13.99, reason:newly added in 8.14] +row a = [true, false, false, true] +| eval a1 = mv_slice(a, 1), a2 = mv_slice(a, 2, 3); + +a:boolean | a1:boolean | a2:boolean +[true, false, false, true] | false | [false, true] +; + +mvSliceEmp#[skip:-8.13.99, reason:newly added in 8.14] +from employees +| eval a1 = mv_slice(is_rehired, 0) +| keep emp_no, is_rehired, a1 +| sort emp_no +| limit 5; + +emp_no:integer | is_rehired:boolean | a1:boolean +10001 | [false,true] | false +10002 | [false,false] | false +10003 | null | null +10004 | true | true +10005 | [false,false,false,true] | false +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/conditional.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/conditional.csv-spec index 177e169387642..64c5a7358ce22 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/conditional.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/conditional.csv-spec @@ -127,3 +127,44 @@ error_rate:double | hour:date 0.6 |2023-10-23T13:00:00.000Z // end::docsCaseHourlyErrorRate-result[] ; + + +nullOnMultivaluesMathOperation#[skip:-8.13.99,reason:fixed in 8.14+] +ROW a = 5, b = [ 1, 2 ]| EVAL sum = a + b| LIMIT 1 | WHERE sum IS NULL; +warning:Line 1:37: evaluation of [a + b] failed, treating result as null. Only first 20 failures recorded. +warning:Line 1:37: java.lang.IllegalArgumentException: single-value function encountered multi-value + +a:integer | b:integer | sum:integer +5 | [1, 2] | null +; + + +notNullOnMultivaluesMathOperation#[skip:-8.13.99,reason:fixed in 8.14+] +ROW a = 5, b = [ 1, 2 ]| EVAL sum = a + b| LIMIT 1 | WHERE sum IS NOT NULL; +warning:Line 1:37: evaluation of [a + b] failed, treating result as null. Only first 20 failures recorded. 
+warning:Line 1:37: java.lang.IllegalArgumentException: single-value function encountered multi-value + +a:integer | b:integer | sum:integer +; + + +nullOnMultivaluesComparisonOperation#[skip:-8.13.99,reason:fixed in 8.14+] +ROW a = 5, b = [ 1, 2 ]| EVAL same = a == b| LIMIT 1 | WHERE same IS NULL; + +a:integer | b:integer | same:boolean +5 | [1, 2] | null +; + + +notNullOnMultivaluesComparisonOperation#[skip:-8.13.99,reason:fixed in 8.14+] +ROW a = 5, b = [ 1, 2 ]| EVAL same = a == b| LIMIT 1 | WHERE same IS NOT NULL; + +a:integer | b:integer | same:boolean +; + + +notNullOnMultivaluesComparisonOperationWithPartialMatch#[skip:-8.13.99,reason:fixed in 8.14+] +ROW a = 5, b = [ 5, 2 ]| EVAL same = a == b| LIMIT 1 | WHERE same IS NOT NULL; + +a:integer | b:integer | same:boolean +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/dissect.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/dissect.csv-spec index 1133b24cd1cf3..225ea37688689 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/dissect.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/dissect.csv-spec @@ -146,6 +146,14 @@ Bezalel Simmel | Bezalel | Simmel ; +overwriteNameAfterSort#[skip:-8.13.0] +from employees | sort emp_no ASC | dissect first_name "Ge%{emp_no}gi" | limit 1 | rename emp_no as first_name_fragment | keep first_name_fragment +; + +first_name_fragment:keyword +or +; + # for now it calculates only based on the first value multivalueInput from employees | where emp_no <= 10006 | dissect job_positions "%{a} %{b} %{c}" | sort emp_no | keep emp_no, a, b, c; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich-IT_tests_only.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich-IT_tests_only.csv-spec index e107fc2ffea63..2fa567996290d 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich-IT_tests_only.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich-IT_tests_only.csv-spec @@ -51,7 +51,6 @@ emp_no:integer | x:keyword | lang:keyword ; - withAliasSort from employees | eval x = to_string(languages) | keep emp_no, x | sort emp_no | limit 3 | enrich languages_policy on x with lang = language_name; @@ -63,6 +62,17 @@ emp_no:integer | x:keyword | lang:keyword ; +withAliasOverwriteName#[skip:-8.13.0] +from employees | sort emp_no +| eval x = to_string(languages) | enrich languages_policy on x with emp_no = language_name +| keep emp_no | limit 1 +; + +emp_no:keyword +French +; + + withAliasAndPlain from employees | sort emp_no desc | limit 3 | eval x = to_string(languages) | keep emp_no, x | enrich languages_policy on x with lang = language_name, language_name; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec index 21ce5cf5c7fc2..7d18d2616e376 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec @@ -200,14 +200,14 @@ Chirstian. |Chirstian.Koblick|Chirstian.KoblickChirstian.|Chirstian Kyoichi. |Kyoichi.Maliniak |Kyoichi.MaliniakKyoichi. 
|Kyoichi ; -roundArrays#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +roundArrays#[skip:-8.13.99, reason:Alert order changed in 8.14] row a = [1.2], b = [2.4, 7.9] | eval c = round(a), d = round(b), e = round([1.2]), f = round([1.2, 4.6]), g = round([1.14], 1), h = round([1.14], [1, 2]); +warning:Line 1:56: evaluation of [round(b)] failed, treating result as null. Only first 20 failures recorded. +warning:Line 1:56: java.lang.IllegalArgumentException: single-value function encountered multi-value warning:Line 1:88: evaluation of [round([1.2, 4.6])] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:88: java.lang.IllegalArgumentException: single-value function encountered multi-value warning:Line 1:133: evaluation of [round([1.14], [1, 2])] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:133: java.lang.IllegalArgumentException: single-value function encountered multi-value -warning:Line 1:56: evaluation of [round(b)] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:56: java.lang.IllegalArgumentException: single-value function encountered multi-value a:double | b:double | c:double | d: double | e:double | f:double | g:double | h:double 1.2 | [2.4, 7.9] | 1.0 | null | 1.0 | null | 1.1 | null @@ -368,4 +368,57 @@ avg_height_feet:double // end::evalUnnamedColumnStats-result[] ; +overwriteName#[skip:-8.13.0] +FROM employees +| SORT emp_no asc +| EVAL full_name = concat(first_name, " ", last_name) +| EVAL emp_no = concat(full_name, " ", to_string(emp_no)) +| KEEP full_name, emp_no +| LIMIT 3; + +full_name:keyword | emp_no:keyword +Georgi Facello | Georgi Facello 10001 +Bezalel Simmel | Bezalel Simmel 10002 +Parto Bamford | Parto Bamford 10003 +; + +overwriteNameWhere#[skip:-8.13.0] +FROM employees +| SORT emp_no ASC +| EVAL full_name = concat(first_name, " ", last_name) +| EVAL emp_no = concat(full_name, " ", to_string(emp_no)) +| WHERE emp_no == "Bezalel Simmel 10002" +| KEEP full_name, emp_no +| LIMIT 3; + +full_name:keyword | emp_no:keyword +Bezalel Simmel | Bezalel Simmel 10002 +; + +overwriteNameAfterSort#[skip:-8.13.0] +FROM employees +| SORT emp_no ASC +| EVAL emp_no = -emp_no +| LIMIT 3 +| KEEP emp_no +; + +emp_no:i +-10001 +-10002 +-10003 +; + +overwriteNameAfterSortChained#[skip:-8.13.0] +FROM employees +| SORT emp_no ASC +| EVAL x = emp_no, y = -emp_no, emp_no = y +| LIMIT 3 +| KEEP emp_no +; +emp_no:i +-10001 +-10002 +-10003 +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec index f56266f868d44..0138ec1a70989 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec @@ -224,6 +224,21 @@ row a = [1.1, 2.1, 2.1] | eval da = mv_dedupe(a); [1.1, 2.1, 2.1] | [1.1, 2.1] ; +mvSliceEmp#[skip:-8.13.99, reason:newly added in 8.14] +from employees +| eval a1 = mv_slice(salary_change, 0, 1) +| keep emp_no, salary_change, a1 +| sort emp_no +| limit 5; + +emp_no:integer | salary_change:double | a1:double +10001 | 1.19 | 1.19 +10002 | [-7.23,11.17] | [-7.23,11.17] +10003 | [12.82,14.68] | [12.82,14.68] +10004 | [-0.35,1.13,3.65,13.48] | [-0.35, 1.13] +10005 | [-2.14,13.07] | [-2.14,13.07] +; + autoBucket FROM employees | WHERE hire_date >= "1985-01-01T00:00:00Z" AND hire_date < "1986-01-01T00:00:00Z" diff --git 
a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/grok.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/grok.csv-spec index f71f51d42c45f..fbe31deeb0f97 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/grok.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/grok.csv-spec @@ -122,6 +122,15 @@ Bezalel Simmel | Bezalel | Simmel ; +overwriteNameAfterSort#[skip:-8.13.0] +from employees | sort emp_no ASC | grok first_name "Ge(?[a-z]{2})gi" | limit 1 | rename emp_no as first_name_fragment | keep first_name_fragment +; + +first_name_fragment:keyword +or +; + + multivalueOutput row a = "foo bar" | grok a "%{WORD:b} %{WORD:b}"; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec index baf6da2cd0bde..63bc452bf5bd5 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec @@ -384,6 +384,151 @@ row a = [1, 2, 2, 3] | eval da = mv_dedupe(a); [1, 2, 2, 3] | [1, 2, 3] ; +mvSlice#[skip:-8.13.99, reason:newly added in 8.14] +// tag::mv_slice_positive[] +row a = [1, 2, 2, 3] +| eval a1 = mv_slice(a, 1), a2 = mv_slice(a, 2, 3) +// end::mv_slice_positive[] +; +// tag::mv_slice_positive-result[] +a:integer | a1:integer | a2:integer +[1, 2, 2, 3] | 2 | [2, 3] +// end::mv_slice_positive-result[] +; + +mvSliceNegativeOffset#[skip:-8.13.99, reason:newly added in 8.14] +// tag::mv_slice_negative[] +row a = [1, 2, 2, 3] +| eval a1 = mv_slice(a, -2), a2 = mv_slice(a, -3, -1) +// end::mv_slice_negative[] +; +// tag::mv_slice_negative-result[] +a:integer | a1:integer | a2:integer +[1, 2, 2, 3] | 2 | [2, 2, 3] +// end::mv_slice_negative-result[] +; + +mvSliceSingle#[skip:-8.13.99, reason:newly added in 8.14] +row a = 1 +| eval a1 = mv_slice(a, 0); + +a:integer | a1:integer +1 | 1 +; + +mvSliceOutOfBound#[skip:-8.13.99, reason:newly added in 8.14] +row a = [1, 2, 2, 3] +| eval a1 = mv_slice(a, 4), a2 = mv_slice(a, 2, 6), a3 = mv_slice(a, 4, 6); + +a:integer | a1:integer | a2:integer | a3:integer +[1, 2, 2, 3] | null | [2, 3] | null +; + +mvSliceEmpInt#[skip:-8.13.99, reason:newly added in 8.14] +from employees +| eval a1 = mv_slice(salary_change.int, 0, 1) +| keep emp_no, salary_change.int, a1 +| sort emp_no +| limit 5; + +emp_no:integer | salary_change.int:integer | a1:integer +10001 | 1 | 1 +10002 | [-7, 11] | [-7, 11] +10003 | [12, 14] | [12, 14] +10004 | [0, 1, 3, 13] | [0, 1] +10005 | [-2, 13] | [-2, 13] +; + +mvSliceEmpIntSingle#[skip:-8.13.99, reason:newly added in 8.14] +from employees +| eval a1 = mv_slice(salary_change.int, 1) +| keep emp_no, salary_change.int, a1 +| sort emp_no +| limit 5; + +emp_no:integer | salary_change.int:integer | a1:integer +10001 | 1 | null +10002 | [-7, 11] | 11 +10003 | [12, 14] | 14 +10004 | [0, 1, 3, 13] | 1 +10005 | [-2, 13] | 13 +; + +mvSliceEmpIntEndOutOfBound#[skip:-8.13.99, reason:newly added in 8.14] +from employees +| eval a1 = mv_slice(salary_change.int, 1, 4) +| keep emp_no, salary_change.int, a1 +| sort emp_no +| limit 5; + +emp_no:integer | salary_change.int:integer | a1:integer +10001 | 1 | null +10002 | [-7, 11] | 11 +10003 | [12, 14] | 14 +10004 | [0, 1, 3, 13] | [1, 3, 13] +10005 | [-2, 13] | 13 +; + +mvSliceEmpIntOutOfBound#[skip:-8.13.99, reason:newly added in 8.14] +from employees +| eval a1 = mv_slice(salary_change.int, 2, 4) +| keep emp_no, salary_change.int, a1 +| sort emp_no +| limit 5; + +emp_no:integer | 
salary_change.int:integer | a1:integer +10001 | 1 | null +10002 | [-7, 11] | null +10003 | [12, 14] | null +10004 | [0, 1, 3, 13] | [3, 13] +10005 | [-2, 13] | null +; + +mvSliceEmpIntStartOutOfBoundNegative#[skip:-8.13.99, reason:newly added in 8.14] +from employees +| eval a1 = mv_slice(salary_change.int, -5, -2) +| keep emp_no, salary_change.int, a1 +| sort emp_no +| limit 5; + +emp_no:integer | salary_change.int:integer | a1:integer +10001 | 1 | null +10002 | [-7, 11] | -7 +10003 | [12, 14] | 12 +10004 | [0, 1, 3, 13] | [0, 1, 3] +10005 | [-2, 13] | -2 +; + +mvSliceEmpIntOutOfBoundNegative#[skip:-8.13.99, reason:newly added in 8.14] +from employees +| eval a1 = mv_slice(salary_change.int, -5, -3) +| keep emp_no, salary_change.int, a1 +| sort emp_no +| limit 5; + +emp_no:integer | salary_change.int:integer | a1:integer +10001 | 1 | null +10002 | [-7, 11] | null +10003 | [12, 14] | null +10004 | [0, 1, 3, 13] | [0, 1] +10005 | [-2, 13] | null +; + +mvSliceEmpLong#[skip:-8.13.99, reason:newly added in 8.14] +from employees +| eval a1 = mv_slice(salary_change.long, 0, 1) +| keep emp_no, salary_change.long, a1 +| sort emp_no +| limit 5; + +emp_no:integer | salary_change.long:long | a1:long +10001 | 1 | 1 +10002 | [-7, 11] | [-7, 11] +10003 | [12, 14] | [12, 14] +10004 | [0, 1, 3, 13] | [0, 1] +10005 | [-2, 13] | [-2, 13] +; + autoBucket // tag::auto_bucket[] FROM employees diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec index 0b2ce54d5fd22..54256b3420c82 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec @@ -277,3 +277,47 @@ lo0 |fe81::cae2:65ff:fece:feb9 eth0 |127.0.0.3 eth0 |fe80::cae2:65ff:fece:fec1 ; + +mvSlice#[skip:-8.13.99, reason:newly added in 8.14] +from hosts +| where host == "epsilon" +| eval a1 = mv_slice(ip1, 0, 1) +| keep host, ip1, a1 +| sort host, ip1 +| limit 5; + +host:keyword | ip1:ip | a1:ip +epsilon | [127.0.0.1, 127.0.0.2, 127.0.0.3] | [127.0.0.1, 127.0.0.2] +epsilon | fe80::cae2:65ff:fece:fec1 | fe80::cae2:65ff:fece:fec1 +epsilon | [fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0] | [fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0] +; + +mvSlice#[skip:-8.13.99, reason:newly added in 8.14] +from hosts +| where host == "epsilon" +| eval a1 = mv_slice(ip1, 0, 1) +| keep host, ip1, a1 +| sort host, ip1 +| limit 5; + +host:keyword | ip1:ip | a1:ip +epsilon | [127.0.0.1, 127.0.0.2, 127.0.0.3] | [127.0.0.1, 127.0.0.2] +epsilon | fe80::cae2:65ff:fece:fec1 | fe80::cae2:65ff:fece:fec1 +epsilon | [fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0] | [fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0] +; + +mvZip#[skip:-8.13.99, reason:newly added in 8.14] +from hosts +| eval zip = mv_zip(to_string(description), to_string(ip0), "@@") +| keep host, description, ip0, zip +| sort host desc, ip0 +| limit 5 +; + +host:keyword | description:text | ip0:ip | zip:keyword +gamma | gamma k8s server | fe80::cae2:65ff:fece:feb9 | gamma k8s server@@fe80::cae2:65ff:fece:feb9 +gamma | gamma k8s server | fe80::cae2:65ff:fece:feb9 | gamma k8s server@@fe80::cae2:65ff:fece:feb9 +epsilon | epsilon gw instance | [fe80::cae2:65ff:fece:feb9, fe80::cae2:65ff:fece:fec0, fe80::cae2:65ff:fece:fec1] | [epsilon gw instance@@fe80::cae2:65ff:fece:feb9, fe80::cae2:65ff:fece:fec0, fe80::cae2:65ff:fece:fec1] +epsilon | [epsilon host, epsilon2 host] | [fe81::cae2:65ff:fece:feb9, 
fe82::cae2:65ff:fece:fec0] | [epsilon host@@fe81::cae2:65ff:fece:feb9, epsilon2 host@@fe82::cae2:65ff:fece:fec0] +epsilon | null | null | null +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec index 6887a1bbe9069..d38dce49020c4 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec @@ -54,7 +54,9 @@ mv_last |"boolean|cartesian_point|cartesian_shape|date|double|g mv_max |"boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version mv_max(v:boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version)" |v | "boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version" | "" |"boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version" | "Reduce a multivalued field to a single valued field containing the maximum value." | false | false | false mv_median |"double|integer|long|unsigned_long mv_median(v:double|integer|long|unsigned_long)" |v |"double|integer|long|unsigned_long" | "" |"double|integer|long|unsigned_long" | "Converts a multivalued field into a single valued field containing the median value." | false | false | false mv_min |"boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version mv_min(v:boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version)" |v | "boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version" | "" |"boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version" | "Reduce a multivalued field to a single valued field containing the minimum value." | false | false | false +mv_slice |"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version mv_slice(v:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version, start:integer, ?end:integer)" |[v, start, end] | "[boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version, integer, integer]" | "[A multivalued field, start index, end index (included)]" |"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version" | "Returns a subset of the multivalued field using the start and end index values." | [false, false, true] | false | false mv_sum |"double|integer|long|unsigned_long mv_sum(v:double|integer|long|unsigned_long)" |v |"double|integer|long|unsigned_long" | "" |"double|integer|long|unsigned_long" | "Converts a multivalued field into a single valued field containing the sum of all of the values." | false | false | false +mv_zip |"keyword mv_zip(mvLeft:keyword|text, mvRight:keyword|text, ?delim:keyword|text)" |[mvLeft, mvRight, delim] | ["keyword|text", "keyword|text", "keyword|text"] | [A multivalued field, A multivalued field, delimiter] | "keyword" | "Combines the values from two multivalued fields with a delimiter that joins them together." | [false, false, true] | false | false now |date now() | null |null | null |date | "Returns current date and time." | null | false | false percentile |"double|integer|long percentile(field:double|integer|long, percentile:double|integer|long)" |[field, percentile] |["double|integer|long, double|integer|long"] |["", ""] |"double|integer|long" | "The value at which a certain percentage of observed values occur." 
| [false, false] | false | true pi |double pi() | null | null | null |double | "The ratio of a circle’s circumference to its diameter." | null | false | false @@ -68,6 +70,8 @@ sinh |"double sinh(n:double|integer|long|unsigned_long)"|n split |"keyword split(str:keyword|text, delim:keyword|text)" |[str, delim] |["keyword|text", "keyword|text"] |["", ""] |keyword | "Split a single valued string into multiple strings." | [false, false] | false | false sqrt |"double sqrt(n:double|integer|long|unsigned_long)" |n |"double|integer|long|unsigned_long" | "" |double | "Returns the square root of a number." | false | false | false st_centroid |"geo_point|cartesian_point st_centroid(field:geo_point|cartesian_point)" |field |"geo_point|cartesian_point" | "" |"geo_point|cartesian_point" | "The centroid of a spatial field." | false | false | true +st_x |"double st_x(point:geo_point|cartesian_point)" |point |"geo_point|cartesian_point" | "" |double | "Extracts the x-coordinate from a point geometry." | false | false | false +st_y |"double st_y(point:geo_point|cartesian_point)" |point |"geo_point|cartesian_point" | "" |double | "Extracts the y-coordinate from a point geometry." | false | false | false starts_with |"boolean starts_with(str:keyword|text, prefix:keyword|text)" |[str, prefix] |["keyword|text", "keyword|text"] |["", ""] |boolean | "Returns a boolean that indicates whether a keyword string starts with another string" | [false, false] | false | false substring |"keyword substring(str:keyword|text, start:integer, ?length:integer)" |[str, start, length] |["keyword|text", "integer", "integer"] |["", "", ""] |keyword | "Returns a substring of a string, specified by a start position and an optional length" | [false, false, true]| false | false sum |"long sum(field:double|integer|long)" |field |"double|integer|long" | "" |long | "The sum of a numeric field." 
| false | false | true @@ -103,7 +107,7 @@ trim |"keyword|text trim(str:keyword|text)" ; -showFunctionsSynopsis#[skip:-8.12.99] +showFunctionsSynopsis#[skip:-8.13.99] show functions | keep synopsis; synopsis:keyword @@ -151,7 +155,9 @@ double e() "boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version mv_max(v:boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version)" "double|integer|long|unsigned_long mv_median(v:double|integer|long|unsigned_long)" "boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version mv_min(v:boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version)" +"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version mv_slice(v:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version, start:integer, ?end:integer)" "double|integer|long|unsigned_long mv_sum(v:double|integer|long|unsigned_long)" +"keyword mv_zip(mvLeft:keyword|text, mvRight:keyword|text, ?delim:keyword|text)" date now() "double|integer|long percentile(field:double|integer|long, percentile:double|integer|long)" double pi() @@ -165,6 +171,8 @@ double pi() "keyword split(str:keyword|text, delim:keyword|text)" "double sqrt(n:double|integer|long|unsigned_long)" "geo_point|cartesian_point st_centroid(field:geo_point|cartesian_point)" +"double st_x(point:geo_point|cartesian_point)" +"double st_y(point:geo_point|cartesian_point)" "boolean starts_with(str:keyword|text, prefix:keyword|text)" "keyword substring(str:keyword|text, start:integer, ?length:integer)" "long sum(field:double|integer|long)" @@ -216,9 +224,9 @@ sinh | "double sinh(n:double|integer|long|unsigned_long)" // see https://github.com/elastic/elasticsearch/issues/102120 -countFunctions#[skip:-8.12.99] +countFunctions#[skip:-8.13.99] show functions | stats a = count(*), b = count(*), c = count(*) | mv_expand c; a:long | b:long | c:long -90 | 90 | 90 +94 | 94 | 94 ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec index 5c4aae740910b..5c789cee0492f 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec @@ -69,6 +69,30 @@ c:geo_point POINT(39.58327988510707 20.619513023697994) ; +centroidFromString4#[skip:-8.13.99, reason:st_x and st_y added in 8.14] +ROW wkt = ["POINT(42.97109629958868 14.7552534006536)", "POINT(75.80929149873555 22.72774917539209)", "POINT(-0.030548143003023033 24.37553649504829)"] +| MV_EXPAND wkt +| EVAL pt = TO_GEOPOINT(wkt) +| STATS c = ST_CENTROID(pt) +| EVAL x = ST_X(c), y = ST_Y(c); + +c:geo_point | x:double | y:double +POINT(39.58327988510707 20.619513023697994) | 39.58327988510707 | 20.619513023697994 +; + +stXFromString#[skip:-8.13.99, reason:st_x and st_y added in 8.14] +// tag::st_x_y[] +ROW point = TO_GEOPOINT("POINT(42.97109629958868 14.7552534006536)") +| EVAL x = ST_X(point), y = ST_Y(point) +// end::st_x_y[] +; + +// tag::st_x_y-result[] +point:geo_point | x:double | y:double +POINT(42.97109629958868 14.7552534006536) | 42.97109629958868 | 14.7552534006536 +// end::st_x_y-result[] +; + simpleLoad#[skip:-8.12.99, reason:spatial type geo_point improved precision in 8.13] FROM airports | WHERE scalerank == 9 | SORT abbrev | WHERE length(name) > 12; @@ -87,6 +111,18 @@ WIIT | Bandar Lampung | POINT(105.2667 -5.45) | Indonesia ZAH | Zāhedān | POINT(60.8628 
29.4964) | Iran | POINT(60.900708564915 29.4752941956573) | Zahedan Int'l | 9 | mid ; +stXFromAirportsSupportsNull#[skip:-8.13.99, reason:st_x and st_y added in 8.14] +FROM airports +| EVAL x = FLOOR(ABS(ST_X(city_location))/200), y = FLOOR(ABS(ST_Y(city_location))/100) +| STATS c = count(*) BY x, y +| SORT c DESC +; + +c:long | x:double | y:double +872 | 0.0 | 0.0 +19 | null | null +; + centroidFromAirports#[skip:-8.12.99, reason:st_centroid added in 8.13] // tag::st_centroid-airports[] FROM airports @@ -399,6 +435,15 @@ c:cartesian_point POINT(3949.163965353159 1078.2645465797348) ; +stXFromCartesianString#[skip:-8.13.99, reason:st_x and st_y added in 8.14] +ROW point = TO_CARTESIANPOINT("POINT(4297.10986328125 -1475.530029296875)") +| EVAL x = ST_X(point), y = ST_Y(point) +; + +point:cartesian_point | x:double | y:double +POINT(4297.10986328125 -1475.530029296875) | 4297.10986328125 | -1475.530029296875 +; + simpleCartesianLoad#[skip:-8.12.99, reason:spatial type cartesian_point improved precision in 8.13] FROM airports_web | WHERE scalerank == 9 | SORT abbrev | WHERE length(name) > 12; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index 97b36859c1419..4aff4c689c077 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -1195,3 +1195,25 @@ ROW a = 1 | STATS couNt(*) | SORT `couNt(*)` couNt(*):l 1 ; + +isNullWithStatsCount_On_TextField +FROM airports +| EVAL s = name, x = name +| WHERE s IS NULL +| STATS c = COUNT(x) +; + +c:l +0 +; + +isNotNullWithStatsCount_On_TextField +FROM airports +| EVAL s = name, x = name +| WHERE s IS NOT NULL +| STATS c = COUNT(x) +; + +c:l +891 +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec index bdbcfb3cb49e9..e6c73f9054c51 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec @@ -696,6 +696,50 @@ ROW a=[10, 9, 8] // end::mv_concat-to_string-result[] ; +mvSliceEmp#[skip:-8.13.99, reason:newly added in 8.14] +from employees +| eval a1 = mv_slice(salary_change.keyword, 0, 1) +| keep emp_no, salary_change.keyword, a1 +| sort emp_no +| limit 5; + +emp_no:integer | salary_change.keyword:keyword | a1:keyword +10001 | 1.19 | 1.19 +10002 | [-7.23,11.17] | [-7.23,11.17] +10003 | [12.82,14.68] | [12.82,14.68] +10004 | [-0.35,1.13,13.48,3.65] | [-0.35,1.13] +10005 | [-2.14,13.07] | [-2.14,13.07] +; + +mvZip#[skip:-8.13.99, reason:newly added in 8.14] +// tag::mv_zip[] +ROW a = ["x", "y", "z"], b = ["1", "2"] +| EVAL c = mv_zip(a, b, "-") +| KEEP a, b, c +// end::mv_zip[] +; + +// tag::mv_zip-result[] +a:keyword | b:keyword | c:keyword +[x, y, z] | [1 ,2] | [x-1, y-2, z] +// end::mv_zip-result[] +; + +mvZipEmp#[skip:-8.13.99, reason:newly added in 8.14] +from employees +| eval full_name = mv_zip(first_name, last_name, " "), full_name_2 = mv_zip(last_name, first_name), jobs = mv_zip(job_positions, salary_change.keyword, "#") +| keep emp_no, full_name, full_name_2, job_positions, salary_change.keyword, jobs +| sort emp_no +| limit 5; + +emp_no:integer | full_name:keyword | full_name_2:keyword | job_positions:keyword | salary_change.keyword:keyword | jobs:keyword +10001 | Georgi Facello | Facello,Georgi | [Accountant, Senior Python Developer] | 1.19 | 
[Accountant#1.19, Senior Python Developer] +10002 | Bezalel Simmel | Simmel,Bezalel | Senior Team Lead | [-7.23,11.17] | [Senior Team Lead#-7.23, 11.17] +10003 | Parto Bamford | Bamford,Parto | null | [12.82, 14.68] | [12.82, 14.68] +10004 | Chirstian Koblick | Koblick,Chirstian | [Head Human Resources, Reporting Analyst, Support Engineer, Tech Lead] | [-0.35, 1.13, 13.48, 3.65] | [Head Human Resources#-0.35, Reporting Analyst#1.13, Support Engineer#13.48, Tech Lead#3.65] +10005 | Kyoichi Maliniak | Maliniak,Kyoichi | null | [-2.14,13.07] | [-2.14,13.07] +; + showTextFields from hosts | where host == "beta" | keep host, host_group, description; ignoreOrder:true diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java index 09f20d7ca4ffd..2b59e6dd1957d 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java @@ -460,6 +460,9 @@ protected EsqlQueryResponse runQuery(String query) { EsqlQueryRequest request = new EsqlQueryRequest(); request.query(query); request.pragmas(AbstractEsqlIntegTestCase.randomPragmas()); + if (randomBoolean()) { + request.profile(true); + } return client(LOCAL_CLUSTER).execute(EsqlQueryAction.INSTANCE, request).actionGet(30, TimeUnit.SECONDS); } diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EnrichIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EnrichIT.java index a589e1cc468a5..3bb6bb35b5210 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EnrichIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EnrichIT.java @@ -18,6 +18,8 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.operator.DriverProfile; +import org.elasticsearch.compute.operator.DriverStatus; import org.elasticsearch.compute.operator.exchange.ExchangeService; import org.elasticsearch.core.TimeValue; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; @@ -58,12 +60,15 @@ import java.util.concurrent.TimeUnit; import java.util.function.Function; +import static java.util.Collections.emptyList; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.transport.AbstractSimpleTransportTestCase.IGNORE_DESERIALIZATION_ERRORS_SETTING; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.not; public class EnrichIT extends AbstractEsqlIntegTestCase { @@ -121,6 +126,9 @@ protected EsqlQueryResponse run(EsqlQueryRequest request) { } else { client = client(); } + if (request.profile() == false && randomBoolean()) { + request.profile(true); + } if (randomBoolean()) { setRequestCircuitBreakerLimit(ByteSizeValue.ofBytes(between(256, 4096))); try { @@ -318,6 +326,27 @@ public void testTopN() { } } + public void testProfile() { + 
EsqlQueryRequest request = new EsqlQueryRequest(); + request.pragmas(randomPragmas()); + request.query("from listens* | sort timestamp DESC | limit 1 | " + enrichSongCommand() + " | KEEP timestamp, artist"); + request.profile(true); + try (var resp = run(request)) { + Iterator row = resp.values().next(); + assertThat(row.next(), equalTo(7L)); + assertThat(row.next(), equalTo("Linkin Park")); + EsqlQueryResponse.Profile profile = resp.profile(); + assertNotNull(profile); + List drivers = profile.drivers(); + assertThat(drivers.size(), greaterThanOrEqualTo(2)); + List enrichOperators = drivers.stream() + .flatMap(d -> d.operators().stream()) + .filter(status -> status.operator().startsWith("EnrichOperator")) + .toList(); + assertThat(enrichOperators, not(emptyList())); + } + } + /** * Some enrich queries that could fail without the PushDownEnrich rule. */ diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceBooleanEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceBooleanEvaluator.java new file mode 100644 index 0000000000000..6c4174bd9cca9 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceBooleanEvaluator.java @@ -0,0 +1,140 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.expression.function.Warnings; +import org.elasticsearch.xpack.ql.InvalidArgumentException; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link MvSlice}. + * This class is generated. Do not edit it. 
+ */ +public final class MvSliceBooleanEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator field; + + private final EvalOperator.ExpressionEvaluator start; + + private final EvalOperator.ExpressionEvaluator end; + + private final DriverContext driverContext; + + public MvSliceBooleanEvaluator(Source source, EvalOperator.ExpressionEvaluator field, + EvalOperator.ExpressionEvaluator start, EvalOperator.ExpressionEvaluator end, + DriverContext driverContext) { + this.warnings = new Warnings(source); + this.field = field; + this.start = start; + this.end = end; + this.driverContext = driverContext; + } + + @Override + public Block eval(Page page) { + try (BooleanBlock fieldBlock = (BooleanBlock) field.eval(page)) { + try (IntBlock startBlock = (IntBlock) start.eval(page)) { + try (IntBlock endBlock = (IntBlock) end.eval(page)) { + return eval(page.getPositionCount(), fieldBlock, startBlock, endBlock); + } + } + } + } + + public BooleanBlock eval(int positionCount, BooleanBlock fieldBlock, IntBlock startBlock, + IntBlock endBlock) { + try(BooleanBlock.Builder result = driverContext.blockFactory().newBooleanBlockBuilder(positionCount)) { + position: for (int p = 0; p < positionCount; p++) { + boolean allBlocksAreNulls = true; + if (!fieldBlock.isNull(p)) { + allBlocksAreNulls = false; + } + if (startBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (startBlock.getValueCount(p) != 1) { + if (startBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (endBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (endBlock.getValueCount(p) != 1) { + if (endBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (allBlocksAreNulls) { + result.appendNull(); + continue position; + } + try { + MvSlice.process(result, p, fieldBlock, startBlock.getInt(startBlock.getFirstValueIndex(p)), endBlock.getInt(endBlock.getFirstValueIndex(p))); + } catch (InvalidArgumentException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + @Override + public String toString() { + return "MvSliceBooleanEvaluator[" + "field=" + field + ", start=" + start + ", end=" + end + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(field, start, end); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + private final EvalOperator.ExpressionEvaluator.Factory start; + + private final EvalOperator.ExpressionEvaluator.Factory end; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory field, + EvalOperator.ExpressionEvaluator.Factory start, + EvalOperator.ExpressionEvaluator.Factory end) { + this.source = source; + this.field = field; + this.start = start; + this.end = end; + } + + @Override + public MvSliceBooleanEvaluator get(DriverContext context) { + return new MvSliceBooleanEvaluator(source, field.get(context), start.get(context), end.get(context), context); + } + + @Override + public String toString() { + return "MvSliceBooleanEvaluator[" + "field=" + field + ", start=" + start + ", end=" + end + 
"]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceBytesRefEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceBytesRefEvaluator.java new file mode 100644 index 0000000000000..4a4a169e45aee --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceBytesRefEvaluator.java @@ -0,0 +1,140 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.expression.function.Warnings; +import org.elasticsearch.xpack.ql.InvalidArgumentException; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link MvSlice}. + * This class is generated. Do not edit it. + */ +public final class MvSliceBytesRefEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator field; + + private final EvalOperator.ExpressionEvaluator start; + + private final EvalOperator.ExpressionEvaluator end; + + private final DriverContext driverContext; + + public MvSliceBytesRefEvaluator(Source source, EvalOperator.ExpressionEvaluator field, + EvalOperator.ExpressionEvaluator start, EvalOperator.ExpressionEvaluator end, + DriverContext driverContext) { + this.warnings = new Warnings(source); + this.field = field; + this.start = start; + this.end = end; + this.driverContext = driverContext; + } + + @Override + public Block eval(Page page) { + try (BytesRefBlock fieldBlock = (BytesRefBlock) field.eval(page)) { + try (IntBlock startBlock = (IntBlock) start.eval(page)) { + try (IntBlock endBlock = (IntBlock) end.eval(page)) { + return eval(page.getPositionCount(), fieldBlock, startBlock, endBlock); + } + } + } + } + + public BytesRefBlock eval(int positionCount, BytesRefBlock fieldBlock, IntBlock startBlock, + IntBlock endBlock) { + try(BytesRefBlock.Builder result = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + position: for (int p = 0; p < positionCount; p++) { + boolean allBlocksAreNulls = true; + if (!fieldBlock.isNull(p)) { + allBlocksAreNulls = false; + } + if (startBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (startBlock.getValueCount(p) != 1) { + if (startBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (endBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (endBlock.getValueCount(p) != 1) { + if (endBlock.getValueCount(p) > 1) { + warnings.registerException(new 
IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (allBlocksAreNulls) { + result.appendNull(); + continue position; + } + try { + MvSlice.process(result, p, fieldBlock, startBlock.getInt(startBlock.getFirstValueIndex(p)), endBlock.getInt(endBlock.getFirstValueIndex(p))); + } catch (InvalidArgumentException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + @Override + public String toString() { + return "MvSliceBytesRefEvaluator[" + "field=" + field + ", start=" + start + ", end=" + end + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(field, start, end); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + private final EvalOperator.ExpressionEvaluator.Factory start; + + private final EvalOperator.ExpressionEvaluator.Factory end; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory field, + EvalOperator.ExpressionEvaluator.Factory start, + EvalOperator.ExpressionEvaluator.Factory end) { + this.source = source; + this.field = field; + this.start = start; + this.end = end; + } + + @Override + public MvSliceBytesRefEvaluator get(DriverContext context) { + return new MvSliceBytesRefEvaluator(source, field.get(context), start.get(context), end.get(context), context); + } + + @Override + public String toString() { + return "MvSliceBytesRefEvaluator[" + "field=" + field + ", start=" + start + ", end=" + end + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceDoubleEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceDoubleEvaluator.java new file mode 100644 index 0000000000000..3e4a83cec68b7 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceDoubleEvaluator.java @@ -0,0 +1,140 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.expression.function.Warnings; +import org.elasticsearch.xpack.ql.InvalidArgumentException; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link MvSlice}. + * This class is generated. Do not edit it. 
+ */ +public final class MvSliceDoubleEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator field; + + private final EvalOperator.ExpressionEvaluator start; + + private final EvalOperator.ExpressionEvaluator end; + + private final DriverContext driverContext; + + public MvSliceDoubleEvaluator(Source source, EvalOperator.ExpressionEvaluator field, + EvalOperator.ExpressionEvaluator start, EvalOperator.ExpressionEvaluator end, + DriverContext driverContext) { + this.warnings = new Warnings(source); + this.field = field; + this.start = start; + this.end = end; + this.driverContext = driverContext; + } + + @Override + public Block eval(Page page) { + try (DoubleBlock fieldBlock = (DoubleBlock) field.eval(page)) { + try (IntBlock startBlock = (IntBlock) start.eval(page)) { + try (IntBlock endBlock = (IntBlock) end.eval(page)) { + return eval(page.getPositionCount(), fieldBlock, startBlock, endBlock); + } + } + } + } + + public DoubleBlock eval(int positionCount, DoubleBlock fieldBlock, IntBlock startBlock, + IntBlock endBlock) { + try(DoubleBlock.Builder result = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + position: for (int p = 0; p < positionCount; p++) { + boolean allBlocksAreNulls = true; + if (!fieldBlock.isNull(p)) { + allBlocksAreNulls = false; + } + if (startBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (startBlock.getValueCount(p) != 1) { + if (startBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (endBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (endBlock.getValueCount(p) != 1) { + if (endBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (allBlocksAreNulls) { + result.appendNull(); + continue position; + } + try { + MvSlice.process(result, p, fieldBlock, startBlock.getInt(startBlock.getFirstValueIndex(p)), endBlock.getInt(endBlock.getFirstValueIndex(p))); + } catch (InvalidArgumentException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + @Override + public String toString() { + return "MvSliceDoubleEvaluator[" + "field=" + field + ", start=" + start + ", end=" + end + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(field, start, end); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + private final EvalOperator.ExpressionEvaluator.Factory start; + + private final EvalOperator.ExpressionEvaluator.Factory end; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory field, + EvalOperator.ExpressionEvaluator.Factory start, + EvalOperator.ExpressionEvaluator.Factory end) { + this.source = source; + this.field = field; + this.start = start; + this.end = end; + } + + @Override + public MvSliceDoubleEvaluator get(DriverContext context) { + return new MvSliceDoubleEvaluator(source, field.get(context), start.get(context), end.get(context), context); + } + + @Override + public String toString() { + return "MvSliceDoubleEvaluator[" + "field=" + field + ", start=" + start + ", end=" + end + "]"; + } + } 
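// Illustrative sketch (editorial, not generated code): the slice semantics that MvSlice.process,
// added later in this patch, implements and that each generated MvSlice*Evaluator delegates to.
// Offsets are zero-based and inclusive on both ends; negative offsets count back from the end of
// the multivalue; a slice that misses the value range entirely yields null; and the evaluator turns
// an InvalidArgumentException thrown by checkStartEnd into a registered warning plus null.
//
//   field = [1, 2, 3, 4, 5]
//   mv_slice(field, 1, 3)   -> [2, 3, 4]    start and end are both included
//   mv_slice(field, -3, -1) -> [3, 4, 5]    negative offsets count from the end
//   mv_slice(field, 2, 2)   -> 3            start == end yields a single value
//   mv_slice(field, 7, 9)   -> null         slice entirely out of range
//   mv_slice(field, -2, 1)  -> null plus a warning, since start and end must share a sign (checkStartEnd)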
+} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceIntEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceIntEvaluator.java new file mode 100644 index 0000000000000..fc54dfb1f8336 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceIntEvaluator.java @@ -0,0 +1,139 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.expression.function.Warnings; +import org.elasticsearch.xpack.ql.InvalidArgumentException; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link MvSlice}. + * This class is generated. Do not edit it. + */ +public final class MvSliceIntEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator field; + + private final EvalOperator.ExpressionEvaluator start; + + private final EvalOperator.ExpressionEvaluator end; + + private final DriverContext driverContext; + + public MvSliceIntEvaluator(Source source, EvalOperator.ExpressionEvaluator field, + EvalOperator.ExpressionEvaluator start, EvalOperator.ExpressionEvaluator end, + DriverContext driverContext) { + this.warnings = new Warnings(source); + this.field = field; + this.start = start; + this.end = end; + this.driverContext = driverContext; + } + + @Override + public Block eval(Page page) { + try (IntBlock fieldBlock = (IntBlock) field.eval(page)) { + try (IntBlock startBlock = (IntBlock) start.eval(page)) { + try (IntBlock endBlock = (IntBlock) end.eval(page)) { + return eval(page.getPositionCount(), fieldBlock, startBlock, endBlock); + } + } + } + } + + public IntBlock eval(int positionCount, IntBlock fieldBlock, IntBlock startBlock, + IntBlock endBlock) { + try(IntBlock.Builder result = driverContext.blockFactory().newIntBlockBuilder(positionCount)) { + position: for (int p = 0; p < positionCount; p++) { + boolean allBlocksAreNulls = true; + if (!fieldBlock.isNull(p)) { + allBlocksAreNulls = false; + } + if (startBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (startBlock.getValueCount(p) != 1) { + if (startBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (endBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (endBlock.getValueCount(p) != 1) { + if (endBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + 
if (allBlocksAreNulls) { + result.appendNull(); + continue position; + } + try { + MvSlice.process(result, p, fieldBlock, startBlock.getInt(startBlock.getFirstValueIndex(p)), endBlock.getInt(endBlock.getFirstValueIndex(p))); + } catch (InvalidArgumentException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + @Override + public String toString() { + return "MvSliceIntEvaluator[" + "field=" + field + ", start=" + start + ", end=" + end + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(field, start, end); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + private final EvalOperator.ExpressionEvaluator.Factory start; + + private final EvalOperator.ExpressionEvaluator.Factory end; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory field, + EvalOperator.ExpressionEvaluator.Factory start, + EvalOperator.ExpressionEvaluator.Factory end) { + this.source = source; + this.field = field; + this.start = start; + this.end = end; + } + + @Override + public MvSliceIntEvaluator get(DriverContext context) { + return new MvSliceIntEvaluator(source, field.get(context), start.get(context), end.get(context), context); + } + + @Override + public String toString() { + return "MvSliceIntEvaluator[" + "field=" + field + ", start=" + start + ", end=" + end + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceLongEvaluator.java new file mode 100644 index 0000000000000..d6a1e7e45cabf --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceLongEvaluator.java @@ -0,0 +1,140 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.expression.function.Warnings; +import org.elasticsearch.xpack.ql.InvalidArgumentException; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link MvSlice}. + * This class is generated. Do not edit it. 
+ */ +public final class MvSliceLongEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator field; + + private final EvalOperator.ExpressionEvaluator start; + + private final EvalOperator.ExpressionEvaluator end; + + private final DriverContext driverContext; + + public MvSliceLongEvaluator(Source source, EvalOperator.ExpressionEvaluator field, + EvalOperator.ExpressionEvaluator start, EvalOperator.ExpressionEvaluator end, + DriverContext driverContext) { + this.warnings = new Warnings(source); + this.field = field; + this.start = start; + this.end = end; + this.driverContext = driverContext; + } + + @Override + public Block eval(Page page) { + try (LongBlock fieldBlock = (LongBlock) field.eval(page)) { + try (IntBlock startBlock = (IntBlock) start.eval(page)) { + try (IntBlock endBlock = (IntBlock) end.eval(page)) { + return eval(page.getPositionCount(), fieldBlock, startBlock, endBlock); + } + } + } + } + + public LongBlock eval(int positionCount, LongBlock fieldBlock, IntBlock startBlock, + IntBlock endBlock) { + try(LongBlock.Builder result = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { + position: for (int p = 0; p < positionCount; p++) { + boolean allBlocksAreNulls = true; + if (!fieldBlock.isNull(p)) { + allBlocksAreNulls = false; + } + if (startBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (startBlock.getValueCount(p) != 1) { + if (startBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (endBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (endBlock.getValueCount(p) != 1) { + if (endBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (allBlocksAreNulls) { + result.appendNull(); + continue position; + } + try { + MvSlice.process(result, p, fieldBlock, startBlock.getInt(startBlock.getFirstValueIndex(p)), endBlock.getInt(endBlock.getFirstValueIndex(p))); + } catch (InvalidArgumentException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + @Override + public String toString() { + return "MvSliceLongEvaluator[" + "field=" + field + ", start=" + start + ", end=" + end + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(field, start, end); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + private final EvalOperator.ExpressionEvaluator.Factory start; + + private final EvalOperator.ExpressionEvaluator.Factory end; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory field, + EvalOperator.ExpressionEvaluator.Factory start, + EvalOperator.ExpressionEvaluator.Factory end) { + this.source = source; + this.field = field; + this.start = start; + this.end = end; + } + + @Override + public MvSliceLongEvaluator get(DriverContext context) { + return new MvSliceLongEvaluator(source, field.get(context), start.get(context), end.get(context), context); + } + + @Override + public String toString() { + return "MvSliceLongEvaluator[" + "field=" + field + ", start=" + start + ", end=" + end + "]"; + } + } +} diff --git 
a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZipEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZipEvaluator.java new file mode 100644 index 0000000000000..b53a1c8f9b3c0 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZipEvaluator.java @@ -0,0 +1,127 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.expression.function.Warnings; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link MvZip}. + * This class is generated. Do not edit it. + */ +public final class MvZipEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator leftField; + + private final EvalOperator.ExpressionEvaluator rightField; + + private final EvalOperator.ExpressionEvaluator delim; + + private final DriverContext driverContext; + + public MvZipEvaluator(Source source, EvalOperator.ExpressionEvaluator leftField, + EvalOperator.ExpressionEvaluator rightField, EvalOperator.ExpressionEvaluator delim, + DriverContext driverContext) { + this.warnings = new Warnings(source); + this.leftField = leftField; + this.rightField = rightField; + this.delim = delim; + this.driverContext = driverContext; + } + + @Override + public Block eval(Page page) { + try (BytesRefBlock leftFieldBlock = (BytesRefBlock) leftField.eval(page)) { + try (BytesRefBlock rightFieldBlock = (BytesRefBlock) rightField.eval(page)) { + try (BytesRefBlock delimBlock = (BytesRefBlock) delim.eval(page)) { + return eval(page.getPositionCount(), leftFieldBlock, rightFieldBlock, delimBlock); + } + } + } + } + + public BytesRefBlock eval(int positionCount, BytesRefBlock leftFieldBlock, + BytesRefBlock rightFieldBlock, BytesRefBlock delimBlock) { + try(BytesRefBlock.Builder result = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + BytesRef delimScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + boolean allBlocksAreNulls = true; + if (!leftFieldBlock.isNull(p)) { + allBlocksAreNulls = false; + } + if (!rightFieldBlock.isNull(p)) { + allBlocksAreNulls = false; + } + if (delimBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (delimBlock.getValueCount(p) != 1) { + if (delimBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (allBlocksAreNulls) { + result.appendNull(); + continue position; + } + MvZip.process(result, p, 
leftFieldBlock, rightFieldBlock, delimBlock.getBytesRef(delimBlock.getFirstValueIndex(p), delimScratch)); + } + return result.build(); + } + } + + @Override + public String toString() { + return "MvZipEvaluator[" + "leftField=" + leftField + ", rightField=" + rightField + ", delim=" + delim + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(leftField, rightField, delim); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory leftField; + + private final EvalOperator.ExpressionEvaluator.Factory rightField; + + private final EvalOperator.ExpressionEvaluator.Factory delim; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory leftField, + EvalOperator.ExpressionEvaluator.Factory rightField, + EvalOperator.ExpressionEvaluator.Factory delim) { + this.source = source; + this.leftField = leftField; + this.rightField = rightField; + this.delim = delim; + } + + @Override + public MvZipEvaluator get(DriverContext context) { + return new MvZipEvaluator(source, leftField.get(context), rightField.get(context), delim.get(context), context); + } + + @Override + public String toString() { + return "MvZipEvaluator[" + "leftField=" + leftField + ", rightField=" + rightField + ", delim=" + delim + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXFromWKBEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXFromWKBEvaluator.java new file mode 100644 index 0000000000000..937eedc1d8fe0 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXFromWKBEvaluator.java @@ -0,0 +1,127 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.Vector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link StX}. + * This class is generated. Do not edit it. 
+ */ +public final class StXFromWKBEvaluator extends AbstractConvertFunction.AbstractEvaluator { + public StXFromWKBEvaluator(EvalOperator.ExpressionEvaluator field, Source source, + DriverContext driverContext) { + super(driverContext, field, source); + } + + @Override + public String name() { + return "StXFromWKB"; + } + + @Override + public Block evalVector(Vector v) { + BytesRefVector vector = (BytesRefVector) v; + int positionCount = v.getPositionCount(); + BytesRef scratchPad = new BytesRef(); + if (vector.isConstant()) { + try { + return driverContext.blockFactory().newConstantDoubleBlockWith(evalValue(vector, 0, scratchPad), positionCount); + } catch (IllegalArgumentException e) { + registerException(e); + return driverContext.blockFactory().newConstantNullBlock(positionCount); + } + } + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + try { + builder.appendDouble(evalValue(vector, p, scratchPad)); + } catch (IllegalArgumentException e) { + registerException(e); + builder.appendNull(); + } + } + return builder.build(); + } + } + + private static double evalValue(BytesRefVector container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return StX.fromWellKnownBinary(value); + } + + @Override + public Block evalBlock(Block b) { + BytesRefBlock block = (BytesRefBlock) b; + int positionCount = block.getPositionCount(); + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + BytesRef scratchPad = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = block.getValueCount(p); + int start = block.getFirstValueIndex(p); + int end = start + valueCount; + boolean positionOpened = false; + boolean valuesAppended = false; + for (int i = start; i < end; i++) { + try { + double value = evalValue(block, i, scratchPad); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; + } + builder.appendDouble(value); + valuesAppended = true; + } catch (IllegalArgumentException e) { + registerException(e); + } + } + if (valuesAppended == false) { + builder.appendNull(); + } else if (positionOpened) { + builder.endPositionEntry(); + } + } + return builder.build(); + } + } + + private static double evalValue(BytesRefBlock container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return StX.fromWellKnownBinary(value); + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field, Source source) { + this.field = field; + this.source = source; + } + + @Override + public StXFromWKBEvaluator get(DriverContext context) { + return new StXFromWKBEvaluator(field.get(context), source, context); + } + + @Override + public String toString() { + return "StXFromWKBEvaluator[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYFromWKBEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYFromWKBEvaluator.java new file mode 100644 index 0000000000000..33405f6db5998 --- /dev/null +++ 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYFromWKBEvaluator.java @@ -0,0 +1,127 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.Vector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link StY}. + * This class is generated. Do not edit it. + */ +public final class StYFromWKBEvaluator extends AbstractConvertFunction.AbstractEvaluator { + public StYFromWKBEvaluator(EvalOperator.ExpressionEvaluator field, Source source, + DriverContext driverContext) { + super(driverContext, field, source); + } + + @Override + public String name() { + return "StYFromWKB"; + } + + @Override + public Block evalVector(Vector v) { + BytesRefVector vector = (BytesRefVector) v; + int positionCount = v.getPositionCount(); + BytesRef scratchPad = new BytesRef(); + if (vector.isConstant()) { + try { + return driverContext.blockFactory().newConstantDoubleBlockWith(evalValue(vector, 0, scratchPad), positionCount); + } catch (IllegalArgumentException e) { + registerException(e); + return driverContext.blockFactory().newConstantNullBlock(positionCount); + } + } + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + try { + builder.appendDouble(evalValue(vector, p, scratchPad)); + } catch (IllegalArgumentException e) { + registerException(e); + builder.appendNull(); + } + } + return builder.build(); + } + } + + private static double evalValue(BytesRefVector container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return StY.fromWellKnownBinary(value); + } + + @Override + public Block evalBlock(Block b) { + BytesRefBlock block = (BytesRefBlock) b; + int positionCount = block.getPositionCount(); + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + BytesRef scratchPad = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = block.getValueCount(p); + int start = block.getFirstValueIndex(p); + int end = start + valueCount; + boolean positionOpened = false; + boolean valuesAppended = false; + for (int i = start; i < end; i++) { + try { + double value = evalValue(block, i, scratchPad); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; + } + builder.appendDouble(value); + valuesAppended = true; + } catch (IllegalArgumentException e) { + registerException(e); + } + } + if (valuesAppended == false) { + builder.appendNull(); + } else if (positionOpened) 
{ + builder.endPositionEntry(); + } + } + return builder.build(); + } + } + + private static double evalValue(BytesRefBlock container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return StY.fromWellKnownBinary(value); + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field, Source source) { + this.field = field; + this.source = source; + } + + @Override + public StYFromWKBEvaluator get(DriverContext context) { + return new StYFromWKBEvaluator(field.get(context), source, context); + } + + @Override + public String toString() { + return "StYFromWKBEvaluator[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupOperator.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupOperator.java index 844cfde286072..2d433f0732064 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupOperator.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupOperator.java @@ -8,15 +8,21 @@ package org.elasticsearch.xpack.esql.enrich; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.AsyncOperator; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xpack.ql.expression.NamedExpression; +import java.io.IOException; import java.util.List; +import java.util.Objects; public final class EnrichLookupOperator extends AsyncOperator { private final EnrichLookupService enrichLookupService; @@ -27,6 +33,7 @@ public final class EnrichLookupOperator extends AsyncOperator { private final String matchType; private final String matchField; private final List enrichFields; + private long totalTerms = 0L; public record Factory( String sessionId, @@ -95,6 +102,7 @@ public EnrichLookupOperator( @Override protected void performAsync(Page inputPage, ActionListener listener) { final Block inputBlock = inputPage.getBlock(inputChannel); + totalTerms += inputBlock.getTotalValueCount(); enrichLookupService.lookupAsync( sessionId, parentTask, @@ -107,9 +115,83 @@ protected void performAsync(Page inputPage, ActionListener listener) { ); } + @Override + public String toString() { + return "EnrichOperator[index=" + + enrichIndex + + " match_field=" + + matchField + + " enrich_fields=" + + enrichFields + + " inputChannel=" + + inputChannel + + "]"; + } + @Override protected void doClose() { // TODO: Maybe create a sub-task as the parent task of all the lookup tasks // then cancel it when this operator terminates early (e.g., have enough result). 
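// Editorial note, not part of the patch: totalTerms, incremented in performAsync above via
// Block#getTotalValueCount(), counts the individual lookup terms across every position of the block
// read from inputChannel. For example, an input block whose three positions hold [a], [b, c] and a
// null would contribute 3, assuming getTotalValueCount sums value counts and null positions hold none.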
} + + @Override + protected Operator.Status status(long receivedPages, long completedPages, long totalTimeInMillis) { + return new EnrichLookupOperator.Status(receivedPages, completedPages, totalTimeInMillis, totalTerms); + } + + public static class Status extends AsyncOperator.Status { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + Operator.Status.class, + "enrich", + Status::new + ); + + final long totalTerms; + + Status(long receivedPages, long completedPages, long totalTimeInMillis, long totalTerms) { + super(receivedPages, completedPages, totalTimeInMillis); + this.totalTerms = totalTerms; + } + + Status(StreamInput in) throws IOException { + super(in); + this.totalTerms = in.readVLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVLong(totalTerms); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + innerToXContent(builder); + builder.field("total_terms", totalTerms); + return builder.endObject(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass() || super.equals(o) == false) { + return false; + } + Status status = (Status) o; + return totalTerms == status.totalTerms; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), totalTerms); + } + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java index c0a3149dafb4c..b935632874157 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java @@ -52,6 +52,7 @@ import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestHandler; @@ -128,13 +129,13 @@ public EnrichLookupService( this.clusterService = clusterService; this.searchService = searchService; this.transportService = transportService; - this.executor = transportService.getThreadPool().executor(EsqlPlugin.ESQL_THREAD_POOL_NAME); + this.executor = transportService.getThreadPool().executor(ThreadPool.Names.SEARCH); this.bigArrays = bigArrays; this.blockFactory = blockFactory; this.localBreakerSettings = new LocalCircuitBreaker.SizeSettings(clusterService.getSettings()); transportService.registerRequestHandler( LOOKUP_ACTION_NAME, - this.executor, + transportService.getThreadPool().executor(EsqlPlugin.ESQL_WORKER_THREAD_POOL_NAME), in -> new LookupRequest(in, blockFactory), new TransportHandler() ); @@ -332,6 +333,8 @@ private void doLookup( OutputOperator outputOperator = new OutputOperator(List.of(), Function.identity(), result::set); Driver driver = new Driver( "enrich-lookup:" + sessionId, + System.currentTimeMillis(), + System.nanoTime(), driverContext, () -> lookupDescription(sessionId, shardId, matchType, matchField, extractFields, inputPage.getPositionCount()), queryOperator, diff --git 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java index 113f8b95ca089..d4f6ea3e510c7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java @@ -37,7 +37,6 @@ import org.elasticsearch.xpack.core.enrich.EnrichPolicy; import org.elasticsearch.xpack.esql.analysis.EnrichResolution; import org.elasticsearch.xpack.esql.plan.logical.Enrich; -import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import org.elasticsearch.xpack.esql.session.EsqlSession; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.index.EsIndex; @@ -80,7 +79,7 @@ public EnrichPolicyResolver(ClusterService clusterService, TransportService tran this.threadPool = transportService.getThreadPool(); transportService.registerRequestHandler( RESOLVE_ACTION_NAME, - threadPool.executor(EsqlPlugin.ESQL_THREAD_POOL_NAME), + threadPool.executor(ThreadPool.Names.SEARCH), LookupRequest::new, new RequestHandler() ); @@ -272,7 +271,7 @@ private void lookupPolicies( new ActionListenerResponseHandler<>( refs.acquire(resp -> lookupResponses.put(cluster, resp)), LookupResponse::new, - threadPool.executor(EsqlPlugin.ESQL_THREAD_POOL_NAME) + threadPool.executor(ThreadPool.Names.SEARCH) ) ); } @@ -290,7 +289,7 @@ private void lookupPolicies( new ActionListenerResponseHandler<>( refs.acquire(resp -> lookupResponses.put(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, resp)), LookupResponse::new, - threadPool.executor(EsqlPlugin.ESQL_THREAD_POOL_NAME) + threadPool.executor(ThreadPool.Names.SEARCH) ) ); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java index e90510461551f..a07c963dc0844 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java @@ -18,6 +18,7 @@ import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.esql.planner.Mapper; import org.elasticsearch.xpack.esql.session.EsqlConfiguration; +import org.elasticsearch.xpack.esql.session.EsqlIndexResolver; import org.elasticsearch.xpack.esql.session.EsqlSession; import org.elasticsearch.xpack.esql.stats.Metrics; import org.elasticsearch.xpack.esql.stats.QueryMetric; @@ -29,14 +30,16 @@ public class PlanExecutor { private final IndexResolver indexResolver; + private final EsqlIndexResolver esqlIndexResolver; private final PreAnalyzer preAnalyzer; private final FunctionRegistry functionRegistry; private final Mapper mapper; private final Metrics metrics; private final Verifier verifier; - public PlanExecutor(IndexResolver indexResolver) { + public PlanExecutor(IndexResolver indexResolver, EsqlIndexResolver esqlIndexResolver) { this.indexResolver = indexResolver; + this.esqlIndexResolver = esqlIndexResolver; this.preAnalyzer = new PreAnalyzer(); this.functionRegistry = new EsqlFunctionRegistry(); this.mapper = new Mapper(functionRegistry); @@ -55,6 +58,7 @@ public void esql( sessionId, cfg, indexResolver, + esqlIndexResolver, enrichPolicyResolver, preAnalyzer, functionRegistry, diff --git 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java index 395a9ed16dc67..b577b8a68cd54 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java @@ -73,8 +73,12 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMax; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMedian; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMin; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvSlice; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvSum; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvZip; import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StX; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StY; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Concat; import org.elasticsearch.xpack.esql.expression.function.scalar.string.EndsWith; import org.elasticsearch.xpack.esql.expression.function.scalar.string.LTrim; @@ -174,6 +178,8 @@ private FunctionDefinition[][] functions() { def(Now.class, Now::new, "now") }, // spatial new FunctionDefinition[] { def(SpatialCentroid.class, SpatialCentroid::new, "st_centroid") }, + new FunctionDefinition[] { def(StX.class, StX::new, "st_x") }, + new FunctionDefinition[] { def(StY.class, StY::new, "st_y") }, // conditional new FunctionDefinition[] { def(Case.class, Case::new, "case") }, // null @@ -208,6 +214,8 @@ private FunctionDefinition[][] functions() { def(MvMax.class, MvMax::new, "mv_max"), def(MvMedian.class, MvMedian::new, "mv_median"), def(MvMin.class, MvMin::new, "mv_min"), + def(MvSlice.class, MvSlice::new, "mv_slice"), + def(MvZip.class, MvZip::new, "mv_zip"), def(MvSum.class, MvSum::new, "mv_sum"), def(Split.class, Split::new, "split") } }; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java new file mode 100644 index 0000000000000..b7868b33102a3 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java @@ -0,0 +1,344 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.ann.Evaluator; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; +import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.planner.PlannerUtils; +import org.elasticsearch.xpack.esql.type.EsqlDataTypes; +import org.elasticsearch.xpack.ql.InvalidArgumentException; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.expression.function.OptionalArgument; +import org.elasticsearch.xpack.ql.expression.function.scalar.ScalarFunction; +import org.elasticsearch.xpack.ql.expression.gen.script.ScriptTemplate; +import org.elasticsearch.xpack.ql.tree.NodeInfo; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataType; + +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.function.Function; + +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.FIRST; +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.SECOND; +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.THIRD; +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isInteger; +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isType; + +/** + * Returns a subset of the multivalued field using the start and end index values. + */ +public class MvSlice extends ScalarFunction implements OptionalArgument, EvaluatorMapper { + private final Expression field, start, end; + + @FunctionInfo( + returnType = { + "boolean", + "cartesian_point", + "cartesian_shape", + "date", + "double", + "geo_point", + "geo_shape", + "integer", + "ip", + "keyword", + "long", + "text", + "version" }, + description = "Returns a subset of the multivalued field using the start and end index values." + ) + public MvSlice( + Source source, + @Param( + name = "v", + type = { + "boolean", + "cartesian_point", + "cartesian_shape", + "date", + "double", + "geo_point", + "geo_shape", + "integer", + "ip", + "keyword", + "long", + "text", + "version" }, + description = "A multivalued field" + ) Expression field, + @Param(name = "start", type = { "integer" }, description = "start index") Expression start, + @Param(name = "end", type = { "integer" }, description = "end index (included)", optional = true) Expression end + ) { + super(source, end == null ? Arrays.asList(field, start, start) : Arrays.asList(field, start, end)); + this.field = field; + this.start = start; + this.end = end == null ? 
start : end; + } + + @Override + protected TypeResolution resolveType() { + if (childrenResolved() == false) { + return new TypeResolution("Unresolved children"); + } + + TypeResolution resolution = isType(field, EsqlDataTypes::isRepresentable, sourceText(), FIRST, "representable"); + if (resolution.unresolved()) { + return resolution; + } + + resolution = isInteger(start, sourceText(), SECOND); + if (resolution.unresolved()) { + return resolution; + } + + if (end != null) { + resolution = isInteger(end, sourceText(), THIRD); + if (resolution.unresolved()) { + return resolution; + } + } + + return resolution; + } + + @Override + public boolean foldable() { + return field.foldable() && start.foldable() && (end == null || end.foldable()); + } + + @Override + public EvalOperator.ExpressionEvaluator.Factory toEvaluator( + Function toEvaluator + ) { + if (start.foldable() && end.foldable()) { + int startOffset = Integer.parseInt(String.valueOf(start.fold())); + int endOffset = Integer.parseInt(String.valueOf(end.fold())); + checkStartEnd(startOffset, endOffset); + } + return switch (PlannerUtils.toElementType(field.dataType())) { + case BOOLEAN -> new MvSliceBooleanEvaluator.Factory( + source(), + toEvaluator.apply(field), + toEvaluator.apply(start), + toEvaluator.apply(end) + ); + case BYTES_REF -> new MvSliceBytesRefEvaluator.Factory( + source(), + toEvaluator.apply(field), + toEvaluator.apply(start), + toEvaluator.apply(end) + ); + case DOUBLE -> new MvSliceDoubleEvaluator.Factory( + source(), + toEvaluator.apply(field), + toEvaluator.apply(start), + toEvaluator.apply(end) + ); + case INT -> new MvSliceIntEvaluator.Factory( + source(), + toEvaluator.apply(field), + toEvaluator.apply(start), + toEvaluator.apply(end) + ); + case LONG -> new MvSliceLongEvaluator.Factory( + source(), + toEvaluator.apply(field), + toEvaluator.apply(start), + toEvaluator.apply(end) + ); + case NULL -> EvalOperator.CONSTANT_NULL_FACTORY; + default -> throw EsqlIllegalArgumentException.illegalDataType(field.dataType()); + }; + } + + @Override + public Object fold() { + return EvaluatorMapper.super.fold(); + } + + @Override + public Expression replaceChildren(List newChildren) { + return new MvSlice(source(), newChildren.get(0), newChildren.get(1), newChildren.size() > 2 ? newChildren.get(2) : null); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, MvSlice::new, field, start, end); + } + + @Override + public DataType dataType() { + return field.dataType(); + } + + @Override + public ScriptTemplate asScript() { + throw new UnsupportedOperationException("functions do not support scripting"); + } + + @Override + public int hashCode() { + return Objects.hash(field, start, end); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + MvSlice other = (MvSlice) obj; + return Objects.equals(other.field, field) && Objects.equals(other.start, start) && Objects.equals(other.end, end); + } + + static int adjustIndex(int oldOffset, int fieldValueCount, int first) { + return oldOffset < 0 ? 
oldOffset + fieldValueCount + first : oldOffset + first; + } + + static void checkStartEnd(int start, int end) throws InvalidArgumentException { + if (start > end) { + throw new InvalidArgumentException("Start offset is greater than end offset"); + } + if (start < 0 && end >= 0) { + throw new InvalidArgumentException("Start and end offset have different signs"); + } + } + + @Evaluator(extraName = "Boolean", warnExceptions = { InvalidArgumentException.class }) + static void process(BooleanBlock.Builder builder, int position, BooleanBlock field, int start, int end) { + int fieldValueCount = field.getValueCount(position); + checkStartEnd(start, end); + int first = field.getFirstValueIndex(position); + int mvStartIndex = adjustIndex(start, fieldValueCount, first); + mvStartIndex = Math.max(first, mvStartIndex); + int mvEndIndex = adjustIndex(end, fieldValueCount, first); + mvEndIndex = Math.min(fieldValueCount + first - 1, mvEndIndex); + if (mvStartIndex >= fieldValueCount + first || mvEndIndex < first) { + builder.appendNull(); + return; + } + if (mvStartIndex == mvEndIndex) { + builder.appendBoolean(field.getBoolean(mvStartIndex)); + return; + } + builder.beginPositionEntry(); + for (int i = mvStartIndex; i <= mvEndIndex; i++) { + builder.appendBoolean(field.getBoolean(i)); + } + builder.endPositionEntry(); + } + + @Evaluator(extraName = "Int", warnExceptions = { InvalidArgumentException.class }) + static void process(IntBlock.Builder builder, int position, IntBlock field, int start, int end) { + int fieldValueCount = field.getValueCount(position); + checkStartEnd(start, end); + int first = field.getFirstValueIndex(position); + int mvStartIndex = adjustIndex(start, fieldValueCount, first); + mvStartIndex = Math.max(first, mvStartIndex); + int mvEndIndex = adjustIndex(end, fieldValueCount, first); + mvEndIndex = Math.min(fieldValueCount + first - 1, mvEndIndex); + if (mvStartIndex >= fieldValueCount + first || mvEndIndex < first) { + builder.appendNull(); + return; + } + if (mvStartIndex == mvEndIndex) { + builder.appendInt(field.getInt(mvStartIndex)); + return; + } + builder.beginPositionEntry(); + for (int i = mvStartIndex; i <= mvEndIndex; i++) { + builder.appendInt(field.getInt(i)); + } + builder.endPositionEntry(); + } + + @Evaluator(extraName = "Long", warnExceptions = { InvalidArgumentException.class }) + static void process(LongBlock.Builder builder, int position, LongBlock field, int start, int end) { + int fieldValueCount = field.getValueCount(position); + checkStartEnd(start, end); + int first = field.getFirstValueIndex(position); + int mvStartIndex = adjustIndex(start, fieldValueCount, first); + mvStartIndex = Math.max(first, mvStartIndex); + int mvEndIndex = adjustIndex(end, fieldValueCount, first); + mvEndIndex = Math.min(fieldValueCount + first - 1, mvEndIndex); + if (mvStartIndex >= fieldValueCount + first || mvEndIndex < first) { + builder.appendNull(); + return; + } + if (mvStartIndex == mvEndIndex) { + builder.appendLong(field.getLong(mvStartIndex)); + return; + } + builder.beginPositionEntry(); + for (int i = mvStartIndex; i <= mvEndIndex; i++) { + builder.appendLong(field.getLong(i)); + } + builder.endPositionEntry(); + } + + @Evaluator(extraName = "Double", warnExceptions = { InvalidArgumentException.class }) + static void process(DoubleBlock.Builder builder, int position, DoubleBlock field, int start, int end) { + int fieldValueCount = field.getValueCount(position); + checkStartEnd(start, end); + int first = field.getFirstValueIndex(position); + int mvStartIndex = 
adjustIndex(start, fieldValueCount, first); + mvStartIndex = Math.max(first, mvStartIndex); + int mvEndIndex = adjustIndex(end, fieldValueCount, first); + mvEndIndex = Math.min(fieldValueCount + first - 1, mvEndIndex); + if (mvStartIndex >= fieldValueCount + first || mvEndIndex < first) { + builder.appendNull(); + return; + } + if (mvStartIndex == mvEndIndex) { + builder.appendDouble(field.getDouble(mvStartIndex)); + return; + } + builder.beginPositionEntry(); + for (int i = mvStartIndex; i <= mvEndIndex; i++) { + builder.appendDouble(field.getDouble(i)); + } + builder.endPositionEntry(); + } + + @Evaluator(extraName = "BytesRef", warnExceptions = { InvalidArgumentException.class }) + static void process(BytesRefBlock.Builder builder, int position, BytesRefBlock field, int start, int end) { + int fieldValueCount = field.getValueCount(position); + checkStartEnd(start, end); // append null here ? + int first = field.getFirstValueIndex(position); + int mvStartIndex = adjustIndex(start, fieldValueCount, first); + mvStartIndex = Math.max(first, mvStartIndex); + int mvEndIndex = adjustIndex(end, fieldValueCount, first); + mvEndIndex = Math.min(fieldValueCount + first - 1, mvEndIndex); + if (mvStartIndex >= fieldValueCount + first || mvEndIndex < first) { + builder.appendNull(); + return; + } + BytesRef fieldScratch = new BytesRef(); + if (mvStartIndex == mvEndIndex) { + builder.appendBytesRef(field.getBytesRef(mvStartIndex, fieldScratch)); + return; + } + builder.beginPositionEntry(); + for (int i = mvStartIndex; i <= mvEndIndex; i++) { + builder.appendBytesRef(field.getBytesRef(i, fieldScratch)); + } + builder.endPositionEntry(); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZip.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZip.java new file mode 100644 index 0000000000000..6227efeced36e --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZip.java @@ -0,0 +1,211 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefBuilder; +import org.elasticsearch.compute.ann.Evaluator; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.expression.Literal; +import org.elasticsearch.xpack.ql.expression.function.OptionalArgument; +import org.elasticsearch.xpack.ql.expression.function.scalar.ScalarFunction; +import org.elasticsearch.xpack.ql.expression.gen.script.ScriptTemplate; +import org.elasticsearch.xpack.ql.tree.NodeInfo; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataType; +import org.elasticsearch.xpack.ql.type.DataTypes; + +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.function.Function; + +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.FIRST; +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.SECOND; +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.THIRD; +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isString; + +/** + * Combines the values from two multivalued fields with a delimiter that joins them together. + */ +public class MvZip extends ScalarFunction implements OptionalArgument, EvaluatorMapper { + private final Expression mvLeft, mvRight, delim; + private static final Literal COMMA = new Literal(Source.EMPTY, ",", DataTypes.TEXT); + + @FunctionInfo( + returnType = { "keyword" }, + description = "Combines the values from two multivalued fields with a delimiter that joins them together." + ) + public MvZip( + Source source, + @Param(name = "mvLeft", type = { "keyword", "text" }, description = "A multivalued field") Expression mvLeft, + @Param(name = "mvRight", type = { "keyword", "text" }, description = "A multivalued field") Expression mvRight, + @Param(name = "delim", type = { "keyword", "text" }, description = "delimiter", optional = true) Expression delim + ) { + super(source, delim == null ? Arrays.asList(mvLeft, mvRight, COMMA) : Arrays.asList(mvLeft, mvRight, delim)); + this.mvLeft = mvLeft; + this.mvRight = mvRight; + this.delim = delim == null ? 
COMMA : delim; + } + + @Override + protected TypeResolution resolveType() { + if (childrenResolved() == false) { + return new TypeResolution("Unresolved children"); + } + + TypeResolution resolution = isString(mvLeft, sourceText(), FIRST); + if (resolution.unresolved()) { + return resolution; + } + + resolution = isString(mvRight, sourceText(), SECOND); + if (resolution.unresolved()) { + return resolution; + } + + if (delim != null) { + resolution = isString(delim, sourceText(), THIRD); + if (resolution.unresolved()) { + return resolution; + } + } + + return resolution; + } + + @Override + public boolean foldable() { + return mvLeft.foldable() && mvRight.foldable() && (delim == null || delim.foldable()); + } + + @Override + public EvalOperator.ExpressionEvaluator.Factory toEvaluator( + Function toEvaluator + ) { + return new MvZipEvaluator.Factory(source(), toEvaluator.apply(mvLeft), toEvaluator.apply(mvRight), toEvaluator.apply(delim)); + } + + @Override + public Object fold() { + return EvaluatorMapper.super.fold(); + } + + @Override + public Expression replaceChildren(List newChildren) { + return new MvZip(source(), newChildren.get(0), newChildren.get(1), newChildren.size() > 2 ? newChildren.get(2) : null); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, MvZip::new, mvLeft, mvRight, delim); + } + + @Override + public DataType dataType() { + return DataTypes.KEYWORD; + } + + @Override + public ScriptTemplate asScript() { + throw new UnsupportedOperationException("functions do not support scripting"); + } + + @Override + public int hashCode() { + return Objects.hash(mvLeft, mvRight, delim); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + MvZip other = (MvZip) obj; + return Objects.equals(other.mvLeft, mvLeft) && Objects.equals(other.mvRight, mvRight) && Objects.equals(other.delim, delim); + } + + private static void buildOneSide(BytesRefBlock.Builder builder, int start, int end, BytesRefBlock field, BytesRef fieldScratch) { + builder.beginPositionEntry(); + for (int i = start; i < end; i++) { + builder.appendBytesRef(field.getBytesRef(i, fieldScratch)); + } + builder.endPositionEntry(); + } + + @Evaluator + static void process(BytesRefBlock.Builder builder, int position, BytesRefBlock leftField, BytesRefBlock rightField, BytesRef delim) { + int leftFieldValueCount = leftField.getValueCount(position); + int rightFieldValueCount = rightField.getValueCount(position); + + int leftFirst = leftField.getFirstValueIndex(position); + int rightFirst = rightField.getFirstValueIndex(position); + + BytesRef fieldScratch = new BytesRef(); + + // nulls + if (leftField.isNull(position)) { + if (rightFieldValueCount == 1) { + builder.appendBytesRef(rightField.getBytesRef(rightFirst, fieldScratch)); + return; + } + buildOneSide(builder, rightFirst, rightFirst + rightFieldValueCount, rightField, fieldScratch); + return; + } + + if (rightField.isNull(position)) { + if (leftFieldValueCount == 1) { + builder.appendBytesRef(leftField.getBytesRef(leftFirst, fieldScratch)); + return; + } + buildOneSide(builder, leftFirst, leftFirst + leftFieldValueCount, leftField, fieldScratch); + return; + } + + BytesRefBuilder work = new BytesRefBuilder(); + // single value + if (leftFieldValueCount == 1 && rightFieldValueCount == 1) { + work.append(leftField.getBytesRef(leftFirst, fieldScratch)); + work.append(delim); + work.append(rightField.getBytesRef(rightFirst, fieldScratch)); + 
builder.appendBytesRef(work.get()); + return; + } + // multiple values + int leftIndex = 0, rightIndex = 0; + builder.beginPositionEntry(); + while (leftIndex < leftFieldValueCount && rightIndex < rightFieldValueCount) { + // concat + work.clear(); + work.append(leftField.getBytesRef(leftIndex + leftFirst, fieldScratch)); + work.append(delim); + work.append(rightField.getBytesRef(rightIndex + rightFirst, fieldScratch)); + builder.appendBytesRef(work.get()); + leftIndex++; + rightIndex++; + } + while (leftIndex < leftFieldValueCount) { + work.clear(); + work.append(leftField.getBytesRef(leftIndex + leftFirst, fieldScratch)); + builder.appendBytesRef(work.get()); + leftIndex++; + } + while (rightIndex < rightFieldValueCount) { + work.clear(); + work.append(rightField.getBytesRef(rightIndex + rightFirst, fieldScratch)); + builder.appendBytesRef(work.get()); + rightIndex++; + } + builder.endPositionEntry(); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StX.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StX.java new file mode 100644 index 0000000000000..f86be9290fed1 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StX.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.ann.ConvertEvaluator; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.expression.TypeResolutions; +import org.elasticsearch.xpack.ql.tree.NodeInfo; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataType; + +import java.util.List; +import java.util.function.Function; + +import static org.elasticsearch.xpack.esql.expression.EsqlTypeResolutions.isSpatialPoint; +import static org.elasticsearch.xpack.ql.type.DataTypes.DOUBLE; +import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.UNSPECIFIED; + +/** + * Extracts the x-coordinate from a point geometry. + * For cartesian geometries, the x-coordinate is the first coordinate. + * For geographic geometries, the x-coordinate is the longitude. + * The function `st_x` is defined in the OGC Simple Feature Access standard. + * Alternatively it is well described in PostGIS documentation at PostGIS:ST_X. 
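The MvZip evaluator above pairs the two inputs position by position, joining each pair with the delimiter, appends any leftover values from the longer side unchanged, and falls back to the non-null side as-is when one input is null. A minimal sketch of that zipping behaviour on plain string lists (hypothetical helper, not part of this change):

import java.util.ArrayList;
import java.util.List;

class MvZipSketch {
    // Pairs are joined with the delimiter; leftovers from the longer side are copied through unchanged.
    static List<String> zip(List<String> left, List<String> right, String delim) {
        List<String> out = new ArrayList<>();
        int i = 0, j = 0;
        while (i < left.size() && j < right.size()) {
            out.add(left.get(i++) + delim + right.get(j++));
        }
        while (i < left.size()) {
            out.add(left.get(i++));
        }
        while (j < right.size()) {
            out.add(right.get(j++));
        }
        return out;
    }
}

For example, zip(List.of("a", "b", "c"), List.of("1", "2"), "-") produces [a-1, b-2, c].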
+ */ +public class StX extends UnaryScalarFunction { + @FunctionInfo(returnType = "double", description = "Extracts the x-coordinate from a point geometry.") + public StX(Source source, @Param(name = "point", type = { "geo_point", "cartesian_point" }) Expression field) { + super(source, field); + } + + @Override + protected Expression.TypeResolution resolveType() { + return isSpatialPoint(field(), sourceText(), TypeResolutions.ParamOrdinal.DEFAULT); + } + + @Override + public EvalOperator.ExpressionEvaluator.Factory toEvaluator( + Function toEvaluator + ) { + return new StXFromWKBEvaluator.Factory(toEvaluator.apply(field()), source()); + } + + @Override + public DataType dataType() { + return DOUBLE; + } + + @Override + public Expression replaceChildren(List newChildren) { + return new StX(source(), newChildren.get(0)); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, StX::new, field()); + } + + @ConvertEvaluator(extraName = "FromWKB", warnExceptions = { IllegalArgumentException.class }) + static double fromWellKnownBinary(BytesRef in) { + return UNSPECIFIED.wkbAsPoint(in).getX(); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StY.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StY.java new file mode 100644 index 0000000000000..759c23c73374a --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StY.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.ann.ConvertEvaluator; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.expression.TypeResolutions; +import org.elasticsearch.xpack.ql.tree.NodeInfo; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataType; + +import java.util.List; +import java.util.function.Function; + +import static org.elasticsearch.xpack.esql.expression.EsqlTypeResolutions.isSpatialPoint; +import static org.elasticsearch.xpack.ql.type.DataTypes.DOUBLE; +import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.UNSPECIFIED; + +/** + * Extracts the y-coordinate from a point geometry. + * For cartesian geometries, the y-coordinate is the second coordinate. + * For geographic geometries, the y-coordinate is the latitude. + * The function `st_y` is defined in the OGC Simple Feature Access standard. + * Alternatively it is well described in PostGIS documentation at PostGIS:ST_Y. 
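As a concrete reading of the FromWKB evaluator in StX above (StY, below, is its mirror image): for a geographic point the X coordinate is the longitude and the Y coordinate is the latitude, so decoding the well-known binary of POINT(10 20) yields 10.0 and 20.0 respectively. A minimal sketch that reuses only the SpatialCoordinateTypes call already present in this change; the wrapper class is hypothetical:

import org.apache.lucene.util.BytesRef;

import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.UNSPECIFIED;

class PointCoordinateSketch {
    // For the WKB encoding of POINT(10 20): x (longitude) = 10.0, y (latitude) = 20.0.
    static double x(BytesRef wkb) {
        return UNSPECIFIED.wkbAsPoint(wkb).getX();
    }

    static double y(BytesRef wkb) {
        return UNSPECIFIED.wkbAsPoint(wkb).getY();
    }
}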
+ */ +public class StY extends UnaryScalarFunction { + @FunctionInfo(returnType = "double", description = "Extracts the y-coordinate from a point geometry.") + public StY(Source source, @Param(name = "point", type = { "geo_point", "cartesian_point" }) Expression field) { + super(source, field); + } + + @Override + protected TypeResolution resolveType() { + return isSpatialPoint(field(), sourceText(), TypeResolutions.ParamOrdinal.DEFAULT); + } + + @Override + public EvalOperator.ExpressionEvaluator.Factory toEvaluator( + Function toEvaluator + ) { + return new StYFromWKBEvaluator.Factory(toEvaluator.apply(field()), source()); + } + + @Override + public DataType dataType() { + return DOUBLE; + } + + @Override + public Expression replaceChildren(List newChildren) { + return new StY(source(), newChildren.get(0)); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, StY::new, field()); + } + + @ConvertEvaluator(extraName = "FromWKB", warnExceptions = { IllegalArgumentException.class }) + static double fromWellKnownBinary(BytesRef in) { + return UNSPECIFIED.wkbAsPoint(in).getY(); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java index 95892ac42e587..384bfd164b0a7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java @@ -97,8 +97,12 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMax; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMedian; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMin; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvSlice; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvSum; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvZip; import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StX; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StY; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Concat; import org.elasticsearch.xpack.esql.expression.function.scalar.string.EndsWith; import org.elasticsearch.xpack.esql.expression.function.scalar.string.LTrim; @@ -340,6 +344,8 @@ public static List namedTypeEntries() { of(ESQL_UNARY_SCLR_CLS, Sin.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar), of(ESQL_UNARY_SCLR_CLS, Sinh.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar), of(ESQL_UNARY_SCLR_CLS, Sqrt.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar), + of(ESQL_UNARY_SCLR_CLS, StX.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar), + of(ESQL_UNARY_SCLR_CLS, StY.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar), of(ESQL_UNARY_SCLR_CLS, Tan.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar), of(ESQL_UNARY_SCLR_CLS, Tanh.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar), of(ESQL_UNARY_SCLR_CLS, ToBoolean.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar), @@ -415,7 +421,9 @@ public static List 
namedTypeEntries() { of(ScalarFunction.class, MvMax.class, PlanNamedTypes::writeMvFunction, PlanNamedTypes::readMvFunction), of(ScalarFunction.class, MvMedian.class, PlanNamedTypes::writeMvFunction, PlanNamedTypes::readMvFunction), of(ScalarFunction.class, MvMin.class, PlanNamedTypes::writeMvFunction, PlanNamedTypes::readMvFunction), + of(ScalarFunction.class, MvSlice.class, PlanNamedTypes::writeMvSlice, PlanNamedTypes::readMvSlice), of(ScalarFunction.class, MvSum.class, PlanNamedTypes::writeMvFunction, PlanNamedTypes::readMvFunction), + of(ScalarFunction.class, MvZip.class, PlanNamedTypes::writeMvZip, PlanNamedTypes::readMvZip), // Expressions (other) of(Expression.class, Literal.class, PlanNamedTypes::writeLiteral, PlanNamedTypes::readLiteral), of(Expression.class, Order.class, PlanNamedTypes::writeOrder, PlanNamedTypes::readOrder) @@ -1248,6 +1256,8 @@ static void writeBinaryLogic(PlanStreamOutput out, BinaryLogic binaryLogic) thro entry(name(Sin.class), Sin::new), entry(name(Sinh.class), Sinh::new), entry(name(Sqrt.class), Sqrt::new), + entry(name(StX.class), StX::new), + entry(name(StY.class), StY::new), entry(name(Tan.class), Tan::new), entry(name(Tanh.class), Tanh::new), entry(name(ToBoolean.class), ToBoolean::new), @@ -1825,4 +1835,30 @@ static void writeLog(PlanStreamOutput out, Log log) throws IOException { out.writeExpression(fields.get(0)); out.writeOptionalWriteable(fields.size() == 2 ? o -> out.writeExpression(fields.get(1)) : null); } + + static MvSlice readMvSlice(PlanStreamInput in) throws IOException { + return new MvSlice(in.readSource(), in.readExpression(), in.readExpression(), in.readOptionalNamed(Expression.class)); + } + + static void writeMvSlice(PlanStreamOutput out, MvSlice fn) throws IOException { + out.writeNoSource(); + List fields = fn.children(); + assert fields.size() == 2 || fields.size() == 3; + out.writeExpression(fields.get(0)); + out.writeExpression(fields.get(1)); + out.writeOptionalWriteable(fields.size() == 3 ? o -> out.writeExpression(fields.get(2)) : null); + } + + static MvZip readMvZip(PlanStreamInput in) throws IOException { + return new MvZip(in.readSource(), in.readExpression(), in.readExpression(), in.readOptionalNamed(Expression.class)); + } + + static void writeMvZip(PlanStreamOutput out, MvZip fn) throws IOException { + out.writeNoSource(); + List fields = fn.children(); + assert fields.size() == 2 || fields.size() == 3; + out.writeExpression(fields.get(0)); + out.writeExpression(fields.get(1)); + out.writeOptionalWriteable(fields.size() == 3 ? 
o -> out.writeExpression(fields.get(2)) : null); + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java index 279ce3185d4aa..546f34d1b474c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java @@ -23,6 +23,7 @@ import org.elasticsearch.xpack.esql.plan.physical.EsSourceExec; import org.elasticsearch.xpack.esql.plan.physical.EsStatsQueryExec; import org.elasticsearch.xpack.esql.plan.physical.EsStatsQueryExec.Stat; +import org.elasticsearch.xpack.esql.plan.physical.EsTimeseriesQueryExec; import org.elasticsearch.xpack.esql.plan.physical.ExchangeExec; import org.elasticsearch.xpack.esql.plan.physical.FieldExtractExec; import org.elasticsearch.xpack.esql.plan.physical.FilterExec; @@ -83,9 +84,11 @@ public class LocalPhysicalPlanOptimizer extends ParameterizedRuleExecutor> rules(boolean optimizeForEsSource) { List> esSourceRules = new ArrayList<>(4); - esSourceRules.add(new ReplaceAttributeSourceWithDocId()); + esSourceRules.add(new ReplaceAttributeSourceWithDocId(timeSeriesMode)); if (optimizeForEsSource) { esSourceRules.add(new PushTopNToSource()); @@ -127,13 +130,20 @@ protected List> batches() { private static class ReplaceAttributeSourceWithDocId extends OptimizerRule { - ReplaceAttributeSourceWithDocId() { + private final boolean timeSeriesMode; + + ReplaceAttributeSourceWithDocId(boolean timeSeriesMode) { super(UP); + this.timeSeriesMode = timeSeriesMode; } @Override protected PhysicalPlan rule(EsSourceExec plan) { - return new EsQueryExec(plan.source(), plan.index(), plan.query()); + if (timeSeriesMode) { + return new EsTimeseriesQueryExec(plan.source(), plan.index(), plan.query()); + } else { + return new EsQueryExec(plan.source(), plan.index(), plan.query()); + } } } @@ -249,6 +259,11 @@ public static boolean canPushToSource(Expression exp, Predicate return canPushToSource(not.field(), hasIdenticalDelegate); } else if (exp instanceof UnaryScalarFunction usf) { if (usf instanceof RegexMatch || usf instanceof IsNull || usf instanceof IsNotNull) { + if (usf instanceof IsNull || usf instanceof IsNotNull) { + if (usf.field() instanceof FieldAttribute fa && fa.dataType().equals(DataTypes.TEXT)) { + return true; + } + } return isAttributePushable(usf.field(), usf, hasIdenticalDelegate); } } else if (exp instanceof CIDRMatch cidrMatch) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java index ab413bd89f0a6..7a5e39fea8f95 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java @@ -17,6 +17,7 @@ import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; import org.elasticsearch.xpack.esql.expression.SurrogateExpression; import org.elasticsearch.xpack.esql.expression.function.aggregate.Count; +import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import 
org.elasticsearch.xpack.esql.plan.logical.Eval; @@ -44,7 +45,6 @@ import org.elasticsearch.xpack.ql.expression.predicate.logical.Or; import org.elasticsearch.xpack.ql.expression.predicate.regex.RegexMatch; import org.elasticsearch.xpack.ql.optimizer.OptimizerRules; -import org.elasticsearch.xpack.ql.optimizer.OptimizerRules.BinaryComparisonSimplification; import org.elasticsearch.xpack.ql.optimizer.OptimizerRules.BooleanFunctionEqualsElimination; import org.elasticsearch.xpack.ql.optimizer.OptimizerRules.ConstantFolding; import org.elasticsearch.xpack.ql.optimizer.OptimizerRules.LiteralsOnTheRight; @@ -72,6 +72,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -81,9 +82,7 @@ import static java.util.Collections.singleton; import static org.elasticsearch.xpack.esql.expression.NamedExpressions.mergeOutputExpressions; import static org.elasticsearch.xpack.ql.expression.Expressions.asAttributes; -import static org.elasticsearch.xpack.ql.optimizer.OptimizerRules.FoldNull; import static org.elasticsearch.xpack.ql.optimizer.OptimizerRules.PropagateEquals; -import static org.elasticsearch.xpack.ql.optimizer.OptimizerRules.PropagateNullable; import static org.elasticsearch.xpack.ql.optimizer.OptimizerRules.TransformDirection; public class LogicalPlanOptimizer extends ParameterizedRuleExecutor { @@ -119,12 +118,11 @@ protected static Batch operators() { new ConvertStringToByteRef(), new FoldNull(), new SplitInWithFoldableValue(), - new ConstantFolding(), new PropagateEvalFoldables(), + new ConstantFolding(), // boolean new BooleanSimplification(), new LiteralsOnTheRight(), - new BinaryComparisonSimplification(), // needs to occur before BinaryComparison combinations (see class) new PropagateEquals(), new PropagateNullable(), @@ -259,7 +257,11 @@ protected LogicalPlan rule(Aggregate aggregate) { static String temporaryName(Expression inner, Expression outer, int suffix) { String in = toString(inner); String out = toString(outer); - return "$$" + in + "$" + out + "$" + suffix; + return rawTemporaryName(in, out, String.valueOf(suffix)); + } + + static String rawTemporaryName(String inner, String outer, String suffix) { + return "$$" + inner + "$" + outer + "$" + suffix; } static int TO_STRING_LIMIT = 16; @@ -839,9 +841,31 @@ private static LogicalPlan maybePushDownPastUnary(Filter filter, UnaryPlan unary } } + protected static class PushDownEval extends OptimizerRules.OptimizerRule { + @Override + protected LogicalPlan rule(Eval eval) { + return pushGeneratingPlanPastProjectAndOrderBy(eval, asAttributes(eval.fields())); + } + } + + protected static class PushDownRegexExtract extends OptimizerRules.OptimizerRule { + @Override + protected LogicalPlan rule(RegexExtract re) { + return pushGeneratingPlanPastProjectAndOrderBy(re, re.extractedFields()); + } + } + + protected static class PushDownEnrich extends OptimizerRules.OptimizerRule { + @Override + protected LogicalPlan rule(Enrich en) { + return pushGeneratingPlanPastProjectAndOrderBy(en, asAttributes(en.enrichFields())); + } + } + /** - * Pushes Evals past OrderBys. Although it seems arbitrary whether the OrderBy or the Eval is executed first, - * this transformation ensures that OrderBys only separated by an eval can be combined by PushDownAndCombineOrderBy. + * Pushes LogicalPlans which generate new attributes (Eval, Grok/Dissect, Enrich), past OrderBys and Projections. 
+ * Although it seems arbitrary whether the OrderBy or the Eval is executed first, this transformation ensures that OrderBys only + * separated by an eval can be combined by PushDownAndCombineOrderBy. * * E.g.: * @@ -851,59 +875,82 @@ private static LogicalPlan maybePushDownPastUnary(Filter filter, UnaryPlan unary * * ... | eval x = b + 1 | sort a | sort x * - * Ordering the evals before the orderBys has the advantage that it's always possible to order the plans like this. + * Ordering the Evals before the OrderBys has the advantage that it's always possible to order the plans like this. * E.g., in the example above it would not be possible to put the eval after the two orderBys. + * + * In case one of the Eval's fields would shadow the orderBy's attributes, we rename the attribute first. + * + * E.g. + * + * ... | sort a | eval a = b + 1 | ... + * + * becomes + * + * ... | eval $$a = a | eval a = b + 1 | sort $$a | drop $$a */ - protected static class PushDownEval extends OptimizerRules.OptimizerRule { - @Override - protected LogicalPlan rule(Eval eval) { - LogicalPlan child = eval.child(); + private static LogicalPlan pushGeneratingPlanPastProjectAndOrderBy(UnaryPlan generatingPlan, List generatedAttributes) { + LogicalPlan child = generatingPlan.child(); - if (child instanceof OrderBy orderBy) { - return orderBy.replaceChild(eval.replaceChild(orderBy.child())); - } else if (child instanceof Project) { - var projectWithEvalChild = pushDownPastProject(eval); - var fieldProjections = asAttributes(eval.fields()); - return projectWithEvalChild.withProjections(mergeOutputExpressions(fieldProjections, projectWithEvalChild.projections())); - } + if (child instanceof OrderBy orderBy) { + Set evalFieldNames = new LinkedHashSet<>(Expressions.names(generatedAttributes)); - return eval; - } - } + // Look for attributes in the OrderBy's expressions and create aliases with temporary names for them. 
+ AttributeReplacement nonShadowedOrders = renameAttributesInExpressions(evalFieldNames, orderBy.order()); - // same as for PushDownEval - protected static class PushDownRegexExtract extends OptimizerRules.OptimizerRule { - @Override - protected LogicalPlan rule(RegexExtract re) { - LogicalPlan child = re.child(); + AttributeMap aliasesForShadowedOrderByAttrs = nonShadowedOrders.replacedAttributes; + @SuppressWarnings("unchecked") + List newOrder = (List) (List) nonShadowedOrders.rewrittenExpressions; - if (child instanceof OrderBy orderBy) { - return orderBy.replaceChild(re.replaceChild(orderBy.child())); - } else if (child instanceof Project) { - var projectWithChild = pushDownPastProject(re); - return projectWithChild.withProjections(mergeOutputExpressions(re.extractedFields(), projectWithChild.projections())); + if (aliasesForShadowedOrderByAttrs.isEmpty() == false) { + List newAliases = new ArrayList<>(aliasesForShadowedOrderByAttrs.values()); + + LogicalPlan plan = new Eval(orderBy.source(), orderBy.child(), newAliases); + plan = generatingPlan.replaceChild(plan); + plan = new OrderBy(orderBy.source(), plan, newOrder); + plan = new Project(generatingPlan.source(), plan, generatingPlan.output()); + + return plan; } - return re; + return orderBy.replaceChild(generatingPlan.replaceChild(orderBy.child())); + } else if (child instanceof Project) { + var projectWithEvalChild = pushDownPastProject(generatingPlan); + return projectWithEvalChild.withProjections(mergeOutputExpressions(generatedAttributes, projectWithEvalChild.projections())); } + + return generatingPlan; } - // TODO double-check: this should be the same as EVAL and GROK/DISSECT, needed to avoid unbounded sort - protected static class PushDownEnrich extends OptimizerRules.OptimizerRule { - @Override - protected LogicalPlan rule(Enrich re) { - LogicalPlan child = re.child(); + private record AttributeReplacement(List rewrittenExpressions, AttributeMap replacedAttributes) {}; - if (child instanceof OrderBy orderBy) { - return orderBy.replaceChild(re.replaceChild(orderBy.child())); - } else if (child instanceof Project) { - var projectWithChild = pushDownPastProject(re); - var attrs = asAttributes(re.enrichFields()); - return projectWithChild.withProjections(mergeOutputExpressions(attrs, projectWithChild.projections())); - } + /** + * Replace attributes in the given expressions by assigning them temporary names. + * Returns the rewritten expressions and a map with an alias for each replaced attribute; the rewritten expressions reference + * these aliases. 
+ */ + private static AttributeReplacement renameAttributesInExpressions( + Set attributeNamesToRename, + List expressions + ) { + AttributeMap aliasesForReplacedAttributes = new AttributeMap<>(); + List rewrittenExpressions = new ArrayList<>(); + + for (Expression expr : expressions) { + rewrittenExpressions.add(expr.transformUp(Attribute.class, attr -> { + if (attributeNamesToRename.contains(attr.name())) { + Alias renamedAttribute = aliasesForReplacedAttributes.computeIfAbsent(attr, a -> { + String tempName = SubstituteSurrogates.rawTemporaryName(a.name(), "temp_name", a.id().toString()); + // TODO: this should be synthetic + return new Alias(a.source(), tempName, null, a, null, false); + }); + return renamedAttribute.toAttribute(); + } - return re; + return attr; + })); } + + return new AttributeReplacement(rewrittenExpressions, aliasesForReplacedAttributes); } protected static class PushDownAndCombineOrderBy extends OptimizerRules.OptimizerRule { @@ -1537,4 +1584,26 @@ private static LogicalPlan normalize(Aggregate aggregate, AttributeMap newChildren = new ArrayList<>(exp.children()); + newChildren.removeIf(e -> e.semanticEquals(nullExp)); + if (newChildren.size() != exp.children().size() && newChildren.size() > 0) { // coalesce needs at least one input + return exp.replaceChildren(newChildren); + } + } + return Literal.of(exp, null); + } + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java index a1064b5b7d6bc..b9018f56e60de 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java @@ -28,7 +28,7 @@ import org.elasticsearch.xpack.esql.plan.physical.RegexExtractExec; import org.elasticsearch.xpack.esql.plan.physical.RowExec; import org.elasticsearch.xpack.esql.plan.physical.ShowExec; -import org.elasticsearch.xpack.ql.common.Failure; +import org.elasticsearch.xpack.ql.common.Failures; import org.elasticsearch.xpack.ql.expression.AttributeSet; import org.elasticsearch.xpack.ql.expression.Expressions; import org.elasticsearch.xpack.ql.plan.QueryPlan; @@ -36,8 +36,6 @@ import org.elasticsearch.xpack.ql.plan.logical.EsRelation; import org.elasticsearch.xpack.ql.plan.logical.LogicalPlan; -import java.util.Collection; - import static org.elasticsearch.xpack.ql.common.Failure.fail; class OptimizerRules { @@ -46,7 +44,7 @@ private OptimizerRules() {} static class DependencyConsistency
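The renaming done by renameAttributesInExpressions above can be pictured on plain strings: each shadowed attribute name is aliased at most once to a temporary "$$name$temp_name$id" form (built with SubstituteSurrogates.rawTemporaryName), and every expression that referenced it is rewritten to use the alias. A minimal sketch of that memoised rewrite, with an integer counter standing in for the real attribute id; all names here are illustrative:

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

class ShadowedNameRenameSketch {
    // Rewrites sort keys so that shadowed names point at temporary aliases,
    // creating each alias at most once (mirroring computeIfAbsent in the rule above).
    static List<String> rewriteSortKeys(List<String> sortKeys, Set<String> shadowedNames) {
        Map<String, String> aliases = new LinkedHashMap<>();
        return sortKeys.stream()
            .map(key -> shadowedNames.contains(key)
                ? aliases.computeIfAbsent(key, k -> "$$" + k + "$temp_name$" + aliases.size())
                : key)
            .toList();
    }

    public static void main(String[] args) {
        // "... | sort a | eval a = b + 1": the sort key 'a' is shadowed by the eval and gets renamed.
        System.out.println(rewriteSortKeys(List.of("a", "b"), Set.of("a"))); // [$$a$temp_name$0, b]
    }
}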

    > { - void checkPlan(P p, Collection failures) { + void checkPlan(P p, Failures failures) { AttributeSet refs = references(p); AttributeSet input = p.inputSet(); AttributeSet generated = generates(p); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsQueryExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsQueryExec.java index 9add95c28f433..779df60416f0b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsQueryExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsQueryExec.java @@ -105,6 +105,10 @@ public List sorts() { return sorts; } + public List attrs() { + return attrs; + } + /** * Estimate of the number of bytes that'll be loaded per position before * the stream of pages is consumed. @@ -128,10 +132,6 @@ public PhysicalPlan estimateRowSize(State state) { return Objects.equals(this.estimatedRowSize, size) ? this : new EsQueryExec(source(), index, attrs, query, limit, sorts, size); } - public EsQueryExec withQuery(QueryBuilder query) { - return Objects.equals(this.query, query) ? this : new EsQueryExec(source(), index, attrs, query, limit, sorts, estimatedRowSize); - } - public EsQueryExec withLimit(Expression limit) { return Objects.equals(this.limit, limit) ? this : new EsQueryExec(source(), index, attrs, query, limit, sorts, estimatedRowSize); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsTimeseriesQueryExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsTimeseriesQueryExec.java new file mode 100644 index 0000000000000..0d92a52e6053c --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsTimeseriesQueryExec.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.plan.physical; + +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.ql.expression.Attribute; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.expression.FieldAttribute; +import org.elasticsearch.xpack.ql.index.EsIndex; +import org.elasticsearch.xpack.ql.tree.NodeInfo; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataTypes; +import org.elasticsearch.xpack.ql.type.EsField; + +import java.util.List; +import java.util.Map; +import java.util.Objects; + +public class EsTimeseriesQueryExec extends EsQueryExec { + + static final EsField TSID_FIELD = new EsField("_tsid", DataTypes.KEYWORD, Map.of(), true); + static final EsField TIMESTAMP_FIELD = new EsField("@timestamp", DataTypes.DATETIME, Map.of(), true); + + public EsTimeseriesQueryExec(Source source, EsIndex index, QueryBuilder query) { + this( + source, + index, + List.of( + new FieldAttribute(source, DOC_ID_FIELD.getName(), DOC_ID_FIELD), + new FieldAttribute(source, TSID_FIELD.getName(), TSID_FIELD), + new FieldAttribute(source, TIMESTAMP_FIELD.getName(), TIMESTAMP_FIELD) + ), + query, + null, + null, + null + ); + } + + public EsTimeseriesQueryExec( + Source source, + EsIndex index, + List attrs, + QueryBuilder query, + Expression limit, + List sorts, + Integer estimatedRowSize + ) { + super(source, index, attrs, query, limit, sorts, estimatedRowSize); + } + + protected NodeInfo info() { + return NodeInfo.create(this, EsTimeseriesQueryExec::new, index(), attrs(), query(), limit(), sorts(), estimatedRowSize()); + } + + @Override + public PhysicalPlan estimateRowSize(State state) { + int size; + if (sorts() == null || sorts().isEmpty()) { + // track doc ids + state.add(false, Integer.BYTES); + size = state.consumeAllFields(false); + } else { + // track doc ids and segment ids + state.add(false, Integer.BYTES * 2); + size = state.consumeAllFields(true); + } + return Objects.equals(this.estimatedRowSize(), size) + ? this + : new EsTimeseriesQueryExec(source(), index(), attrs(), query(), limit(), sorts(), size); + } + + @Override + public EsQueryExec withLimit(Expression limit) { + return Objects.equals(this.limit(), limit) + ? this + : new EsTimeseriesQueryExec(source(), index(), attrs(), query(), limit, sorts(), estimatedRowSize()); + } + + @Override + public EsQueryExec withSorts(List sorts) { + return Objects.equals(this.sorts(), sorts) + ?
this + : new EsTimeseriesQueryExec(source(), index(), attrs(), query(), limit(), sorts, estimatedRowSize()); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java index 4721c7e2cf08e..234e01ed11633 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java @@ -19,6 +19,7 @@ import org.elasticsearch.compute.lucene.LuceneOperator; import org.elasticsearch.compute.lucene.LuceneSourceOperator; import org.elasticsearch.compute.lucene.LuceneTopNSourceOperator; +import org.elasticsearch.compute.lucene.TimeSeriesSortedSourceOperatorFactory; import org.elasticsearch.compute.lucene.ValuesSourceReaderOperator; import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.compute.operator.OrdinalsGroupingOperator; @@ -139,14 +140,24 @@ public final PhysicalOperation sourcePhysicalOperation(EsQueryExec esQueryExec, fieldSorts ); } else { - luceneFactory = new LuceneSourceOperator.Factory( - shardContexts, - querySupplier(esQueryExec.query()), - context.queryPragmas().dataPartitioning(), - context.queryPragmas().taskConcurrency(), - context.pageSize(rowEstimatedSize), - limit - ); + if (context.queryPragmas().timeSeriesMode()) { + luceneFactory = TimeSeriesSortedSourceOperatorFactory.create( + limit, + context.pageSize(rowEstimatedSize), + context.queryPragmas().taskConcurrency(), + shardContexts, + querySupplier(esQueryExec.query()) + ); + } else { + luceneFactory = new LuceneSourceOperator.Factory( + shardContexts, + querySupplier(esQueryExec.query()), + context.queryPragmas().dataPartitioning(), + context.queryPragmas().taskConcurrency(), + context.pageSize(rowEstimatedSize), + limit + ); + } } Layout.Builder layout = new Layout.Builder(); layout.append(esQueryExec.output()); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java index 8f4dd902a44e4..d7d2e99426a97 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java @@ -163,7 +163,6 @@ public LocalExecutionPlan plan(PhysicalPlan node) { AggregateExec.class, a -> a.getMode() == AggregateExec.Mode.FINAL ? 
new ProjectExec(a.source(), a, Expressions.asAttributes(a.aggregates())) : a ); - PhysicalOperation physicalOperation = plan(node, context); final TimeValue statusInterval = configuration.pragmas().statusInterval(); @@ -716,6 +715,8 @@ public Driver apply(String sessionId) { success = true; return new Driver( sessionId, + System.currentTimeMillis(), + System.nanoTime(), driverContext, physicalOperation::describe, source, diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java index b747026dcbfb1..7af37a3eeb114 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java @@ -81,7 +81,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Supplier; -import static org.elasticsearch.xpack.esql.plugin.EsqlPlugin.ESQL_THREAD_POOL_NAME; import static org.elasticsearch.xpack.esql.plugin.EsqlPlugin.ESQL_WORKER_THREAD_POOL_NAME; /** @@ -116,7 +115,7 @@ public ComputeService( this.transportService = transportService; this.bigArrays = bigArrays.withCircuitBreaking(); this.blockFactory = blockFactory; - this.esqlExecutor = threadPool.executor(ESQL_THREAD_POOL_NAME); + this.esqlExecutor = threadPool.executor(ThreadPool.Names.SEARCH); transportService.registerRequestHandler(DATA_ACTION_NAME, this.esqlExecutor, DataNodeRequest::new, new DataNodeRequestHandler()); transportService.registerRequestHandler( CLUSTER_ACTION_NAME, @@ -196,7 +195,7 @@ public void execute( final List collectedProfiles = configuration.profile() ? Collections.synchronizedList(new ArrayList<>()) : List.of(); final var exchangeSource = new ExchangeSourceHandler( queryPragmas.exchangeBufferSize(), - transportService.getThreadPool().executor(ESQL_THREAD_POOL_NAME) + transportService.getThreadPool().executor(ThreadPool.Names.SEARCH) ); try ( Releasable ignored = exchangeSource.addEmptySink(); @@ -426,7 +425,7 @@ void runCompute(CancellableTask task, ComputeContext context, PhysicalPlan plan, } ActionListener listenerCollectingStatus = listener.map(ignored -> { if (context.configuration.profile()) { - return drivers.stream().map(d -> new DriverProfile(d.status().completedOperators())).toList(); + return drivers.stream().map(Driver::profile).toList(); } return null; }); @@ -628,7 +627,7 @@ private void runBatch(int startBatchIndex) { final int endBatchIndex = Math.min(startBatchIndex + maxConcurrentShards, request.shardIds().size()); List shardIds = request.shardIds().subList(startBatchIndex, endBatchIndex); acquireSearchContexts(clusterAlias, shardIds, configuration, request.aliasFilters(), ActionListener.wrap(searchContexts -> { - assert ThreadPool.assertCurrentThreadPool(ESQL_THREAD_POOL_NAME, ESQL_WORKER_THREAD_POOL_NAME); + assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.SEARCH, ESQL_WORKER_THREAD_POOL_NAME); var computeContext = new ComputeContext(sessionId, clusterAlias, searchContexts, configuration, null, exchangeSink); runCompute( parentTask, @@ -734,7 +733,7 @@ void runComputeOnRemoteCluster( final String localSessionId = clusterAlias + ":" + globalSessionId; var exchangeSource = new ExchangeSourceHandler( configuration.pragmas().exchangeBufferSize(), - transportService.getThreadPool().executor(ESQL_THREAD_POOL_NAME) + transportService.getThreadPool().executor(ThreadPool.Names.SEARCH) ); try ( Releasable ignored = 
exchangeSource.addEmptySink(); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java index 14ebf3da2cd7e..fded9339567bd 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java @@ -25,7 +25,10 @@ import org.elasticsearch.compute.lucene.LuceneOperator; import org.elasticsearch.compute.lucene.ValuesSourceReaderOperator; import org.elasticsearch.compute.operator.AbstractPageMappingOperator; +import org.elasticsearch.compute.operator.AggregationOperator; +import org.elasticsearch.compute.operator.AsyncOperator; import org.elasticsearch.compute.operator.DriverStatus; +import org.elasticsearch.compute.operator.HashAggregationOperator; import org.elasticsearch.compute.operator.LimitOperator; import org.elasticsearch.compute.operator.MvExpandOperator; import org.elasticsearch.compute.operator.exchange.ExchangeService; @@ -50,8 +53,10 @@ import org.elasticsearch.xpack.esql.action.RestEsqlDeleteAsyncResultAction; import org.elasticsearch.xpack.esql.action.RestEsqlGetAsyncResultAction; import org.elasticsearch.xpack.esql.action.RestEsqlQueryAction; +import org.elasticsearch.xpack.esql.enrich.EnrichLookupOperator; import org.elasticsearch.xpack.esql.execution.PlanExecutor; import org.elasticsearch.xpack.esql.querydsl.query.SingleValueQuery; +import org.elasticsearch.xpack.esql.session.EsqlIndexResolver; import org.elasticsearch.xpack.esql.type.EsqlDataTypeRegistry; import org.elasticsearch.xpack.ql.index.IndexResolver; @@ -65,7 +70,6 @@ public class EsqlPlugin extends Plugin implements ActionPlugin { - public static final String ESQL_THREAD_POOL_NAME = "esql"; public static final String ESQL_WORKER_THREAD_POOL_NAME = "esql_worker"; public static final Setting QUERY_RESULT_TRUNCATION_MAX_SIZE = Setting.intSetting( @@ -104,14 +108,10 @@ public Collection createComponents(PluginServices services) { services.clusterService().getClusterName().value(), EsqlDataTypeRegistry.INSTANCE, Set::of - ) - ), - new ExchangeService( - services.clusterService().getSettings(), - services.threadPool(), - EsqlPlugin.ESQL_THREAD_POOL_NAME, - blockFactory + ), + new EsqlIndexResolver(services.client(), EsqlDataTypeRegistry.INSTANCE) ), + new ExchangeService(services.clusterService().getSettings(), services.threadPool(), ThreadPool.Names.SEARCH, blockFactory), blockFactory ); } @@ -163,31 +163,26 @@ public List getNamedWriteables() { List.of( DriverStatus.ENTRY, AbstractPageMappingOperator.Status.ENTRY, + AggregationOperator.Status.ENTRY, ExchangeSinkOperator.Status.ENTRY, ExchangeSourceOperator.Status.ENTRY, + HashAggregationOperator.Status.ENTRY, LimitOperator.Status.ENTRY, LuceneOperator.Status.ENTRY, TopNOperatorStatus.ENTRY, MvExpandOperator.Status.ENTRY, ValuesSourceReaderOperator.Status.ENTRY, - SingleValueQuery.ENTRY + SingleValueQuery.ENTRY, + AsyncOperator.Status.ENTRY, + EnrichLookupOperator.Status.ENTRY ).stream(), Block.getNamedWriteables().stream() ).toList(); } - @Override public List> getExecutorBuilders(Settings settings) { final int allocatedProcessors = EsExecutors.allocatedProcessors(settings); return List.of( - new FixedExecutorBuilder( - settings, - ESQL_THREAD_POOL_NAME, - allocatedProcessors, - 1000, - ESQL_THREAD_POOL_NAME, - EsExecutors.TaskTrackingConfig.DEFAULT - ), // TODO: Maybe have two types of threadpools for workers: one for 
CPU-bound and one for I/O-bound tasks. // And we should also reduce the number of threads of the CPU-bound threadpool to allocatedProcessors. new FixedExecutorBuilder( diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/QueryPragmas.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/QueryPragmas.java index 2ceee9de9001e..fd76edf46229e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/QueryPragmas.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/QueryPragmas.java @@ -41,6 +41,8 @@ public final class QueryPragmas implements Writeable { DataPartitioning.SEGMENT ); + public static final Setting TIME_SERIES_MODE = Setting.boolSetting("time_series", false); + /** * Size of a page in entries with {@code 0} being a special value asking * to adaptively size based on the number of columns in the page. @@ -128,6 +130,10 @@ public boolean isEmpty() { return settings.isEmpty(); } + public boolean timeSeriesMode() { + return TIME_SERIES_MODE.get(settings); + } + @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java index baaa4abe23b3d..366046d39dc43 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java @@ -82,7 +82,7 @@ public TransportEsqlQueryAction( super(EsqlQueryAction.NAME, transportService, actionFilters, EsqlQueryRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.planExecutor = planExecutor; this.clusterService = clusterService; - this.requestExecutor = threadPool.executor(EsqlPlugin.ESQL_THREAD_POOL_NAME); + this.requestExecutor = threadPool.executor(ThreadPool.Names.SEARCH); exchangeService.registerTransportHandler(transportService); this.exchangeService = exchangeService; this.enrichPolicyResolver = new EnrichPolicyResolver(clusterService, transportService, planExecutor.indexResolver()); @@ -124,7 +124,7 @@ protected void doExecute(Task task, EsqlQueryRequest request, ActionListener listener) { - assert ThreadPool.assertCurrentThreadPool(EsqlPlugin.ESQL_THREAD_POOL_NAME); + assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.SEARCH); if (requestIsAsync(request)) { asyncTaskManagementService.asyncExecute( request, diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlIndexResolver.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlIndexResolver.java new file mode 100644 index 0000000000000..b573de7cc3435 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlIndexResolver.java @@ -0,0 +1,252 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
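The time_series pragma introduced in QueryPragmas above is a plain boolean setting, so switching the planner onto the time-series source operator shown earlier in EsPhysicalOperationProviders comes down to putting time_series=true into the pragmas' Settings. A minimal sketch of that round trip (class and method names are illustrative only):

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;

class TimeSeriesPragmaSketch {
    // Same shape as QueryPragmas.TIME_SERIES_MODE: a boolean setting defaulting to false.
    static final Setting<Boolean> TIME_SERIES_MODE = Setting.boolSetting("time_series", false);

    public static void main(String[] args) {
        Settings pragmas = Settings.builder().put("time_series", true).build();
        // true: the planner would pick TimeSeriesSortedSourceOperatorFactory instead of LuceneSourceOperator
        System.out.println(TIME_SERIES_MODE.get(pragmas));
    }
}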
+ */ +package org.elasticsearch.xpack.esql.session; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesIndexResponse; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; +import org.elasticsearch.action.fieldcaps.IndexFieldCapabilities; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.common.Strings; +import org.elasticsearch.index.mapper.TimeSeriesParams; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.ql.index.EsIndex; +import org.elasticsearch.xpack.ql.index.IndexResolution; +import org.elasticsearch.xpack.ql.index.IndexResolver; +import org.elasticsearch.xpack.ql.type.DataType; +import org.elasticsearch.xpack.ql.type.DataTypeRegistry; +import org.elasticsearch.xpack.ql.type.DateEsField; +import org.elasticsearch.xpack.ql.type.EsField; +import org.elasticsearch.xpack.ql.type.InvalidMappedField; +import org.elasticsearch.xpack.ql.type.KeywordEsField; +import org.elasticsearch.xpack.ql.type.TextEsField; +import org.elasticsearch.xpack.ql.type.UnsupportedEsField; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import java.util.TreeSet; + +import static org.elasticsearch.xpack.ql.type.DataTypes.DATETIME; +import static org.elasticsearch.xpack.ql.type.DataTypes.KEYWORD; +import static org.elasticsearch.xpack.ql.type.DataTypes.OBJECT; +import static org.elasticsearch.xpack.ql.type.DataTypes.TEXT; +import static org.elasticsearch.xpack.ql.type.DataTypes.UNSUPPORTED; + +public class EsqlIndexResolver { + private final Client client; + private final DataTypeRegistry typeRegistry; + + public EsqlIndexResolver(Client client, DataTypeRegistry typeRegistry) { + this.client = client; + this.typeRegistry = typeRegistry; + } + + /** + * Resolves a pattern to one (potentially compound meaning that spawns multiple indices) mapping. + */ + public void resolveAsMergedMapping(String indexWildcard, Set fieldNames, ActionListener listener) { + client.fieldCaps( + createFieldCapsRequest(indexWildcard, fieldNames), + listener.delegateFailureAndWrap((l, response) -> l.onResponse(mergedMappings(indexWildcard, response))) + ); + } + + public IndexResolution mergedMappings(String indexPattern, FieldCapabilitiesResponse fieldCapsResponse) { + assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.SEARCH_COORDINATION); // too expensive to run this on a transport worker + if (fieldCapsResponse.getIndexResponses().isEmpty()) { + return IndexResolution.notFound(indexPattern); + } + + Map> fieldsCaps = collectFieldCaps(fieldCapsResponse); + + // Build hierarchical fields - it's easier to do it in sorted order so the object fields come first. + // TODO flattened is simpler - could we get away with that? 
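The loop that follows the "Build hierarchical fields" comment above turns the flat, dot-separated names returned by field_caps into a nested field tree, sorting first so that object fields are visited before their subfields. A minimal standalone sketch of the same idea on plain maps; leaf values and names are illustrative only:

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

class FieldTreeSketch {
    // Builds a nested map from dotted field names; parent objects are created on demand
    // (the real resolver sorts names so explicitly mapped object fields are seen first).
    @SuppressWarnings("unchecked")
    static Map<String, Object> tree(String... dottedNames) {
        Arrays.sort(dottedNames);
        Map<String, Object> root = new HashMap<>();
        for (String name : dottedNames) {
            Map<String, Object> current = root;
            int nextDot;
            while ((nextDot = name.indexOf('.')) >= 0) {
                String parent = name.substring(0, nextDot);
                current = (Map<String, Object>) current.computeIfAbsent(parent, p -> new HashMap<String, Object>());
                name = name.substring(nextDot + 1);
            }
            current.put(name, "leaf");
        }
        return root;
    }

    public static void main(String[] args) {
        // e.g. {age=leaf, user={id=leaf, name=leaf}} (map print order may vary)
        System.out.println(tree("user.name", "user.id", "age"));
    }
}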
+ String[] names = fieldsCaps.keySet().toArray(new String[0]); + Arrays.sort(names); + Map rootFields = new HashMap<>(); + for (String name : names) { + Map fields = rootFields; + String fullName = name; + boolean isAlias = false; + UnsupportedEsField firstUnsupportedParent = null; + while (true) { + int nextDot = name.indexOf('.'); + if (nextDot < 0) { + break; + } + String parent = name.substring(0, nextDot); + EsField obj = fields.get(parent); + if (obj == null) { + obj = new EsField(parent, OBJECT, new HashMap<>(), false, true); + isAlias = true; + fields.put(parent, obj); + } else if (firstUnsupportedParent == null && obj instanceof UnsupportedEsField unsupportedParent) { + firstUnsupportedParent = unsupportedParent; + } + fields = obj.getProperties(); + name = name.substring(nextDot + 1); + } + // TODO we're careful to make isAlias match IndexResolver - but do we use it? + EsField field = firstUnsupportedParent == null + ? createField(fieldCapsResponse, name, fullName, fieldsCaps.get(fullName), isAlias) + : new UnsupportedEsField( + fullName, + firstUnsupportedParent.getOriginalType(), + firstUnsupportedParent.getName(), + new HashMap<>() + ); + fields.put(name, field); + } + + boolean allEmpty = true; + for (FieldCapabilitiesIndexResponse ir : fieldCapsResponse.getIndexResponses()) { + allEmpty &= ir.get().isEmpty(); + } + if (allEmpty) { + // If all the mappings are empty we return an empty set of resolved indices to line up with QL + return IndexResolution.valid(new EsIndex(indexPattern, rootFields, Set.of())); + } + + Set concreteIndices = new HashSet<>(fieldCapsResponse.getIndexResponses().size()); + for (FieldCapabilitiesIndexResponse ir : fieldCapsResponse.getIndexResponses()) { + concreteIndices.add(ir.getIndexName()); + } + return IndexResolution.valid(new EsIndex(indexPattern, rootFields, concreteIndices)); + } + + private static Map> collectFieldCaps(FieldCapabilitiesResponse fieldCapsResponse) { + Set seenHashes = new HashSet<>(); + Map> fieldsCaps = new HashMap<>(); + for (FieldCapabilitiesIndexResponse response : fieldCapsResponse.getIndexResponses()) { + if (seenHashes.add(response.getIndexMappingHash()) == false) { + continue; + } + for (IndexFieldCapabilities fc : response.get().values()) { + if (fc.isMetadatafield()) { + // ESQL builds the metadata fields if they are asked for without using the resolution. + continue; + } + List all = fieldsCaps.computeIfAbsent(fc.name(), (_key) -> new ArrayList<>()); + all.add(fc); + } + } + return fieldsCaps; + } + + private EsField createField( + FieldCapabilitiesResponse fieldCapsResponse, + String name, + String fullName, + List fcs, + boolean isAlias + ) { + IndexFieldCapabilities first = fcs.get(0); + List rest = fcs.subList(1, fcs.size()); + DataType type = typeRegistry.fromEs(first.type(), first.metricType()); + boolean aggregatable = first.isAggregatable(); + if (rest.isEmpty() == false) { + for (IndexFieldCapabilities fc : rest) { + if (first.metricType() != fc.metricType()) { + return conflictingMetricTypes(name, fullName, fieldCapsResponse); + } + } + for (IndexFieldCapabilities fc : rest) { + if (type != typeRegistry.fromEs(fc.type(), fc.metricType())) { + return conflictingTypes(name, fullName, fieldCapsResponse); + } + } + for (IndexFieldCapabilities fc : rest) { + aggregatable &= fc.isAggregatable(); + } + } + + // TODO I think we only care about unmapped fields if we're aggregating on them. do we even then? 
+ + if (type == TEXT) { + return new TextEsField(name, new HashMap<>(), false, isAlias); + } + if (type == KEYWORD) { + int length = Short.MAX_VALUE; + // TODO: to check whether isSearchable/isAggregateable takes into account the presence of the normalizer + boolean normalized = false; + return new KeywordEsField(name, new HashMap<>(), aggregatable, length, normalized, isAlias); + } + if (type == DATETIME) { + return DateEsField.dateEsField(name, new HashMap<>(), aggregatable); + } + if (type == UNSUPPORTED) { + return unsupported(name, first); + } + + return new EsField(name, type, new HashMap<>(), aggregatable, isAlias); + } + + private UnsupportedEsField unsupported(String name, IndexFieldCapabilities fc) { + String originalType = fc.metricType() == TimeSeriesParams.MetricType.COUNTER ? "counter" : fc.type(); + return new UnsupportedEsField(name, originalType); + } + + private EsField conflictingTypes(String name, String fullName, FieldCapabilitiesResponse fieldCapsResponse) { + Map> typesToIndices = new TreeMap<>(); + for (FieldCapabilitiesIndexResponse ir : fieldCapsResponse.getIndexResponses()) { + IndexFieldCapabilities fc = ir.get().get(fullName); + if (fc != null) { + DataType type = typeRegistry.fromEs(fc.type(), fc.metricType()); + if (type == UNSUPPORTED) { + return unsupported(name, fc); + } + typesToIndices.computeIfAbsent(type.esType(), _key -> new TreeSet<>()).add(ir.getIndexName()); + } + } + StringBuilder errorMessage = new StringBuilder(); + errorMessage.append("mapped as ["); + errorMessage.append(typesToIndices.size()); + errorMessage.append("] incompatible types: "); + boolean first = true; + for (Map.Entry> e : typesToIndices.entrySet()) { + if (first) { + first = false; + } else { + errorMessage.append(", "); + } + errorMessage.append("["); + errorMessage.append(e.getKey()); + errorMessage.append("] in "); + errorMessage.append(e.getValue()); + } + return new InvalidMappedField(name, errorMessage.toString()); + } + + private EsField conflictingMetricTypes(String name, String fullName, FieldCapabilitiesResponse fieldCapsResponse) { + TreeSet indices = new TreeSet<>(); + for (FieldCapabilitiesIndexResponse ir : fieldCapsResponse.getIndexResponses()) { + IndexFieldCapabilities fc = ir.get().get(fullName); + if (fc != null) { + indices.add(ir.getIndexName()); + } + } + return new InvalidMappedField(name, "mapped as different metric types in indices: " + indices); + } + + private static FieldCapabilitiesRequest createFieldCapsRequest(String index, Set fieldNames) { + FieldCapabilitiesRequest req = new FieldCapabilitiesRequest().indices(Strings.commaDelimitedListToStringArray(index)); + req.fields(fieldNames.toArray(String[]::new)); + req.includeUnmapped(true); + // lenient because we throw our own errors looking at the response e.g. 
if something was not resolved + // also because this way security doesn't throw authorization exceptions but rather honors ignore_unavailable + req.indicesOptions(IndexResolver.FIELD_CAPS_INDICES_OPTIONS); + req.setMergeResults(false); + return req; + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java index fa573c7731c13..683460243ecbd 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.fieldcaps.FieldCapabilities; import org.elasticsearch.common.Strings; import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.core.Assertions; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; @@ -51,11 +52,14 @@ import org.elasticsearch.xpack.ql.plan.logical.Aggregate; import org.elasticsearch.xpack.ql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.ql.plan.logical.Project; +import org.elasticsearch.xpack.ql.type.DataTypes; +import org.elasticsearch.xpack.ql.type.EsField; import org.elasticsearch.xpack.ql.type.InvalidMappedField; import org.elasticsearch.xpack.ql.util.Holder; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Set; @@ -75,6 +79,7 @@ public class EsqlSession { private final String sessionId; private final EsqlConfiguration configuration; private final IndexResolver indexResolver; + private final EsqlIndexResolver esqlIndexResolver; private final EnrichPolicyResolver enrichPolicyResolver; private final PreAnalyzer preAnalyzer; @@ -89,6 +94,7 @@ public EsqlSession( String sessionId, EsqlConfiguration configuration, IndexResolver indexResolver, + EsqlIndexResolver esqlIndexResolver, EnrichPolicyResolver enrichPolicyResolver, PreAnalyzer preAnalyzer, FunctionRegistry functionRegistry, @@ -99,6 +105,7 @@ public EsqlSession( this.sessionId = sessionId; this.configuration = configuration; this.indexResolver = indexResolver; + this.esqlIndexResolver = esqlIndexResolver; this.enrichPolicyResolver = enrichPolicyResolver; this.preAnalyzer = preAnalyzer; this.verifier = verifier; @@ -201,18 +208,11 @@ private void preAnalyzeIndices(LogicalPlan parsed, ActionListener void preAnalyzeIndices(LogicalPlan parsed, ActionListener fieldNames, + ActionListener listener + ) { + indexResolver.resolveAsMergedMapping(indexWildcard, fieldNames, false, Map.of(), new ActionListener<>() { + @Override + public void onResponse(IndexResolution fromQl) { + esqlIndexResolver.resolveAsMergedMapping(indexWildcard, fieldNames, new ActionListener<>() { + @Override + public void onResponse(IndexResolution fromEsql) { + if (fromQl.isValid() == false) { + if (fromEsql.isValid()) { + throw new IllegalArgumentException( + "ql and esql didn't make the same resolution: validity differs " + fromQl + " != " + fromEsql + ); + } + } else { + assertSameMappings("", fromQl.get().mapping(), fromEsql.get().mapping()); + if (fromQl.get().concreteIndices().equals(fromEsql.get().concreteIndices()) == false) { + throw new IllegalArgumentException( + "ql and esql didn't make the same resolution: concrete indices differ " + + fromQl.get().concreteIndices() + + " != " + + fromEsql.get().concreteIndices() + ); + } + } + 
listener.onResponse(fromEsql); + } + + private void assertSameMappings(String prefix, Map fromQl, Map fromEsql) { + List qlFields = new ArrayList<>(); + qlFields.addAll(fromQl.keySet()); + Collections.sort(qlFields); + + List esqlFields = new ArrayList<>(); + esqlFields.addAll(fromEsql.keySet()); + Collections.sort(esqlFields); + if (qlFields.equals(esqlFields) == false) { + throw new IllegalArgumentException( + prefix + ": ql and esql didn't make the same resolution: fields differ \n" + qlFields + " !=\n" + esqlFields + ); + } + + for (int f = 0; f < qlFields.size(); f++) { + String name = qlFields.get(f); + EsField qlField = fromQl.get(name); + EsField esqlField = fromEsql.get(name); + + if (qlField.getProperties().isEmpty() == false || esqlField.getProperties().isEmpty() == false) { + assertSameMappings( + prefix.equals("") ? name : prefix + "." + name, + qlField.getProperties(), + esqlField.getProperties() + ); + } + + /* + * Check that the field itself is the same, skipping isAlias because + * we don't actually use it in ESQL and the EsqlIndexResolver doesn't + * produce exactly the same result. + */ + if (qlField.getDataType().equals(DataTypes.UNSUPPORTED) == false + && qlField.getName().equals(esqlField.getName()) == false + // QL uses full paths for unsupported fields. ESQL does not. This particular difference is fine. + ) { + throw new IllegalArgumentException( + prefix + + "." + + name + + ": ql and esql didn't make the same resolution: names differ [" + + qlField.getName() + + "] != [" + + esqlField.getName() + + "]" + ); + } + if (qlField.getDataType() != esqlField.getDataType()) { + throw new IllegalArgumentException( + prefix + + "." + + name + + ": ql and esql didn't make the same resolution: types differ [" + + qlField.getDataType() + + "] != [" + + esqlField.getDataType() + + "]" + ); + } + if (qlField.isAggregatable() != esqlField.isAggregatable()) { + throw new IllegalArgumentException( + prefix + + "." + + name + + ": ql and esql didn't make the same resolution: aggregability differ [" + + qlField.isAggregatable() + + "] != [" + + esqlField.isAggregatable() + + "]" + ); + } + } + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }, + EsqlSession::specificValidity, + IndexResolver.PRESERVE_PROPERTIES, + // TODO no matter what metadata fields are asked in a query, the "allowedMetadataFields" is always _index, does it make + // sense to reflect the actual list of metadata fields instead? 
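For orientation: the block above resolves the mapping twice, once with the existing ql IndexResolver and once with the new EsqlIndexResolver, and any disagreement on validity, concrete indices, field names, types or aggregatability is raised as an IllegalArgumentException. A self-contained sketch of that comparison idea, with plain String-to-String maps standing in for the real field mappings (illustrative only, not code from the change):

import java.util.Map;

// Sketch of the cross-check idea above: both resolvers must agree on the field set
// and on each field's type, otherwise the resolution is considered inconsistent.
class ResolutionCrossCheck {
    static void assertSameMappings(String prefix, Map<String, String> fromQl, Map<String, String> fromEsql) {
        if (fromQl.keySet().equals(fromEsql.keySet()) == false) {
            throw new IllegalArgumentException(prefix + ": fields differ " + fromQl.keySet() + " != " + fromEsql.keySet());
        }
        for (String name : fromQl.keySet()) {
            if (fromQl.get(name).equals(fromEsql.get(name)) == false) {
                throw new IllegalArgumentException(
                    prefix + "." + name + ": types differ [" + fromQl.get(name) + "] != [" + fromEsql.get(name) + "]"
                );
            }
        }
    }
}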
+ IndexResolver.INDEX_METADATA_FIELD + ); + } + static Set fieldNames(LogicalPlan parsed, Set enrichPolicyMatchFields) { if (false == parsed.anyMatch(plan -> plan instanceof Aggregate || plan instanceof Project)) { // no explicit columns selection, for example "from employees" diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index 20714cc5633b6..dd937c11c9642 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -96,6 +96,7 @@ import java.util.Map; import java.util.Set; import java.util.TreeMap; +import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; import static org.elasticsearch.test.ListMatcher.matchesList; @@ -107,7 +108,6 @@ import static org.elasticsearch.xpack.esql.CsvTestsDataLoader.CSV_DATASET_MAP; import static org.elasticsearch.xpack.esql.EsqlTestUtils.TEST_VERIFIER; import static org.elasticsearch.xpack.esql.EsqlTestUtils.loadMapping; -import static org.elasticsearch.xpack.esql.plugin.EsqlPlugin.ESQL_THREAD_POOL_NAME; import static org.elasticsearch.xpack.ql.CsvSpecReader.specParser; import static org.elasticsearch.xpack.ql.TestUtils.classpathResources; import static org.hamcrest.Matchers.equalTo; @@ -161,6 +161,7 @@ public class CsvTests extends ESTestCase { private final Mapper mapper = new Mapper(functionRegistry); private final PhysicalPlanOptimizer physicalPlanOptimizer = new TestPhysicalPlanOptimizer(new PhysicalOptimizerContext(configuration)); private ThreadPool threadPool; + private Executor executor; @ParametersFactory(argumentFormatting = "%2$s.%3$s") public static List readScriptSpec() throws Exception { @@ -174,18 +175,17 @@ public static List readScriptSpec() throws Exception { @Before public void setUp() throws Exception { super.setUp(); - int numThreads = randomBoolean() ? 1 : between(2, 16); - threadPool = new TestThreadPool( - "CsvTests", - new FixedExecutorBuilder( - Settings.EMPTY, - ESQL_THREAD_POOL_NAME, - numThreads, - 1024, - "esql", - EsExecutors.TaskTrackingConfig.DEFAULT - ) - ); + if (randomBoolean()) { + int numThreads = randomBoolean() ? 
1 : between(2, 16); + threadPool = new TestThreadPool( + "CsvTests", + new FixedExecutorBuilder(Settings.EMPTY, "esql_test", numThreads, 1024, "esql", EsExecutors.TaskTrackingConfig.DEFAULT) + ); + executor = threadPool.executor("esql_test"); + } else { + threadPool = new TestThreadPool(getTestName()); + executor = threadPool.executor(ThreadPool.Names.SEARCH); + } HeaderWarning.setThreadContext(threadPool.getThreadContext()); } @@ -343,7 +343,7 @@ private ActualResults executePlan(BigArrays bigArrays) throws Exception { bigArrays, ByteSizeValue.ofBytes(randomLongBetween(1, BlockFactory.DEFAULT_MAX_BLOCK_PRIMITIVE_ARRAY_SIZE.getBytes() * 2)) ); - ExchangeSourceHandler exchangeSource = new ExchangeSourceHandler(between(1, 64), threadPool.executor(ESQL_THREAD_POOL_NAME)); + ExchangeSourceHandler exchangeSource = new ExchangeSourceHandler(between(1, 64), executor); ExchangeSinkHandler exchangeSink = new ExchangeSinkHandler(blockFactory, between(1, 64), threadPool::relativeTimeInMillis); LocalExecutionPlanner executionPlanner = new LocalExecutionPlanner( sessionId, @@ -406,13 +406,7 @@ private ActualResults executePlan(BigArrays bigArrays) throws Exception { DriverRunner runner = new DriverRunner(threadPool.getThreadContext()) { @Override protected void start(Driver driver, ActionListener driverListener) { - Driver.start( - threadPool.getThreadContext(), - threadPool.executor(ESQL_THREAD_POOL_NAME), - driver, - between(1, 1000), - driverListener - ); + Driver.start(threadPool.getThreadContext(), executor, driver, between(1, 1000), driverListener); } }; PlainActionFuture future = new PlainActionFuture<>(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseProfileTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseProfileTests.java index af8f6dcd550c4..782e1fb4333d8 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseProfileTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseProfileTests.java @@ -47,12 +47,19 @@ private List randomDriverProfiles() { } private DriverProfile randomDriverProfile() { - return new DriverProfile(randomList(10, this::randomOperatorStatus)); + return new DriverProfile( + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomList(10, this::randomOperatorStatus) + ); } private DriverStatus.OperatorStatus randomOperatorStatus() { String name = randomAlphaOfLength(4); - Operator.Status status = randomBoolean() ? null : new AbstractPageMappingOperator.Status(between(0, Integer.MAX_VALUE)); + Operator.Status status = randomBoolean() + ? 
null + : new AbstractPageMappingOperator.Status(randomNonNegativeLong(), between(0, Integer.MAX_VALUE)); return new DriverStatus.OperatorStatus(name, status); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java index 3b64870a15839..839e9c323bf74 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java @@ -458,15 +458,54 @@ public void testProfileXContent() { List.of(new ColumnInfo("foo", "integer")), List.of(new Page(blockFactory.newIntArrayVector(new int[] { 40, 80 }, 2).asBlock())), new EsqlQueryResponse.Profile( - List.of(new DriverProfile(List.of(new DriverStatus.OperatorStatus("asdf", new AbstractPageMappingOperator.Status(10))))) + List.of( + new DriverProfile( + 20021, + 20000, + 12, + List.of(new DriverStatus.OperatorStatus("asdf", new AbstractPageMappingOperator.Status(10021, 10))) + ) + ) ), false, false ); ) { - assertThat(Strings.toString(response), equalTo(""" - {"columns":[{"name":"foo","type":"integer"}],"values":[[40],[80]],"profile":{"drivers":[""" + """ - {"operators":[{"operator":"asdf","status":{"pages_processed":10}}]}]}}""")); + assertThat(Strings.toString(response, true, false), equalTo(""" + { + "columns" : [ + { + "name" : "foo", + "type" : "integer" + } + ], + "values" : [ + [ + 40 + ], + [ + 80 + ] + ], + "profile" : { + "drivers" : [ + { + "took_nanos" : 20021, + "cpu_nanos" : 20000, + "iterations" : 12, + "operators" : [ + { + "operator" : "asdf", + "status" : { + "process_nanos" : 10021, + "pages_processed" : 10 + } + } + ] + } + ] + } + }""")); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index b9c0e9b34b552..0d406d19d3d16 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.esql.analysis; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesIndexResponse; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.Streams; @@ -28,7 +29,7 @@ import org.elasticsearch.xpack.esql.plan.logical.Row; import org.elasticsearch.xpack.esql.plan.logical.local.EsqlProject; import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; -import org.elasticsearch.xpack.esql.session.EsqlSession; +import org.elasticsearch.xpack.esql.session.EsqlIndexResolver; import org.elasticsearch.xpack.esql.type.EsqlDataTypeRegistry; import org.elasticsearch.xpack.ql.expression.Alias; import org.elasticsearch.xpack.ql.expression.Attribute; @@ -40,7 +41,6 @@ import org.elasticsearch.xpack.ql.expression.UnresolvedAttribute; import org.elasticsearch.xpack.ql.index.EsIndex; import org.elasticsearch.xpack.ql.index.IndexResolution; -import org.elasticsearch.xpack.ql.index.IndexResolver; import org.elasticsearch.xpack.ql.plan.TableIdentifier; import org.elasticsearch.xpack.ql.plan.logical.Aggregate; import org.elasticsearch.xpack.ql.plan.logical.EsRelation; @@ -1767,14 +1767,9 @@ protected List filteredWarnings() { } private static LogicalPlan 
analyzeWithEmptyFieldCapsResponse(String query) throws IOException { - IndexResolution resolution = IndexResolver.mergedMappings( - EsqlDataTypeRegistry.INSTANCE, - "test*", - readFieldCapsResponse("empty_field_caps_response.json"), - EsqlSession::specificValidity, - IndexResolver.PRESERVE_PROPERTIES, - IndexResolver.INDEX_METADATA_FIELD - ); + List idxResponses = List.of(new FieldCapabilitiesIndexResponse("idx", "idx", Map.of(), true)); + FieldCapabilitiesResponse caps = new FieldCapabilitiesResponse(idxResponses, List.of()); + IndexResolution resolution = new EsqlIndexResolver(null, EsqlDataTypeRegistry.INSTANCE).mergedMappings("test*", caps); var analyzer = analyzer(resolution, TEST_VERIFIER, configuration(query)); return analyze(query, analyzer); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/EnrichOperatorStatusTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/EnrichOperatorStatusTests.java new file mode 100644 index 0000000000000..4fc67f85cc062 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/EnrichOperatorStatusTests.java @@ -0,0 +1,80 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.enrich; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; + +public class EnrichOperatorStatusTests extends AbstractWireSerializingTestCase { + @Override + protected Writeable.Reader instanceReader() { + return EnrichLookupOperator.Status::new; + } + + @Override + protected EnrichLookupOperator.Status createTestInstance() { + return new EnrichLookupOperator.Status( + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomLongBetween(1, TimeValue.timeValueHours(1).millis()) + ); + } + + @Override + protected EnrichLookupOperator.Status mutateInstance(EnrichLookupOperator.Status in) throws IOException { + int field = randomIntBetween(0, 3); + return switch (field) { + case 0 -> new EnrichLookupOperator.Status( + randomValueOtherThan(in.receivedPages(), ESTestCase::randomNonNegativeLong), + in.completedPages(), + in.totalTerms, + in.totalTimeInMillis() + ); + case 1 -> new EnrichLookupOperator.Status( + in.receivedPages(), + randomValueOtherThan(in.completedPages(), ESTestCase::randomNonNegativeLong), + in.totalTerms, + in.totalTimeInMillis() + ); + case 2 -> new EnrichLookupOperator.Status( + in.receivedPages(), + in.completedPages(), + randomValueOtherThan(in.totalTerms, ESTestCase::randomNonNegativeLong), + in.totalTimeInMillis() + ); + case 3 -> new EnrichLookupOperator.Status( + in.receivedPages(), + in.completedPages(), + in.totalTerms, + randomValueOtherThan(in.totalTimeInMillis(), ESTestCase::randomNonNegativeLong) + ); + default -> throw new AssertionError("unknown "); + }; + } + + public void testToXContent() { + var status = new EnrichLookupOperator.Status(100, 50, TimeValue.timeValueSeconds(10).millis(), 120); + String json = Strings.toString(status, true, true); + assertThat(json, equalTo(""" + { + "received_pages" : 100, + 
"completed_pages" : 50, + "total_time_in_millis" : 10000, + "total_time" : "10s", + "total_terms" : 120 + }""")); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index dded86fdd8aee..9daf043714efc 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -446,7 +446,7 @@ private void testEvaluateBlock(BlockFactory inputBlockFactory, DriverContext con // TODO cranky time - public final void testSimpleWithNulls() { // TODO replace this with nulls inserted into the test case like anyNullIsNull + public void testSimpleWithNulls() { // TODO replace this with nulls inserted into the test case like anyNullIsNull assumeTrue("nothing to do if a type error", testCase.getExpectedTypeError() == null); assumeTrue("All test data types must be representable in order to build fields", testCase.allTypesAreRepresentable()); List simpleData = testCase.getDataValues(); @@ -967,7 +967,8 @@ protected static String typeErrorMessage(boolean includeOrdinal, List argNames) throws IOException { @@ -1116,12 +1117,18 @@ private static void renderTypesTable(List argNames) throws IOException { [%header.monospaced.styled,format=dsv,separator=|] |=== """ + header + "\n" + table.stream().collect(Collectors.joining("\n")) + "\n|===\n"; - LogManager.getLogger(getTestClass()).info("Writing function types:\n{}", rendered); + LogManager.getLogger(getTestClass()).info("Writing function types for [{}]:\n{}", functionName(), rendered); writeToTempDir("types", rendered, "asciidoc"); } private static String functionName() { - return StringUtils.camelCaseToUnderscore(getTestClass().getSimpleName().replace("Tests", "")).toLowerCase(Locale.ROOT); + Class testClass = getTestClass(); + if (testClass.isAnnotationPresent(FunctionName.class)) { + FunctionName functionNameAnnotation = testClass.getAnnotation(FunctionName.class); + return functionNameAnnotation.value(); + } else { + return StringUtils.camelCaseToUnderscore(testClass.getSimpleName().replace("Tests", "")).toLowerCase(Locale.ROOT); + } } private static FunctionDefinition definition(String name) { @@ -1178,6 +1185,7 @@ private static void writeToTempDir(String subdir, String str, String extension) Files.createDirectories(dir); Path file = dir.resolve(functionName() + "." + extension); Files.writeString(file, str); + LogManager.getLogger(getTestClass()).info("Wrote function types for [{}] to file: {}", functionName(), file); } private final List breakers = Collections.synchronizedList(new ArrayList<>()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/FunctionName.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/FunctionName.java new file mode 100644 index 0000000000000..b4a5d3bdc2b92 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/FunctionName.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Tests that extend AbstractFunctionTestCase can use this annotation to specify the name of the function + * to use when generating documentation files while running tests. + * If this is not used, the name will be deduced from the test class name, by removing the "Test" suffix, and converting + * the class name to snake case. This annotation can be used to override that behavior, for cases where the deduced name + * is not correct. For example, in Elasticsearch the class name for `GeoPoint` capitalizes the `P` in `Point`, but the + * function name is `to_geopoint`, not `to_geo_point`. In some cases, even when compatible class names are used, + * like `StX` for the function `st_x`, the annotation is needed because the name deduction does not allow only a single + * character after the underscore. + */ +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.TYPE) +public @interface FunctionName { + /** The function name to use in generating documentation files while running tests */ + String value(); +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPointTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPointTests.java index 88910320c962e..4eadf88992582 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPointTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPointTests.java @@ -13,6 +13,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.geo.ShapeTestUtils; import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.FunctionName; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.expression.Expression; @@ -26,6 +27,7 @@ import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN; +@FunctionName("to_cartesianpoint") public class ToCartesianPointTests extends AbstractFunctionTestCase { public ToCartesianPointTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShapeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShapeTests.java index 117968de5148f..ad92b6578d71b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShapeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShapeTests.java @@ -13,6 +13,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.geo.GeometryTestUtils; import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.FunctionName; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import 
org.elasticsearch.xpack.ql.expression.Expression; @@ -26,6 +27,7 @@ import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN; +@FunctionName("to_cartesianshape") public class ToCartesianShapeTests extends AbstractFunctionTestCase { public ToCartesianShapeTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPointTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPointTests.java index 4a5534e1d5d1a..342325a63d96e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPointTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPointTests.java @@ -13,6 +13,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.geo.GeometryTestUtils; import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.FunctionName; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.expression.Expression; @@ -26,6 +27,7 @@ import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; +@FunctionName("to_geopoint") public class ToGeoPointTests extends AbstractFunctionTestCase { public ToGeoPointTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeTests.java index 15db74d71d21f..290d0a08db725 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeTests.java @@ -13,6 +13,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.geo.GeometryTestUtils; import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.FunctionName; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.expression.Expression; @@ -26,6 +27,7 @@ import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; +@FunctionName("to_geoshape") public class ToGeoShapeTests extends AbstractFunctionTestCase { public ToGeoShapeTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceTests.java new file mode 100644 index 0000000000000..4d1e58893739a --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceTests.java @@ -0,0 +1,346 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.geo.GeometryTestUtils; +import org.elasticsearch.geo.ShapeTestUtils; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase; +import org.elasticsearch.xpack.esql.type.EsqlDataTypes; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataType; +import org.elasticsearch.xpack.ql.type.DataTypes; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Supplier; + +import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN; +import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; + +public class MvSliceTests extends AbstractScalarFunctionTestCase { + public MvSliceTests(@Name("TestCase") Supplier testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable parameters() { + List suppliers = new ArrayList<>(); + booleans(suppliers); + ints(suppliers); + longs(suppliers); + doubles(suppliers); + bytesRefs(suppliers); + return parameterSuppliersFromTypedData(suppliers); + } + + @Override + protected DataType expectedType(List argTypes) { + return argTypes.get(0); + } + + @Override + protected List argSpec() { + return List.of(required(representableTypes()), required(integers()), optional(integers())); + } + + @Override + protected Expression build(Source source, List args) { + return new MvSlice(source, args.get(0), args.get(1), args.size() > 2 ? args.get(2) : null); + } + + private static void booleans(List suppliers) { + // Positive + suppliers.add(new TestCaseSupplier(List.of(DataTypes.BOOLEAN, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomBoolean()); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.BOOLEAN, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBooleanEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.BOOLEAN, + equalTo(start == end ? 
field.get(start) : field.subList(start, end + 1)) + ); + })); + // Positive Start IndexOutofBound + suppliers.add(new TestCaseSupplier(List.of(DataTypes.BOOLEAN, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomBoolean()); + int length = field.size(); + int start = randomIntBetween(length, length + 1); + int end = randomIntBetween(start, length + 10); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.BOOLEAN, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBooleanEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.BOOLEAN, + nullValue() + ); + })); + // Positive End IndexOutofBound + suppliers.add(new TestCaseSupplier(List.of(DataTypes.BOOLEAN, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomBoolean()); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(length, length + 10); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.BOOLEAN, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBooleanEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.BOOLEAN, + equalTo(start == length - 1 ? field.get(start) : field.subList(start, length)) + ); + })); + // Negative + suppliers.add(new TestCaseSupplier(List.of(DataTypes.BOOLEAN, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomBoolean()); + int length = field.size(); + int start = randomIntBetween(0 - length, -1); + int end = randomIntBetween(start, -1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.BOOLEAN, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBooleanEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.BOOLEAN, + equalTo(start == end ? field.get(start + length) : field.subList(start + length, end + 1 + length)) + ); + })); + } + + private static void ints(List suppliers) { + suppliers.add(new TestCaseSupplier(List.of(DataTypes.INTEGER, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomInt()); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.INTEGER, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceIntEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.INTEGER, + equalTo(start == end ? 
field.get(start) : field.subList(start, end + 1)) + ); + })); + } + + private static void longs(List suppliers) { + suppliers.add(new TestCaseSupplier(List.of(DataTypes.LONG, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomLong()); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.LONG, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceLongEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.LONG, + equalTo(start == end ? field.get(start) : field.subList(start, end + 1)) + ); + })); + + suppliers.add(new TestCaseSupplier(List.of(DataTypes.DATETIME, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomLong()); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.DATETIME, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceLongEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.DATETIME, + equalTo(start == end ? field.get(start) : field.subList(start, end + 1)) + ); + })); + } + + private static void doubles(List suppliers) { + suppliers.add(new TestCaseSupplier(List.of(DataTypes.DOUBLE, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomDouble()); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.DOUBLE, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceDoubleEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.DOUBLE, + equalTo(start == end ? field.get(start) : field.subList(start, end + 1)) + ); + })); + } + + private static void bytesRefs(List suppliers) { + suppliers.add(new TestCaseSupplier(List.of(DataTypes.KEYWORD, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomLiteral(DataTypes.KEYWORD).value()); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.KEYWORD, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBytesRefEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.KEYWORD, + equalTo(start == end ? 
field.get(start) : field.subList(start, end + 1)) + ); + })); + + suppliers.add(new TestCaseSupplier(List.of(DataTypes.TEXT, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomLiteral(DataTypes.TEXT).value()); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.TEXT, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBytesRefEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.TEXT, + equalTo(start == end ? field.get(start) : field.subList(start, end + 1)) + ); + })); + + suppliers.add(new TestCaseSupplier(List.of(DataTypes.IP, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomLiteral(DataTypes.IP).value()); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.IP, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBytesRefEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.IP, + equalTo(start == end ? field.get(start) : field.subList(start, end + 1)) + ); + })); + + suppliers.add(new TestCaseSupplier(List.of(DataTypes.VERSION, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomLiteral(DataTypes.VERSION).value()); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.VERSION, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBytesRefEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.VERSION, + equalTo(start == end ? field.get(start) : field.subList(start, end + 1)) + ); + })); + + suppliers.add(new TestCaseSupplier(List.of(EsqlDataTypes.GEO_POINT, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> new BytesRef(GEO.asWkt(GeometryTestUtils.randomPoint()))); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, EsqlDataTypes.GEO_POINT, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBytesRefEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + EsqlDataTypes.GEO_POINT, + equalTo(start == end ? 
field.get(start) : field.subList(start, end + 1)) + ); + })); + + suppliers.add(new TestCaseSupplier(List.of(EsqlDataTypes.CARTESIAN_POINT, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> new BytesRef(CARTESIAN.asWkt(ShapeTestUtils.randomPoint()))); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, EsqlDataTypes.CARTESIAN_POINT, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBytesRefEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + EsqlDataTypes.CARTESIAN_POINT, + equalTo(start == end ? field.get(start) : field.subList(start, end + 1)) + ); + })); + + suppliers.add(new TestCaseSupplier(List.of(EsqlDataTypes.GEO_SHAPE, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> new BytesRef(GEO.asWkt(GeometryTestUtils.randomGeometry(randomBoolean())))); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, EsqlDataTypes.GEO_SHAPE, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBytesRefEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + EsqlDataTypes.GEO_SHAPE, + equalTo(start == end ? field.get(start) : field.subList(start, end + 1)) + ); + })); + + suppliers.add(new TestCaseSupplier(List.of(EsqlDataTypes.CARTESIAN_SHAPE, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> new BytesRef(CARTESIAN.asWkt(ShapeTestUtils.randomGeometry(randomBoolean())))); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, EsqlDataTypes.CARTESIAN_SHAPE, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBytesRefEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + EsqlDataTypes.CARTESIAN_SHAPE, + equalTo(start == end ? field.get(start) : field.subList(start, end + 1)) + ); + })); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZipTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZipTests.java new file mode 100644 index 0000000000000..c4162f6ddc367 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZipTests.java @@ -0,0 +1,120 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefBuilder; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataType; +import org.elasticsearch.xpack.ql.type.DataTypes; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Supplier; + +import static java.lang.Math.max; +import static org.hamcrest.Matchers.equalTo; + +public class MvZipTests extends AbstractScalarFunctionTestCase { + public MvZipTests(@Name("TestCase") Supplier testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable parameters() { + List suppliers = new ArrayList<>(); + suppliers.add(new TestCaseSupplier(List.of(DataTypes.KEYWORD, DataTypes.KEYWORD, DataTypes.KEYWORD), () -> { + List left = randomList(1, 3, () -> randomLiteral(DataTypes.KEYWORD).value()); + List right = randomList(1, 3, () -> randomLiteral(DataTypes.KEYWORD).value()); + String delim = randomAlphaOfLengthBetween(1, 1); + List expected = calculateExpected(left, right, delim); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(left, DataTypes.KEYWORD, "mvLeft"), + new TestCaseSupplier.TypedData(right, DataTypes.KEYWORD, "mvRight"), + new TestCaseSupplier.TypedData(delim, DataTypes.KEYWORD, "delim") + ), + "MvZipEvaluator[leftField=Attribute[channel=0], rightField=Attribute[channel=1], delim=Attribute[channel=2]]", + DataTypes.KEYWORD, + equalTo(expected.size() == 1 ? expected.iterator().next() : expected) + ); + })); + + suppliers.add(new TestCaseSupplier(List.of(DataTypes.TEXT, DataTypes.TEXT, DataTypes.TEXT), () -> { + List left = randomList(1, 10, () -> randomLiteral(DataTypes.TEXT).value()); + List right = randomList(1, 10, () -> randomLiteral(DataTypes.TEXT).value()); + String delim = randomAlphaOfLengthBetween(1, 1); + List expected = calculateExpected(left, right, delim); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(left, DataTypes.TEXT, "mvLeft"), + new TestCaseSupplier.TypedData(right, DataTypes.TEXT, "mvRight"), + new TestCaseSupplier.TypedData(delim, DataTypes.TEXT, "delim") + ), + "MvZipEvaluator[leftField=Attribute[channel=0], rightField=Attribute[channel=1], delim=Attribute[channel=2]]", + DataTypes.KEYWORD, + equalTo(expected.size() == 1 ? expected.iterator().next() : expected) + ); + })); + + return parameterSuppliersFromTypedData(suppliers); + } + + @Override + protected DataType expectedType(List argTypes) { + return DataTypes.KEYWORD; + } + + @Override + protected List argSpec() { + return List.of(required(strings()), required(strings()), optional(strings())); + } + + @Override + protected Expression build(Source source, List args) { + return new MvZip(source, args.get(0), args.get(1), args.size() > 2 ? 
args.get(2) : null); + } + + private static List calculateExpected(List left, List right, String delim) { + List expected = new ArrayList<>(max(left.size(), right.size())); + int i = 0, j = 0; + while (i < left.size() && j < right.size()) { + BytesRefBuilder work = new BytesRefBuilder(); + work.append((BytesRef) left.get(i)); + work.append(new BytesRef(delim)); + work.append((BytesRef) right.get(j)); + expected.add(work.get()); + i++; + j++; + } + while (i < left.size()) { + BytesRefBuilder work = new BytesRefBuilder(); + work.append((BytesRef) left.get(i)); + expected.add(work.get()); + i++; + } + while (j < right.size()) { + BytesRefBuilder work = new BytesRefBuilder(); + work.append((BytesRef) right.get(j)); + expected.add(work.get()); + j++; + } + return expected; + } + + @Override + public void testSimpleWithNulls() { + assumeFalse("mv_zip returns null only if both left and right inputs are nulls", false); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXTests.java new file mode 100644 index 0000000000000..3227faa4417fa --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXTests.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.FunctionName; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.tree.Source; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Supplier; + +import static org.elasticsearch.xpack.ql.type.DataTypes.DOUBLE; +import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.UNSPECIFIED; + +@FunctionName("st_x") +public class StXTests extends AbstractFunctionTestCase { + public StXTests(@Name("TestCase") Supplier testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable parameters() { + String expectedEvaluator = "StXFromWKBEvaluator[field=Attribute[channel=0]]"; + final List suppliers = new ArrayList<>(); + TestCaseSupplier.forUnaryGeoPoint(suppliers, expectedEvaluator, DOUBLE, StXTests::valueOf, List.of()); + TestCaseSupplier.forUnaryCartesianPoint(suppliers, expectedEvaluator, DOUBLE, StXTests::valueOf, List.of()); + return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + } + + private static double valueOf(BytesRef wkb) { + return UNSPECIFIED.wkbAsPoint(wkb).getX(); + } + + @Override + protected Expression build(Source source, List args) { + return new StX(source, args.get(0)); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYTests.java 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYTests.java new file mode 100644 index 0000000000000..9416b7ba8cad4 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYTests.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.FunctionName; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.tree.Source; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Supplier; + +import static org.elasticsearch.xpack.ql.type.DataTypes.DOUBLE; +import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.UNSPECIFIED; + +@FunctionName("st_y") +public class StYTests extends AbstractFunctionTestCase { + public StYTests(@Name("TestCase") Supplier testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable parameters() { + String expectedEvaluator = "StYFromWKBEvaluator[field=Attribute[channel=0]]"; + final List suppliers = new ArrayList<>(); + TestCaseSupplier.forUnaryGeoPoint(suppliers, expectedEvaluator, DOUBLE, StYTests::valueOf, List.of()); + TestCaseSupplier.forUnaryCartesianPoint(suppliers, expectedEvaluator, DOUBLE, StYTests::valueOf, List.of()); + return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + } + + private static double valueOf(BytesRef wkb) { + return UNSPECIFIED.wkbAsPoint(wkb).getY(); + } + + @Override + protected Expression build(Source source, List args) { + return new StY(source, args.get(0)); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/FoldNull.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/FoldNull.java index 40b03be668606..5fa3dae744251 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/FoldNull.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/FoldNull.java @@ -8,9 +8,8 @@ package org.elasticsearch.xpack.esql.optimizer; import org.elasticsearch.xpack.ql.expression.Expression; -import org.elasticsearch.xpack.ql.optimizer.OptimizerRules; -public class FoldNull extends OptimizerRules.FoldNull { +public class FoldNull extends LogicalPlanOptimizer.FoldNull { @Override public Expression rule(Expression e) { return super.rule(e); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java index 1b2210dbd5f4e..6370b0198ae88 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java +++ 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java @@ -14,18 +14,22 @@ import org.elasticsearch.xpack.esql.analysis.AnalyzerContext; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; +import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Add; import org.elasticsearch.xpack.esql.parser.EsqlParser; import org.elasticsearch.xpack.esql.plan.logical.Eval; import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; import org.elasticsearch.xpack.esql.plan.logical.local.LocalSupplier; import org.elasticsearch.xpack.esql.stats.SearchStats; import org.elasticsearch.xpack.ql.expression.Alias; +import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.Expressions; import org.elasticsearch.xpack.ql.expression.Literal; import org.elasticsearch.xpack.ql.expression.ReferenceAttribute; +import org.elasticsearch.xpack.ql.expression.predicate.logical.And; import org.elasticsearch.xpack.ql.expression.predicate.nulls.IsNotNull; import org.elasticsearch.xpack.ql.index.EsIndex; import org.elasticsearch.xpack.ql.index.IndexResolution; +import org.elasticsearch.xpack.ql.optimizer.OptimizerRulesTests; import org.elasticsearch.xpack.ql.plan.logical.EsRelation; import org.elasticsearch.xpack.ql.plan.logical.Filter; import org.elasticsearch.xpack.ql.plan.logical.Limit; @@ -50,6 +54,10 @@ import static org.elasticsearch.xpack.esql.EsqlTestUtils.statsForExistingField; import static org.elasticsearch.xpack.esql.EsqlTestUtils.statsForMissingField; import static org.elasticsearch.xpack.esql.EsqlTestUtils.withDefaultLimitWarning; +import static org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizerTests.getFieldAttribute; +import static org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizerTests.greaterThanOf; +import static org.elasticsearch.xpack.ql.TestUtils.relation; +import static org.elasticsearch.xpack.ql.tree.Source.EMPTY; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; @@ -64,6 +72,8 @@ public class LocalLogicalPlanOptimizerTests extends ESTestCase { private static Map mapping; private static final Literal ONE = L(1); + private static final Literal TWO = L(2); + private static final Literal THREE = L(3); @BeforeClass public static void init() { @@ -348,6 +358,73 @@ public void testSparseDocument() throws Exception { assertThat(Alias.unwrap(field).fold(), Matchers.nullValue()); } + // InferIsNotNull + + public void testIsNotNullOnIsNullField() { + EsRelation relation = relation(); + var fieldA = getFieldAttribute("a"); + Expression inn = isNotNull(fieldA); + Filter f = new Filter(EMPTY, relation, inn); + + assertEquals(f, new LocalLogicalPlanOptimizer.InferIsNotNull().apply(f)); + } + + public void testIsNotNullOnOperatorWithOneField() { + EsRelation relation = relation(); + var fieldA = getFieldAttribute("a"); + Expression inn = isNotNull(new Add(EMPTY, fieldA, ONE)); + Filter f = new Filter(EMPTY, relation, inn); + Filter expected = new Filter(EMPTY, relation, new And(EMPTY, isNotNull(fieldA), inn)); + + assertEquals(expected, new LocalLogicalPlanOptimizer.InferIsNotNull().apply(f)); + } + + public void testIsNotNullOnOperatorWithTwoFields() { + EsRelation relation = relation(); + var fieldA = getFieldAttribute("a"); + var fieldB = getFieldAttribute("b"); + Expression inn = 
isNotNull(new Add(EMPTY, fieldA, fieldB)); + Filter f = new Filter(EMPTY, relation, inn); + Filter expected = new Filter(EMPTY, relation, new And(EMPTY, new And(EMPTY, isNotNull(fieldA), isNotNull(fieldB)), inn)); + + assertEquals(expected, new LocalLogicalPlanOptimizer.InferIsNotNull().apply(f)); + } + + public void testIsNotNullOnFunctionWithOneField() { + EsRelation relation = relation(); + var fieldA = getFieldAttribute("a"); + var pattern = L("abc"); + Expression inn = isNotNull( + new And( + EMPTY, + new OptimizerRulesTests.TestStartsWith(EMPTY, fieldA, pattern, false), + greaterThanOf(new Add(EMPTY, ONE, TWO), THREE) + ) + ); + + Filter f = new Filter(EMPTY, relation, inn); + Filter expected = new Filter(EMPTY, relation, new And(EMPTY, isNotNull(fieldA), inn)); + + assertEquals(expected, new LocalLogicalPlanOptimizer.InferIsNotNull().apply(f)); + } + + public void testIsNotNullOnFunctionWithTwoFields() { + EsRelation relation = relation(); + var fieldA = getFieldAttribute("a"); + var fieldB = getFieldAttribute("b"); + var pattern = L("abc"); + Expression inn = isNotNull(new OptimizerRulesTests.TestStartsWith(EMPTY, fieldA, fieldB, false)); + + Filter f = new Filter(EMPTY, relation, inn); + Filter expected = new Filter(EMPTY, relation, new And(EMPTY, new And(EMPTY, isNotNull(fieldA), isNotNull(fieldB)), inn)); + + assertEquals(expected, new LocalLogicalPlanOptimizer.InferIsNotNull().apply(f)); + } + + private IsNotNull isNotNull(Expression field) { + return new IsNotNull(EMPTY, field); + } + private LocalRelation asEmptyRelation(Object o) { var empty = as(o, LocalRelation.class); assertThat(empty.supplier(), is(LocalSupplier.EMPTY)); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java index 55320cfbeca32..cf387245a5968 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java @@ -57,6 +57,7 @@ import java.util.ArrayList; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; @@ -399,6 +400,7 @@ public void testIsNotNullPushdownFilter() { /** * Expects + * * LimitExec[1000[INTEGER]] * \_ExchangeExec[[],false] * \_ProjectExec[[_meta_field{f}#9, emp_no{f}#3, first_name{f}#4, gender{f}#5, job{f}#10, job.raw{f}#11, languages{f}#6, last_n @@ -420,6 +422,115 @@ public void testIsNullPushdownFilter() { assertThat(query.query().toString(), is(expected.toString())); } + /** + * Expects + * + * LimitExec[500[INTEGER]] + * \_AggregateExec[[],[COUNT(gender{f}#7) AS count(gender)],FINAL,null] + * \_ExchangeExec[[count{r}#15, seen{r}#16],true] + * \_AggregateExec[[],[COUNT(gender{f}#7) AS count(gender)],PARTIAL,8] + * \_FieldExtractExec[gender{f}#7] + * \_EsQueryExec[test], query[{"exists":{"field":"gender","boost":1.0}}][_doc{f}#17], limit[], sort[] estimatedRowSize[54] + */ + public void testIsNotNull_TextField_Pushdown() { + String textField = randomFrom("gender", "job"); + var plan = plan(String.format(Locale.ROOT, "from test | where %s is not null | stats count(%s)", textField, textField)); + + var limit = as(plan, LimitExec.class); + var finalAgg = as(limit.child(), AggregateExec.class); + var exchange = as(finalAgg.child(), ExchangeExec.class); + var partialAgg = 
as(exchange.child(), AggregateExec.class); + var fieldExtract = as(partialAgg.child(), FieldExtractExec.class); + var query = as(fieldExtract.child(), EsQueryExec.class); + var expected = QueryBuilders.existsQuery(textField); + assertThat(query.query().toString(), is(expected.toString())); + } + + /** + * Expects + * LimitExec[1000[INTEGER]] + * \_ExchangeExec[[],false] + * \_ProjectExec[[_meta_field{f}#9, emp_no{f}#3, first_name{f}#4, gender{f}#5, job{f}#10, job.raw{f}#11, languages{f}#6, last_n + * ame{f}#7, long_noidx{f}#12, salary{f}#8]] + * \_FieldExtractExec[_meta_field{f}#9, emp_no{f}#3, first_name{f}#4, gen..] + * \_EsQueryExec[test], query[{"bool":{"must_not":[{"exists":{"field":"gender","boost":1.0}}],"boost":1.0}}] + * [_doc{f}#13], limit[1000], sort[] estimatedRowSize[324] + */ + public void testIsNull_TextField_Pushdown() { + String textField = randomFrom("gender", "job"); + var plan = plan(String.format(Locale.ROOT, "from test | where %s is null", textField, textField)); + + var limit = as(plan, LimitExec.class); + var exchange = as(limit.child(), ExchangeExec.class); + var project = as(exchange.child(), ProjectExec.class); + var fieldExtract = as(project.child(), FieldExtractExec.class); + var query = as(fieldExtract.child(), EsQueryExec.class); + var expected = QueryBuilders.boolQuery().mustNot(QueryBuilders.existsQuery(textField)); + assertThat(query.query().toString(), is(expected.toString())); + } + + /** + * count(x) adds an implicit "exists(x)" filter in the pushed down query + * This test checks this "exists" doesn't clash with the "is null" pushdown on the text field. + * In this particular query, "exists(x)" and "x is null" cancel each other out. + * + * Expects + * + * LimitExec[1000[INTEGER]] + * \_AggregateExec[[],[COUNT(job{f}#19) AS c],FINAL,8] + * \_ExchangeExec[[count{r}#22, seen{r}#23],true] + * \_LocalSourceExec[[count{r}#22, seen{r}#23],[LongVectorBlock[vector=ConstantLongVector[positions=1, value=0]], BooleanVectorBlock + * [vector=ConstantBooleanVector[positions=1, value=true]]]] + */ + public void testIsNull_TextField_Pushdown_WithCount() { + var plan = plan(""" + from test + | eval filtered_job = job, count_job = job + | where filtered_job IS NULL + | stats c = COUNT(count_job) + """, IS_SV_STATS); + + var limit = as(plan, LimitExec.class); + var agg = as(limit.child(), AggregateExec.class); + var exg = as(agg.child(), ExchangeExec.class); + as(exg.child(), LocalSourceExec.class); + } + + /** + * count(x) adds an implicit "exists(x)" filter in the pushed down query. + * This test checks this "exists" doesn't clash with the "is null" pushdown on the text field. + * In this particular query, "exists(x)" and "x is not null" go hand in hand and the query is pushed down to Lucene. 
+ * + * Expects + * + * LimitExec[1000[INTEGER]] + * \_AggregateExec[[],[COUNT(job{f}#19) AS c],FINAL,8] + * \_ExchangeExec[[count{r}#22, seen{r}#23],true] + * \_EsStatsQueryExec[test], stats[Stat[name=job, type=COUNT, query={ + * "exists" : { + * "field" : "job", + * "boost" : 1.0 + * } + * }]]], query[{"exists":{"field":"job","boost":1.0}}][count{r}#25, seen{r}#26], limit[], + */ + public void testIsNotNull_TextField_Pushdown_WithCount() { + var plan = plan(""" + from test + | eval filtered_job = job, count_job = job + | where filtered_job IS NOT NULL + | stats c = COUNT(count_job) + """, IS_SV_STATS); + + var limit = as(plan, LimitExec.class); + var agg = as(limit.child(), AggregateExec.class); + var exg = as(agg.child(), ExchangeExec.class); + var esStatsQuery = as(exg.child(), EsStatsQueryExec.class); + assertThat(esStatsQuery.limit(), is(nullValue())); + assertThat(Expressions.names(esStatsQuery.output()), contains("count", "seen")); + var stat = as(esStatsQuery.stats().get(0), Stat.class); + assertThat(stat.query(), is(QueryBuilders.existsQuery("job"))); + } + /** * Expects * LimitExec[1000[INTEGER]] diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index 9dfcffbf48e6e..adcb1f611a343 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.esql.optimizer; +import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.compute.aggregation.QuantileStates; import org.elasticsearch.test.ESTestCase; @@ -21,27 +22,49 @@ import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThan; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqual; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEquals; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.regex.RLike; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.regex.WildcardLike; import org.elasticsearch.xpack.esql.expression.Order; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; +import org.elasticsearch.xpack.esql.expression.function.aggregate.Avg; import org.elasticsearch.xpack.esql.expression.function.aggregate.Count; +import org.elasticsearch.xpack.esql.expression.function.aggregate.CountDistinct; import org.elasticsearch.xpack.esql.expression.function.aggregate.Max; +import org.elasticsearch.xpack.esql.expression.function.aggregate.Median; +import org.elasticsearch.xpack.esql.expression.function.aggregate.MedianAbsoluteDeviation; import org.elasticsearch.xpack.esql.expression.function.aggregate.Min; import org.elasticsearch.xpack.esql.expression.function.aggregate.Percentile; +import org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialCentroid; import org.elasticsearch.xpack.esql.expression.function.aggregate.Sum; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToString; +import 
org.elasticsearch.xpack.esql.expression.function.scalar.date.DateExtract; import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateFormat; import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateParse; import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateTrunc; +import org.elasticsearch.xpack.esql.expression.function.scalar.ip.CIDRMatch; +import org.elasticsearch.xpack.esql.expression.function.scalar.math.Cos; import org.elasticsearch.xpack.esql.expression.function.scalar.math.Pow; import org.elasticsearch.xpack.esql.expression.function.scalar.math.Round; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.AbstractMultivalueFunction; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvAvg; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvCount; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvDedupe; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvFirst; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvLast; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMax; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMedian; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMin; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvSum; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Concat; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.LTrim; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Substring; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Add; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Div; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Mod; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Mul; +import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Neg; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Sub; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; import org.elasticsearch.xpack.esql.parser.EsqlParser; @@ -57,6 +80,7 @@ import org.elasticsearch.xpack.esql.plan.logical.local.LocalSupplier; import org.elasticsearch.xpack.ql.expression.Alias; import org.elasticsearch.xpack.ql.expression.Attribute; +import org.elasticsearch.xpack.ql.expression.AttributeSet; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.Expressions; import org.elasticsearch.xpack.ql.expression.FieldAttribute; @@ -65,9 +89,11 @@ import org.elasticsearch.xpack.ql.expression.Nullability; import org.elasticsearch.xpack.ql.expression.ReferenceAttribute; import org.elasticsearch.xpack.ql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.ql.expression.predicate.Predicates; import org.elasticsearch.xpack.ql.expression.predicate.logical.And; import org.elasticsearch.xpack.ql.expression.predicate.logical.Or; import org.elasticsearch.xpack.ql.expression.predicate.nulls.IsNotNull; +import org.elasticsearch.xpack.ql.expression.predicate.nulls.IsNull; import org.elasticsearch.xpack.ql.expression.predicate.regex.RLikePattern; import org.elasticsearch.xpack.ql.expression.predicate.regex.WildcardPattern; import org.elasticsearch.xpack.ql.index.EsIndex; @@ -85,6 +111,7 @@ 
import org.elasticsearch.xpack.ql.type.EsField; import org.junit.BeforeClass; +import java.lang.reflect.Constructor; import java.util.List; import java.util.Map; import java.util.Set; @@ -97,15 +124,28 @@ import static org.elasticsearch.xpack.esql.EsqlTestUtils.L; import static org.elasticsearch.xpack.esql.EsqlTestUtils.TEST_VERIFIER; import static org.elasticsearch.xpack.esql.EsqlTestUtils.as; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.configuration; import static org.elasticsearch.xpack.esql.EsqlTestUtils.emptySource; import static org.elasticsearch.xpack.esql.EsqlTestUtils.loadMapping; import static org.elasticsearch.xpack.esql.EsqlTestUtils.localSource; import static org.elasticsearch.xpack.esql.EsqlTestUtils.withDefaultLimitWarning; import static org.elasticsearch.xpack.esql.analysis.Analyzer.NO_FIELDS; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.GEO_POINT; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.GEO_SHAPE; import static org.elasticsearch.xpack.ql.TestUtils.relation; +import static org.elasticsearch.xpack.ql.expression.Literal.FALSE; +import static org.elasticsearch.xpack.ql.expression.Literal.NULL; +import static org.elasticsearch.xpack.ql.expression.Literal.TRUE; import static org.elasticsearch.xpack.ql.tree.Source.EMPTY; +import static org.elasticsearch.xpack.ql.type.DataTypes.BOOLEAN; +import static org.elasticsearch.xpack.ql.type.DataTypes.DOUBLE; import static org.elasticsearch.xpack.ql.type.DataTypes.INTEGER; +import static org.elasticsearch.xpack.ql.type.DataTypes.IP; +import static org.elasticsearch.xpack.ql.type.DataTypes.KEYWORD; +import static org.elasticsearch.xpack.ql.type.DataTypes.LONG; +import static org.elasticsearch.xpack.ql.type.DataTypes.TEXT; +import static org.elasticsearch.xpack.ql.type.DataTypes.UNSIGNED_LONG; +import static org.elasticsearch.xpack.ql.type.DataTypes.VERSION; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; @@ -826,17 +866,6 @@ public void testDoNotEliminateHigherLimitDueToDescendantLimit() throws Exception as(filter.child(), Limit.class); } - public void testBasicNullFolding() { - FoldNull rule = new FoldNull(); - assertNullLiteral(rule.rule(new Add(EMPTY, L(randomInt()), Literal.NULL))); - assertNullLiteral(rule.rule(new Round(EMPTY, Literal.NULL, null))); - assertNullLiteral(rule.rule(new Pow(EMPTY, Literal.NULL, Literal.NULL))); - assertNullLiteral(rule.rule(new DateFormat(EMPTY, Literal.NULL, Literal.NULL, null))); - assertNullLiteral(rule.rule(new DateParse(EMPTY, Literal.NULL, Literal.NULL))); - assertNullLiteral(rule.rule(new DateTrunc(EMPTY, Literal.NULL, Literal.NULL))); - assertNullLiteral(rule.rule(new Substring(EMPTY, Literal.NULL, Literal.NULL, Literal.NULL))); - } - public void testPruneSortBeforeStats() { LogicalPlan plan = optimizedPlan(""" from test @@ -3266,6 +3295,116 @@ public void testPlanSanityCheck() throws Exception { assertThat(e.getMessage(), containsString(" optimized incorrectly due to missing references [salary")); } + // https://github.com/elastic/elasticsearch/issues/104995 + public void testNoWrongIsNotNullPruning() { + var plan = optimizedPlan(""" + ROW a = 5, b = [ 1, 2 ] + | EVAL sum = a + b + | LIMIT 1 + | WHERE sum IS NOT NULL + """); + + var local = as(plan, LocalRelation.class); + assertThat(local.supplier(), equalTo(LocalSupplier.EMPTY)); + assertWarnings( + "Line 2:16: evaluation of [a + b] failed, treating result as null. 
Only first 20 failures recorded.", + "Line 2:16: java.lang.IllegalArgumentException: single-value function encountered multi-value" + ); + } + + /** + * Pushing down EVAL/GROK/DISSECT/ENRICH must not accidentally shadow attributes required by SORT. + * + * For DISSECT expects the following; the others are similar. + * + * EsqlProject[[first_name{f}#37, emp_no{r}#33, salary{r}#34]] + * \_TopN[[Order[$$emp_no$temp_name$36{r}#46 + $$salary$temp_name$41{r}#47 * 13[INTEGER],ASC,LAST], Order[NEG($$salary$t + * emp_name$41{r}#47),DESC,FIRST]],3[INTEGER]] + * \_Dissect[first_name{f}#37,Parser[pattern=%{emp_no} %{salary}, appendSeparator=, parser=org.elasticsearch.dissect.Dissect + * Parser@b6858b],[emp_no{r}#33, salary{r}#34]] + * \_Eval[[emp_no{f}#36 AS $$emp_no$temp_name$36, salary{f}#41 AS $$salary$temp_name$41]] + * \_EsRelation[test][_meta_field{f}#42, emp_no{f}#36, first_name{f}#37, ..] + */ + public void testPushdownWithOverwrittenName() { + List overwritingCommands = List.of( + "EVAL emp_no = 3*emp_no, salary = -2*emp_no-salary", + "DISSECT first_name \"%{emp_no} %{salary}\"", + "GROK first_name \"%{WORD:emp_no} %{WORD:salary}\"", + "ENRICH languages_idx ON first_name WITH emp_no = language_code, salary = language_code" + ); + + String queryTemplateKeepAfter = """ + FROM test + | SORT 13*(emp_no+salary) ASC, -salary DESC + | {} + | KEEP first_name, emp_no, salary + | LIMIT 3 + """; + // Equivalent but with KEEP first - ensures that attributes in the final projection are correct after pushdown rules were applied. + String queryTemplateKeepFirst = """ + FROM test + | KEEP emp_no, salary, first_name + | SORT 13*(emp_no+salary) ASC, -salary DESC + | {} + | LIMIT 3 + """; + + for (String overwritingCommand : overwritingCommands) { + String queryTemplate = randomBoolean() ? queryTemplateKeepFirst : queryTemplateKeepAfter; + var plan = optimizedPlan(LoggerMessageFormat.format(null, queryTemplate, overwritingCommand)); + + var project = as(plan, Project.class); + var projections = project.projections(); + assertThat(projections.size(), equalTo(3)); + assertThat(projections.get(0).name(), equalTo("first_name")); + assertThat(projections.get(1).name(), equalTo("emp_no")); + assertThat(projections.get(2).name(), equalTo("salary")); + + var topN = as(project.child(), TopN.class); + assertThat(topN.order().size(), is(2)); + + var firstOrderExpr = as(topN.order().get(0), Order.class); + var mul = as(firstOrderExpr.child(), Mul.class); + var add = as(mul.left(), Add.class); + var renamed_emp_no = as(add.left(), ReferenceAttribute.class); + var renamed_salary = as(add.right(), ReferenceAttribute.class); + assertThat(renamed_emp_no.toString(), startsWith("$$emp_no$temp_name")); + assertThat(renamed_salary.toString(), startsWith("$$salary$temp_name")); + + var secondOrderExpr = as(topN.order().get(1), Order.class); + var neg = as(secondOrderExpr.child(), Neg.class); + var renamed_salary2 = as(neg.field(), ReferenceAttribute.class); + assert (renamed_salary2.semanticEquals(renamed_salary) && renamed_salary2.equals(renamed_salary)); + + Eval renamingEval = null; + if (overwritingCommand.startsWith("EVAL")) { + // Multiple EVALs should be merged, so there's only one. 
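The renaming that testPushdownWithOverwrittenName verifies can be illustrated outside the optimizer. What follows is a standalone sketch, not part of this change and not the ESQL optimizer API: it uses a plain Java record and made-up rows to show why the sort has to read the preserved, temp-named originals rather than the values produced by the overwriting EVAL (the arithmetic mirrors the query above but is simplified).

import java.util.Comparator;
import java.util.List;

record Row(int empNo, int salary) {}

class SortBeforeOverwriteSketch {
    public static void main(String[] args) {
        List<Row> rows = List.of(new Row(10, 100), new Row(20, 50));

        // Correct order of operations: sort on the preserved (temp-named) originals,
        // then apply the overwriting expressions.
        List<Row> sortedThenEvaled = rows.stream()
            .sorted(Comparator.comparingInt((Row r) -> 13 * (r.empNo() + r.salary())))
            .map(r -> new Row(3 * r.empNo(), -2 * r.empNo() - r.salary()))
            .toList();

        // Wrong order: let the pushed-down EVAL shadow the attributes the sort still needs.
        List<Row> evaledThenSorted = rows.stream()
            .map(r -> new Row(3 * r.empNo(), -2 * r.empNo() - r.salary()))
            .sorted(Comparator.comparingInt((Row r) -> 13 * (r.empNo() + r.salary())))
            .toList();

        System.out.println(sortedThenEvaled.equals(evaledThenSorted)); // prints false
    }
}

In the real plans, that preservation is exactly what the Eval introducing the $$name$temp_name$NN aliases provides beneath the TopN.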
+ renamingEval = as(topN.child(), Eval.class); + } + if (overwritingCommand.startsWith("DISSECT")) { + var dissect = as(topN.child(), Dissect.class); + renamingEval = as(dissect.child(), Eval.class); + } + if (overwritingCommand.startsWith("GROK")) { + var grok = as(topN.child(), Grok.class); + renamingEval = as(grok.child(), Eval.class); + } + if (overwritingCommand.startsWith("ENRICH")) { + var enrich = as(topN.child(), Enrich.class); + renamingEval = as(enrich.child(), Eval.class); + } + + AttributeSet attributesCreatedInEval = new AttributeSet(); + for (Alias field : renamingEval.fields()) { + attributesCreatedInEval.add(field.toAttribute()); + } + assert (attributesCreatedInEval.contains(renamed_emp_no)); + assert (attributesCreatedInEval.contains(renamed_salary)); + + assertThat(renamingEval.child(), instanceOf(EsRelation.class)); + } + } + private LogicalPlan optimizedPlan(String query) { return plan(query); } @@ -3292,7 +3431,7 @@ private void assertNullLiteral(Expression expression) { } // TODO: move these from org.elasticsearch.xpack.ql.optimizer.OptimizerRulesTests to org.elasticsearch.xpack.ql.TestUtils - private static FieldAttribute getFieldAttribute(String name) { + public static FieldAttribute getFieldAttribute(String name) { return getFieldAttribute(name, INTEGER); } @@ -3312,4 +3451,305 @@ public static RLike rlike(Expression left, String exp) { protected List filteredWarnings() { return withDefaultLimitWarning(super.filteredWarnings()); } + + // Null folding + + public void testBasicNullFolding() { + FoldNull rule = new FoldNull(); + assertNullLiteral(rule.rule(new Add(EMPTY, L(randomInt()), Literal.NULL))); + assertNullLiteral(rule.rule(new Round(EMPTY, Literal.NULL, null))); + assertNullLiteral(rule.rule(new Pow(EMPTY, Literal.NULL, Literal.NULL))); + assertNullLiteral(rule.rule(new DateFormat(EMPTY, Literal.NULL, Literal.NULL, null))); + assertNullLiteral(rule.rule(new DateParse(EMPTY, Literal.NULL, Literal.NULL))); + assertNullLiteral(rule.rule(new DateTrunc(EMPTY, Literal.NULL, Literal.NULL))); + assertNullLiteral(rule.rule(new Substring(EMPTY, Literal.NULL, Literal.NULL, Literal.NULL))); + } + + public void testNullFoldingIsNull() { + FoldNull foldNull = new FoldNull(); + assertEquals(true, foldNull.rule(new IsNull(EMPTY, NULL)).fold()); + assertEquals(false, foldNull.rule(new IsNull(EMPTY, TRUE)).fold()); + } + + public void testNullFoldingIsNotNull() { + FoldNull foldNull = new FoldNull(); + assertEquals(true, foldNull.rule(new IsNotNull(EMPTY, TRUE)).fold()); + assertEquals(false, foldNull.rule(new IsNotNull(EMPTY, NULL)).fold()); + } + + public void testGenericNullableExpression() { + FoldNull rule = new FoldNull(); + // arithmetic + assertNullLiteral(rule.rule(new Add(EMPTY, getFieldAttribute("a"), NULL))); + // comparison + assertNullLiteral(rule.rule(greaterThanOf(getFieldAttribute("a"), NULL))); + // regex + assertNullLiteral(rule.rule(new RLike(EMPTY, NULL, new RLikePattern("123")))); + // date functions + assertNullLiteral(rule.rule(new DateExtract(EMPTY, NULL, NULL, configuration("")))); + // math functions + assertNullLiteral(rule.rule(new Cos(EMPTY, NULL))); + // string functions + assertNullLiteral(rule.rule(new LTrim(EMPTY, NULL))); + // spatial + assertNullLiteral(rule.rule(new SpatialCentroid(EMPTY, NULL))); + // ip + assertNullLiteral(rule.rule(new CIDRMatch(EMPTY, NULL, List.of(NULL)))); + // conversion + assertNullLiteral(rule.rule(new ToString(EMPTY, NULL))); + } + + public void testNullFoldingDoesNotApplyOnLogicalExpressions() { + FoldNull 
rule = new FoldNull(); + + Or or = new Or(EMPTY, NULL, TRUE); + assertEquals(or, rule.rule(or)); + or = new Or(EMPTY, NULL, NULL); + assertEquals(or, rule.rule(or)); + + And and = new And(EMPTY, NULL, TRUE); + assertEquals(and, rule.rule(and)); + and = new And(EMPTY, NULL, NULL); + assertEquals(and, rule.rule(and)); + } + + @SuppressWarnings("unchecked") + public void testNullFoldingDoesNotApplyOnAbstractMultivalueFunction() throws Exception { + FoldNull rule = new FoldNull(); + + List> items = List.of( + MvDedupe.class, + MvFirst.class, + MvLast.class, + MvMax.class, + MvMedian.class, + MvMin.class, + MvSum.class + ); + for (Class clazz : items) { + Constructor ctor = clazz.getConstructor(Source.class, Expression.class); + AbstractMultivalueFunction conditionalFunction = ctor.newInstance(EMPTY, getFieldAttribute("a")); + assertEquals(conditionalFunction, rule.rule(conditionalFunction)); + + conditionalFunction = ctor.newInstance(EMPTY, NULL); + assertEquals(NULL, rule.rule(conditionalFunction)); + } + + // avg and count are different just because they know the return type in advance (all the others infer the type from the input) + MvAvg avg = new MvAvg(EMPTY, getFieldAttribute("a")); + assertEquals(avg, rule.rule(avg)); + avg = new MvAvg(EMPTY, NULL); + assertEquals(new Literal(EMPTY, null, DOUBLE), rule.rule(avg)); + + MvCount count = new MvCount(EMPTY, getFieldAttribute("a")); + assertEquals(count, rule.rule(count)); + count = new MvCount(EMPTY, NULL); + assertEquals(new Literal(EMPTY, null, INTEGER), rule.rule(count)); + } + + @SuppressWarnings("unchecked") + public void testNullFoldingDoesNotApplyOnAggregate() throws Exception { + FoldNull rule = new FoldNull(); + + List> items = List.of(Max.class, Min.class); + for (Class clazz : items) { + Constructor ctor = clazz.getConstructor(Source.class, Expression.class); + AggregateFunction conditionalFunction = ctor.newInstance(EMPTY, getFieldAttribute("a")); + assertEquals(conditionalFunction, rule.rule(conditionalFunction)); + + conditionalFunction = ctor.newInstance(EMPTY, NULL); + assertEquals(NULL, rule.rule(conditionalFunction)); + } + + Avg avg = new Avg(EMPTY, getFieldAttribute("a")); + assertEquals(avg, rule.rule(avg)); + avg = new Avg(EMPTY, NULL); + assertEquals(new Literal(EMPTY, null, DOUBLE), rule.rule(avg)); + + Count count = new Count(EMPTY, getFieldAttribute("a")); + assertEquals(count, rule.rule(count)); + count = new Count(EMPTY, NULL); + assertEquals(count, rule.rule(count)); + + CountDistinct countd = new CountDistinct(EMPTY, getFieldAttribute("a"), getFieldAttribute("a")); + assertEquals(countd, rule.rule(countd)); + countd = new CountDistinct(EMPTY, NULL, NULL); + assertEquals(new Literal(EMPTY, null, LONG), rule.rule(countd)); + + Median median = new Median(EMPTY, getFieldAttribute("a")); + assertEquals(median, rule.rule(median)); + median = new Median(EMPTY, NULL); + assertEquals(new Literal(EMPTY, null, DOUBLE), rule.rule(median)); + + MedianAbsoluteDeviation medianad = new MedianAbsoluteDeviation(EMPTY, getFieldAttribute("a")); + assertEquals(medianad, rule.rule(medianad)); + medianad = new MedianAbsoluteDeviation(EMPTY, NULL); + assertEquals(new Literal(EMPTY, null, DOUBLE), rule.rule(medianad)); + + Percentile percentile = new Percentile(EMPTY, getFieldAttribute("a"), getFieldAttribute("a")); + assertEquals(percentile, rule.rule(percentile)); + percentile = new Percentile(EMPTY, NULL, NULL); + assertEquals(new Literal(EMPTY, null, DOUBLE), rule.rule(percentile)); + + Sum sum = new Sum(EMPTY,
getFieldAttribute("a")); + assertEquals(sum, rule.rule(sum)); + sum = new Sum(EMPTY, NULL); + assertEquals(new Literal(EMPTY, null, DOUBLE), rule.rule(sum)); + + } + + public void testNullFoldableDoesNotApplyToIsNullAndNotNull() { + FoldNull rule = new FoldNull(); + + DataType numericType = randomFrom(INTEGER, LONG, DOUBLE); + DataType genericType = randomFrom(INTEGER, LONG, DOUBLE, UNSIGNED_LONG, KEYWORD, TEXT, GEO_POINT, GEO_SHAPE, VERSION, IP); + List items = List.of( + new Add(EMPTY, getFieldAttribute("a", numericType), getFieldAttribute("b", numericType)), + new Add(EMPTY, new Literal(EMPTY, 1, INTEGER), new Literal(EMPTY, List.of(1, 2, 3), INTEGER)), + new Sub(EMPTY, getFieldAttribute("a", numericType), getFieldAttribute("b", numericType)), + new Sub(EMPTY, new Literal(EMPTY, 1, INTEGER), new Literal(EMPTY, List.of(1, 2, 3), INTEGER)), + new Mul(EMPTY, getFieldAttribute("a", numericType), getFieldAttribute("b", numericType)), + new Mul(EMPTY, new Literal(EMPTY, 1, INTEGER), new Literal(EMPTY, List.of(1, 2, 3), INTEGER)), + new Div(EMPTY, getFieldAttribute("a", numericType), getFieldAttribute("b", numericType)), + new Div(EMPTY, new Literal(EMPTY, 1, INTEGER), new Literal(EMPTY, List.of(1, 2, 3), INTEGER)), + + new GreaterThan(EMPTY, getFieldAttribute("a", numericType), getFieldAttribute("b", numericType), randomZone()), + new GreaterThan(EMPTY, new Literal(EMPTY, 1, INTEGER), new Literal(EMPTY, List.of(1, 2, 3), INTEGER), randomZone()), + new GreaterThanOrEqual(EMPTY, getFieldAttribute("a", numericType), getFieldAttribute("b", numericType), randomZone()), + new GreaterThanOrEqual(EMPTY, new Literal(EMPTY, 1, INTEGER), new Literal(EMPTY, List.of(1, 2, 3), INTEGER), randomZone()), + new LessThan(EMPTY, getFieldAttribute("a", numericType), getFieldAttribute("b", numericType), randomZone()), + new LessThan(EMPTY, new Literal(EMPTY, 1, INTEGER), new Literal(EMPTY, List.of(1, 2, 3), INTEGER), randomZone()), + new LessThanOrEqual(EMPTY, getFieldAttribute("a", numericType), getFieldAttribute("b", numericType), randomZone()), + new LessThanOrEqual(EMPTY, new Literal(EMPTY, 1, INTEGER), new Literal(EMPTY, List.of(1, 2, 3), INTEGER), randomZone()), + new NotEquals(EMPTY, getFieldAttribute("a", numericType), getFieldAttribute("b", numericType), randomZone()), + new NotEquals(EMPTY, new Literal(EMPTY, 1, INTEGER), new Literal(EMPTY, List.of(1, 2, 3), INTEGER), randomZone()), + + new Equals(EMPTY, getFieldAttribute("a", genericType), getFieldAttribute("b", genericType)), + new Equals(EMPTY, new Literal(EMPTY, 1, INTEGER), new Literal(EMPTY, List.of(1, 2, 3), INTEGER)) + ); + for (Expression item : items) { + Expression isNull = new IsNull(EMPTY, item); + Expression transformed = rule.rule(isNull); + assertEquals(isNull, transformed); + + IsNotNull isNotNull = new IsNotNull(EMPTY, item); + transformed = rule.rule(isNotNull); + assertEquals(isNotNull, transformed); + } + + } + + // + // Propagate nullability (IS NULL / IS NOT NULL) + // + + // a IS NULL AND a IS NOT NULL => false + public void testIsNullAndNotNull() throws Exception { + FieldAttribute fa = getFieldAttribute("a"); + + And and = new And(EMPTY, new IsNull(EMPTY, fa), new IsNotNull(EMPTY, fa)); + assertEquals(FALSE, new PropagateNullable().rule(and)); + } + + // a IS NULL AND b IS NOT NULL AND c IS NULL AND d IS NOT NULL AND e IS NULL AND a IS NOT NULL => false + public void testIsNullAndNotNullMultiField() throws Exception { + FieldAttribute fa = getFieldAttribute("a"); + + And andOne = new And(EMPTY, new IsNull(EMPTY, fa), new 
IsNotNull(EMPTY, getFieldAttribute("b"))); + And andTwo = new And(EMPTY, new IsNull(EMPTY, getFieldAttribute("c")), new IsNotNull(EMPTY, getFieldAttribute("d"))); + And andThree = new And(EMPTY, new IsNull(EMPTY, getFieldAttribute("e")), new IsNotNull(EMPTY, fa)); + + And and = new And(EMPTY, andOne, new And(EMPTY, andTwo, andThree)); + + assertEquals(FALSE, new PropagateNullable().rule(and)); + } + + // a IS NULL AND a > 1 => a IS NULL AND NULL + public void testIsNullAndComparison() { + FieldAttribute fa = getFieldAttribute("a"); + IsNull isNull = new IsNull(EMPTY, fa); + + And and = new And(EMPTY, isNull, greaterThanOf(fa, ONE)); + assertEquals(new And(EMPTY, isNull, nullOf(BOOLEAN)), new PropagateNullable().rule(and)); + } + + // a IS NULL AND b < 1 AND c < 1 AND a < 1 => a IS NULL AND b < 1 AND c < 1 AND NULL + public void testIsNullAndMultipleComparison() { + FieldAttribute fa = getFieldAttribute("a"); + IsNull aIsNull = new IsNull(EMPTY, fa); + + And bLT1_AND_cLT1 = new And(EMPTY, lessThanOf(getFieldAttribute("b"), ONE), lessThanOf(getFieldAttribute("c"), ONE)); + And aIsNull_AND_bLT1_AND_cLT1 = new And(EMPTY, aIsNull, bLT1_AND_cLT1); + And aIsNull_AND_bLT1_AND_cLT1_AND_aLT1 = new And(EMPTY, aIsNull_AND_bLT1_AND_cLT1, lessThanOf(fa, ONE)); + + Expression optimized = new PropagateNullable().rule(aIsNull_AND_bLT1_AND_cLT1_AND_aLT1); + Expression aIsNull_AND_bLT1_AND_cLT1_AND_NULL = new And(EMPTY, aIsNull_AND_bLT1_AND_cLT1, nullOf(BOOLEAN)); + assertEquals(Predicates.splitAnd(aIsNull_AND_bLT1_AND_cLT1_AND_NULL), Predicates.splitAnd(optimized)); + } + + public void testDoNotOptimizeIsNullAndMultipleComparisonWithConstants() { + Literal a = ONE; + Literal b = ONE; + IsNull aIsNull = new IsNull(EMPTY, a); + + And bLT1_AND_cLT1 = new And(EMPTY, lessThanOf(b, ONE), lessThanOf(getFieldAttribute("c"), ONE)); + And aIsNull_AND_bLT1_AND_cLT1 = new And(EMPTY, aIsNull, bLT1_AND_cLT1); + And aIsNull_AND_bLT1_AND_cLT1_AND_aLT1 = new And(EMPTY, aIsNull_AND_bLT1_AND_cLT1, lessThanOf(a, ONE)); + + Expression optimized = new PropagateNullable().rule(aIsNull_AND_bLT1_AND_cLT1_AND_aLT1); + Literal nullLiteral = new Literal(EMPTY, null, BOOLEAN); + assertEquals(asList(aIsNull, nullLiteral, nullLiteral, nullLiteral), Predicates.splitAnd(optimized)); + } + + // ((a+1)/2) > 1 AND a + 2 AND a IS NULL AND b < 3 => NULL AND NULL AND a IS NULL AND b < 3 + public void testIsNullAndDeeplyNestedExpression() throws Exception { + FieldAttribute fa = getFieldAttribute("a"); + IsNull isNull = new IsNull(EMPTY, fa); + + Expression nullified = new And( + EMPTY, + greaterThanOf(new Div(EMPTY, new Add(EMPTY, fa, ONE), TWO), ONE), + greaterThanOf(new Add(EMPTY, fa, TWO), ONE) + ); + Expression kept = new And(EMPTY, isNull, lessThanOf(getFieldAttribute("b"), THREE)); + And and = new And(EMPTY, nullified, kept); + + Expression optimized = new PropagateNullable().rule(and); + Expression expected = new And(EMPTY, new And(EMPTY, nullOf(BOOLEAN), nullOf(BOOLEAN)), kept); + + assertEquals(Predicates.splitAnd(expected), Predicates.splitAnd(optimized)); + } + + // a IS NULL OR a IS NOT NULL => no change + // a IS NULL OR a > 1 => no change + public void testIsNullInDisjunction() throws Exception { + FieldAttribute fa = getFieldAttribute("a"); + + Or or = new Or(EMPTY, new IsNull(EMPTY, fa), new IsNotNull(EMPTY, fa)); + Filter dummy = new Filter(EMPTY, relation(), or); + LogicalPlan transformed = new PropagateNullable().apply(dummy); + assertSame(dummy, transformed); + assertEquals(or, ((Filter) transformed).condition()); + + or = 
new Or(EMPTY, new IsNull(EMPTY, fa), greaterThanOf(fa, ONE)); + dummy = new Filter(EMPTY, relation(), or); + transformed = new PropagateNullable().apply(dummy); + assertSame(dummy, transformed); + assertEquals(or, ((Filter) transformed).condition()); + } + + // a + 1 AND (a IS NULL OR a > 3) => no change + public void testIsNullDisjunction() throws Exception { + FieldAttribute fa = getFieldAttribute("a"); + IsNull isNull = new IsNull(EMPTY, fa); + + Or or = new Or(EMPTY, isNull, greaterThanOf(fa, THREE)); + And and = new And(EMPTY, new Add(EMPTY, fa, ONE), or); + + assertEquals(and, new PropagateNullable().rule(and)); + } + + private Literal nullOf(DataType dataType) { + return new Literal(Source.EMPTY, null, dataType); + } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PropagateNullable.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PropagateNullable.java new file mode 100644 index 0000000000000..eee5d9b4c49dc --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PropagateNullable.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.optimizer; + +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.expression.predicate.logical.And; + +public class PropagateNullable extends LogicalPlanOptimizer.PropagateNullable { + @Override + public Expression rule(And and) { + return super.rule(and); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java index f773904ed8973..1324b3977786a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java @@ -50,7 +50,7 @@ interface Setup { List> build(RandomIndexWriter iw) throws IOException; - void assertStats(SingleValueQuery.Builder builder, boolean subHasTwoPhase); + void assertStats(SingleValueQuery.Builder builder, YesNoSometimes subHasTwoPhase); } @ParametersFactory @@ -74,15 +74,15 @@ public SingleValueQueryTests(Setup setup) { } public void testMatchAll() throws IOException { - testCase(new SingleValueQuery(new MatchAll(Source.EMPTY), "foo").asBuilder(), false, false, this::runCase); + testCase(new SingleValueQuery(new MatchAll(Source.EMPTY), "foo").asBuilder(), YesNoSometimes.NO, YesNoSometimes.NO, this::runCase); } public void testMatchSome() throws IOException { int max = between(1, 100); testCase( new SingleValueQuery.Builder(new RangeQueryBuilder("i").lt(max), "foo", new SingleValueQuery.Stats(), Source.EMPTY), - false, - false, + YesNoSometimes.SOMETIMES, + YesNoSometimes.NO, (fieldValues, count) -> runCase(fieldValues, count, null, max, false) ); } @@ -95,8 +95,8 @@ public void testSubPhrase() throws IOException { new SingleValueQuery.Stats(), Source.EMPTY ), - false, - true, + YesNoSometimes.NO, + YesNoSometimes.YES, this::runCase ); } @@ -104,8 +104,8 @@ public void testSubPhrase() throws IOException { public void testMatchNone() throws IOException { testCase( new SingleValueQuery.Builder(new 
MatchNoneQueryBuilder(), "foo", new SingleValueQuery.Stats(), Source.EMPTY), - true, - false, + YesNoSometimes.YES, + YesNoSometimes.NO, (fieldValues, count) -> assertThat(count, equalTo(0)) ); } @@ -113,8 +113,8 @@ public void testMatchNone() throws IOException { public void testRewritesToMatchNone() throws IOException { testCase( new SingleValueQuery.Builder(new TermQueryBuilder("missing", 0), "foo", new SingleValueQuery.Stats(), Source.EMPTY), - true, - false, + YesNoSometimes.YES, + YesNoSometimes.NO, (fieldValues, count) -> assertThat(count, equalTo(0)) ); } @@ -122,8 +122,8 @@ public void testRewritesToMatchNone() throws IOException { public void testNotMatchAll() throws IOException { testCase( new SingleValueQuery(new MatchAll(Source.EMPTY), "foo").negate(Source.EMPTY).asBuilder(), - true, - false, + YesNoSometimes.YES, + YesNoSometimes.NO, (fieldValues, count) -> assertThat(count, equalTo(0)) ); } @@ -131,8 +131,8 @@ public void testNotMatchAll() throws IOException { public void testNotMatchNone() throws IOException { testCase( new SingleValueQuery(new MatchAll(Source.EMPTY).negate(Source.EMPTY), "foo").negate(Source.EMPTY).asBuilder(), - false, - false, + YesNoSometimes.NO, + YesNoSometimes.NO, this::runCase ); } @@ -141,8 +141,8 @@ public void testNotMatchSome() throws IOException { int max = between(1, 100); testCase( new SingleValueQuery(new RangeQuery(Source.EMPTY, "i", null, false, max, false, null), "foo").negate(Source.EMPTY).asBuilder(), - false, - true, + YesNoSometimes.SOMETIMES, + YesNoSometimes.SOMETIMES, (fieldValues, count) -> runCase(fieldValues, count, max, 100, true) ); } @@ -191,8 +191,18 @@ private void runCase(List> fieldValues, int count) { runCase(fieldValues, count, null, null, false); } - private void testCase(SingleValueQuery.Builder builder, boolean rewritesToMatchNone, boolean subHasTwoPhase, TestCase testCase) - throws IOException { + enum YesNoSometimes { + YES, + NO, + SOMETIMES; + } + + private void testCase( + SingleValueQuery.Builder builder, + YesNoSometimes rewritesToMatchNone, + YesNoSometimes subHasTwoPhase, + TestCase testCase + ) throws IOException { MapperService mapper = createMapperService(mapping(setup::mapping)); try (Directory d = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), d)) { List> fieldValues = setup.build(iw); @@ -201,7 +211,7 @@ private void testCase(SingleValueQuery.Builder builder, boolean rewritesToMatchN QueryBuilder rewritten = builder.rewrite(ctx); Query query = rewritten.toQuery(ctx); testCase.run(fieldValues, ctx.searcher().count(query)); - if (rewritesToMatchNone) { + if (rewritesToMatchNone == YesNoSometimes.YES) { assertThat(rewritten, instanceOf(MatchNoneQueryBuilder.class)); assertThat(builder.stats().missingField(), equalTo(0)); assertThat(builder.stats().rewrittenToMatchNone(), equalTo(1)); @@ -217,7 +227,9 @@ private void testCase(SingleValueQuery.Builder builder, boolean rewritesToMatchN assertThat(builder.stats().rewrittenToMatchNone(), equalTo(0)); setup.assertStats(builder, subHasTwoPhase); } - assertThat(builder.stats().noNextScorer(), equalTo(0)); + if (rewritesToMatchNone != YesNoSometimes.SOMETIMES) { + assertThat(builder.stats().noNextScorer(), equalTo(0)); + } } } } @@ -300,7 +312,7 @@ private List docFor(int i, Iterable values) { } @Override - public void assertStats(SingleValueQuery.Builder builder, boolean subHasTwoPhase) { + public void assertStats(SingleValueQuery.Builder builder, YesNoSometimes subHasTwoPhase) { assertThat(builder.stats().missingField(), equalTo(0)); 
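As an aside on the boolean-to-enum switch being introduced here: SOMETIMES exists because, with randomized data, a sub-query may or may not rewrite to match_none or take the two-phase path, so the stats can only be checked in aggregate. The following is a minimal standalone sketch of that assertion shape; the helper class, method names and counter values are invented for illustration and are not code from this change.

enum YesNoSometimes { YES, NO, SOMETIMES }

class TriStateStatsCheck {
    // noApprox / approx stand in for counters like numericMultiNoApprox() / numericMultiApprox().
    static void check(YesNoSometimes subHasTwoPhase, int noApprox, int approx) {
        switch (subHasTwoPhase) {
            case YES -> { requireTrue(noApprox == 0); requireTrue(approx > 0); }
            case NO -> { requireTrue(noApprox > 0); requireTrue(approx == 0); }
            // Either counter may stay at zero, as long as one of them saw work.
            case SOMETIMES -> requireTrue(noApprox + approx > 0);
        }
    }

    static void requireTrue(boolean condition) {
        if (condition == false) {
            throw new AssertionError("stats did not match the expected shape");
        }
    }

    public static void main(String[] args) {
        check(YesNoSometimes.SOMETIMES, 0, 3); // passes: only the approximated path ran
        check(YesNoSometimes.NO, 2, 0);        // passes: two-phase iteration never expected
    }
}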
switch (fieldType) { case "long", "integer", "short", "byte", "double", "float" -> { @@ -312,12 +324,20 @@ public void assertStats(SingleValueQuery.Builder builder, boolean subHasTwoPhase if (multivaluedField || empty) { assertThat(builder.stats().numericSingle(), greaterThanOrEqualTo(0)); - if (subHasTwoPhase) { - assertThat(builder.stats().numericMultiNoApprox(), equalTo(0)); - assertThat(builder.stats().numericMultiApprox(), greaterThan(0)); - } else { - assertThat(builder.stats().numericMultiNoApprox(), greaterThan(0)); - assertThat(builder.stats().numericMultiApprox(), equalTo(0)); + switch (subHasTwoPhase) { + case YES -> { + assertThat(builder.stats().numericMultiNoApprox(), equalTo(0)); + assertThat(builder.stats().numericMultiApprox(), greaterThan(0)); + } + case NO -> { + assertThat(builder.stats().numericMultiNoApprox(), greaterThan(0)); + assertThat(builder.stats().numericMultiApprox(), equalTo(0)); + } + case SOMETIMES -> { + assertThat(builder.stats().numericMultiNoApprox() + builder.stats().numericMultiApprox(), greaterThan(0)); + assertThat(builder.stats().numericMultiNoApprox(), greaterThanOrEqualTo(0)); + assertThat(builder.stats().numericMultiApprox(), greaterThanOrEqualTo(0)); + } } } else { assertThat(builder.stats().numericSingle(), greaterThan(0)); @@ -333,12 +353,20 @@ public void assertStats(SingleValueQuery.Builder builder, boolean subHasTwoPhase assertThat(builder.stats().bytesNoApprox(), equalTo(0)); if (multivaluedField || empty) { assertThat(builder.stats().ordinalsSingle(), greaterThanOrEqualTo(0)); - if (subHasTwoPhase) { - assertThat(builder.stats().ordinalsMultiNoApprox(), equalTo(0)); - assertThat(builder.stats().ordinalsMultiApprox(), greaterThan(0)); - } else { - assertThat(builder.stats().ordinalsMultiNoApprox(), greaterThan(0)); - assertThat(builder.stats().ordinalsMultiApprox(), equalTo(0)); + switch (subHasTwoPhase) { + case YES -> { + assertThat(builder.stats().ordinalsMultiNoApprox(), equalTo(0)); + assertThat(builder.stats().ordinalsMultiApprox(), greaterThan(0)); + } + case NO -> { + assertThat(builder.stats().ordinalsMultiNoApprox(), greaterThan(0)); + assertThat(builder.stats().ordinalsMultiApprox(), equalTo(0)); + } + case SOMETIMES -> { + assertThat(builder.stats().ordinalsMultiNoApprox() + builder.stats().ordinalsMultiApprox(), greaterThan(0)); + assertThat(builder.stats().ordinalsMultiNoApprox(), greaterThanOrEqualTo(0)); + assertThat(builder.stats().ordinalsMultiApprox(), greaterThanOrEqualTo(0)); + } } } else { assertThat(builder.stats().ordinalsSingle(), greaterThan(0)); @@ -371,7 +399,7 @@ public List> build(RandomIndexWriter iw) throws IOException { } @Override - public void assertStats(SingleValueQuery.Builder builder, boolean subHasTwoPhase) { + public void assertStats(SingleValueQuery.Builder builder, YesNoSometimes subHasTwoPhase) { assertThat(builder.stats().missingField(), equalTo(1)); assertThat(builder.stats().numericSingle(), equalTo(0)); assertThat(builder.stats().numericMultiNoApprox(), equalTo(0)); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java index 06eae5d57cf16..f90e441b8c308 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java @@ -9,7 +9,9 @@ import org.elasticsearch.action.ActionListener; import 
org.elasticsearch.action.fieldcaps.FieldCapabilities; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesIndexResponse; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; +import org.elasticsearch.action.fieldcaps.IndexFieldCapabilities; import org.elasticsearch.client.internal.Client; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; @@ -21,19 +23,19 @@ import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolver; import org.elasticsearch.xpack.esql.execution.PlanExecutor; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; +import org.elasticsearch.xpack.esql.session.EsqlIndexResolver; import org.elasticsearch.xpack.esql.type.EsqlDataTypeRegistry; import org.elasticsearch.xpack.ql.index.IndexResolver; import org.junit.After; import org.junit.Before; import org.mockito.stubbing.Answer; +import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; -import static java.util.Collections.emptyMap; -import static java.util.Collections.singletonMap; import static org.elasticsearch.xpack.esql.EsqlTestUtils.withDefaultLimitWarning; import static org.hamcrest.Matchers.instanceOf; import static org.mockito.ArgumentMatchers.any; @@ -68,11 +70,10 @@ EnrichPolicyResolver mockEnrichResolver() { } public void testFailedMetric() { - Client client = mock(Client.class); - IndexResolver idxResolver = new IndexResolver(client, randomAlphaOfLength(10), EsqlDataTypeRegistry.INSTANCE, Set::of); - var planExecutor = new PlanExecutor(idxResolver); String[] indices = new String[] { "test" }; - var enrichResolver = mockEnrichResolver(); + + Client qlClient = mock(Client.class); + IndexResolver idxResolver = new IndexResolver(qlClient, randomAlphaOfLength(10), EsqlDataTypeRegistry.INSTANCE, Set::of); // simulate a valid field_caps response so we can parse and correctly analyze de query FieldCapabilitiesResponse fieldCapabilitiesResponse = mock(FieldCapabilitiesResponse.class); when(fieldCapabilitiesResponse.getIndices()).thenReturn(indices); @@ -80,9 +81,23 @@ public void testFailedMetric() { doAnswer((Answer) invocation -> { @SuppressWarnings("unchecked") ActionListener listener = (ActionListener) invocation.getArguments()[1]; + // simulate a valid field_caps response so we can parse and correctly analyze the query listener.onResponse(fieldCapabilitiesResponse); return null; - }).when(client).fieldCaps(any(), any()); + }).when(qlClient).fieldCaps(any(), any()); + + Client esqlClient = mock(Client.class); + EsqlIndexResolver esqlIndexResolver = new EsqlIndexResolver(esqlClient, EsqlDataTypeRegistry.INSTANCE); + doAnswer((Answer) invocation -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + // simulate a valid field_caps response so we can parse and correctly analyze the query + listener.onResponse(new FieldCapabilitiesResponse(indexFieldCapabilities(indices), List.of())); + return null; + }).when(esqlClient).fieldCaps(any(), any()); + + var planExecutor = new PlanExecutor(idxResolver, esqlIndexResolver); + var enrichResolver = mockEnrichResolver(); var request = new EsqlQueryRequest(); // test a failed query: xyz field doesn't exist @@ -122,12 +137,30 @@ public void onFailure(Exception e) { assertEquals(1, planExecutor.metrics().stats().get("features.stats")); } + private List indexFieldCapabilities(String[] indices) { + List responses = new ArrayList<>(); + for (String idx : indices) { + responses.add( + new
FieldCapabilitiesIndexResponse( + idx, + idx, + Map.ofEntries( + Map.entry("foo", new IndexFieldCapabilities("foo", "integer", false, true, true, false, null, Map.of())), + Map.entry("bar", new IndexFieldCapabilities("bar", "long", false, true, true, false, null, Map.of())) + ), + true + ) + ); + } + return responses; + } + private Map> fields(String[] indices) { - FieldCapabilities fooField = new FieldCapabilities("foo", "integer", false, true, true, indices, null, null, emptyMap()); - FieldCapabilities barField = new FieldCapabilities("bar", "long", false, true, true, indices, null, null, emptyMap()); + FieldCapabilities fooField = new FieldCapabilities("foo", "integer", false, true, true, indices, null, null, Map.of()); + FieldCapabilities barField = new FieldCapabilities("bar", "long", false, true, true, indices, null, null, Map.of()); Map> fields = new HashMap<>(); - fields.put(fooField.getName(), singletonMap(fooField.getName(), fooField)); - fields.put(barField.getName(), singletonMap(barField.getName(), barField)); + fields.put(fooField.getName(), Map.of(fooField.getName(), fooField)); + fields.put(barField.getName(), Map.of(barField.getName(), barField)); return fields; } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistryTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistryTests.java index e4fa78fac0dee..93f58398d267f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistryTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistryTests.java @@ -6,17 +6,18 @@ */ package org.elasticsearch.xpack.esql.type; -import org.elasticsearch.action.fieldcaps.FieldCapabilities; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesIndexResponse; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; +import org.elasticsearch.action.fieldcaps.IndexFieldCapabilities; import org.elasticsearch.index.mapper.TimeSeriesParams; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.esql.session.EsqlSession; +import org.elasticsearch.xpack.esql.session.EsqlIndexResolver; import org.elasticsearch.xpack.ql.index.IndexResolution; -import org.elasticsearch.xpack.ql.index.IndexResolver; import org.elasticsearch.xpack.ql.type.DataType; import org.elasticsearch.xpack.ql.type.DataTypes; import org.elasticsearch.xpack.ql.type.EsField; +import java.util.List; import java.util.Map; import static org.hamcrest.Matchers.equalTo; @@ -35,33 +36,20 @@ public void testLong() { } private void resolve(String esTypeName, TimeSeriesParams.MetricType metricType, DataType expected) { - String[] indices = new String[] { "idx-" + randomAlphaOfLength(5) }; - FieldCapabilities fieldCap = new FieldCapabilities( - randomAlphaOfLength(3), - esTypeName, - false, - true, - true, - false, - metricType, - indices, - null, - null, - null, - null, - Map.of() - ); - FieldCapabilitiesResponse caps = new FieldCapabilitiesResponse(indices, Map.of(fieldCap.getName(), Map.of(esTypeName, fieldCap))); - IndexResolution resolution = IndexResolver.mergedMappings( - EsqlDataTypeRegistry.INSTANCE, - "idx-*", - caps, - EsqlSession::specificValidity, - IndexResolver.PRESERVE_PROPERTIES, - null + String idx = "idx-" + randomAlphaOfLength(5); + String field = "f" + randomAlphaOfLength(3); + List idxResponses = List.of( + new FieldCapabilitiesIndexResponse( + idx, + idx, + Map.of(field, new IndexFieldCapabilities(field, esTypeName, 
false, true, true, false, metricType, Map.of())), + true + ) ); - EsField f = resolution.get().mapping().get(fieldCap.getName()); + FieldCapabilitiesResponse caps = new FieldCapabilitiesResponse(idxResponses, List.of()); + IndexResolution resolution = new EsqlIndexResolver(null, EsqlDataTypeRegistry.INSTANCE).mergedMappings("idx-*", caps); + EsField f = resolution.get().mapping().get(field); assertThat(f.getDataType(), equalTo(expected)); } } diff --git a/x-pack/plugin/esql/src/test/resources/empty_field_caps_response.json b/x-pack/plugin/esql/src/test/resources/empty_field_caps_response.json deleted file mode 100644 index fe8b293e3c0b9..0000000000000 --- a/x-pack/plugin/esql/src/test/resources/empty_field_caps_response.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "indices": [ - "test1", - "test2" - ], - "fields": { - "_index": { - "_index": { - "type": "_index", - "metadata_field": true, - "searchable": true, - "aggregatable": true - } - } - } -} diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/TimeSeriesDataStreamsIT.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/TimeSeriesDataStreamsIT.java index cb4685a0564ed..95735ffbe8a87 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/TimeSeriesDataStreamsIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/TimeSeriesDataStreamsIT.java @@ -11,12 +11,14 @@ import org.elasticsearch.client.Response; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.ilm.CheckNotDataStreamWriteIndexStep; import org.elasticsearch.xpack.core.ilm.DeleteAction; +import org.elasticsearch.xpack.core.ilm.DeleteStep; import org.elasticsearch.xpack.core.ilm.ForceMergeAction; import org.elasticsearch.xpack.core.ilm.FreezeAction; import org.elasticsearch.xpack.core.ilm.PhaseCompleteStep; @@ -37,6 +39,7 @@ import static org.elasticsearch.xpack.TimeSeriesRestDriver.createNewSingletonPolicy; import static org.elasticsearch.xpack.TimeSeriesRestDriver.createSnapshotRepo; import static org.elasticsearch.xpack.TimeSeriesRestDriver.explainIndex; +import static org.elasticsearch.xpack.TimeSeriesRestDriver.getBackingIndices; import static org.elasticsearch.xpack.TimeSeriesRestDriver.getOnlyIndexSettings; import static org.elasticsearch.xpack.TimeSeriesRestDriver.getStepKeyForIndex; import static org.elasticsearch.xpack.TimeSeriesRestDriver.getTemplate; @@ -45,6 +48,7 @@ import static org.elasticsearch.xpack.TimeSeriesRestDriver.waitAndGetShrinkIndexName; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; public class TimeSeriesDataStreamsIT extends ESRestTestCase { @@ -303,4 +307,46 @@ public void testDeleteOnlyIndexInDataStreamDeletesDataStream() throws Exception }); } + @SuppressWarnings("unchecked") + public void testDataStreamWithMultipleIndicesAndWriteIndexInDeletePhase() throws Exception { + createComposableTemplate(client(), template, dataStream + "*", new Template(null, null, null, null)); + indexDocument(client(), dataStream, 
true); + + createNewSingletonPolicy(client(), policyName, "delete", DeleteAction.NO_SNAPSHOT_DELETE); + // let's update the index template so the new write index (after rollover) is managed by an ILM policy that sends it to the + // delete step - note that we'll then have a data stream with generation 000001 not managed and the write index 000002 in the + // delete phase (the write index in this case, not being the only backing index, must NOT be deleted). + createComposableTemplate(client(), template, dataStream + "*", getTemplate(policyName)); + + client().performRequest(new Request("POST", dataStream + "/_rollover")); + indexDocument(client(), dataStream, true); + + String secondGenerationIndex = getBackingIndices(client(), dataStream).get(1); + assertBusy(() -> { + Request explainRequest = new Request("GET", "/_data_stream/" + dataStream); + Response response = client().performRequest(explainRequest); + Map responseMap; + try (InputStream is = response.getEntity().getContent()) { + responseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); + } + + List dataStreams = (List) responseMap.get("data_streams"); + assertThat(dataStreams.size(), is(1)); + Map dataStream = (Map) dataStreams.get(0); + + List indices = (List) dataStream.get("indices"); + // no index should be deleted + assertThat(indices.size(), is(2)); + + Map explainIndex = explainIndex(client(), secondGenerationIndex); + assertThat(explainIndex.get("failed_step"), is(DeleteStep.NAME)); + assertThat((Integer) explainIndex.get("failed_step_retry_count"), is(greaterThan(1))); + }); + + // rolling the data stream over again means 000002 is no longer the write index, so it should be deleted automatically + client().performRequest(new Request("POST", dataStream + "/_rollover")); + + assertBusy(() -> assertThat(indexExists(secondGenerationIndex), is(false))); + } + } diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java index ec9fad3e5077d..5eabacbf1ab3c 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java @@ -23,6 +23,7 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.rest.action.admin.indices.RestPutIndexTemplateAction; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; @@ -395,7 +396,7 @@ public void testILMWaitsForTimeSeriesEndTimeToLapse() throws Exception { }, 30, TimeUnit.SECONDS); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103981") + @TestLogging(value = "org.elasticsearch.xpack.ilm:TRACE", reason = "https://github.com/elastic/elasticsearch/issues/103981") public void testRollupNonTSIndex() throws Exception { createIndex(index, alias, false); index(client(), index, true, null, "@timestamp", "2020-01-01T05:10:00Z", "volume", 11.0, "metricset", randomAlphaOfLength(5)); @@ -504,6 +505,7 @@ public void testDownsampleTwice() throws Exception { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105485") public
void testDownsampleTwiceSameInterval() throws Exception { // Create the ILM policy Request request = new Request("PUT", "_ilm/policy/" + policy); diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataStreamAndIndexLifecycleMixingTests.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataStreamAndIndexLifecycleMixingTests.java index 637fbc8f8bf82..b9c58f728d1e3 100644 --- a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataStreamAndIndexLifecycleMixingTests.java +++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataStreamAndIndexLifecycleMixingTests.java @@ -238,7 +238,7 @@ public void testIndexTemplateSwapsILMForDataStreamLifecycle() throws Exception { // let's migrate this data stream to use the custom data stream lifecycle client().execute( PutDataStreamLifecycleAction.INSTANCE, - new PutDataStreamLifecycleAction.Request(new String[] { dataStreamName }, customLifecycle.getEffectiveDataRetention()) + new PutDataStreamLifecycleAction.Request(new String[] { dataStreamName }, customLifecycle.getDataStreamRetention()) ).actionGet(); assertBusy(() -> { @@ -580,7 +580,7 @@ public void testUpdateIndexTemplateToDataStreamLifecyclePreference() throws Exce // let's migrate this data stream to use the custom data stream lifecycle client().execute( PutDataStreamLifecycleAction.INSTANCE, - new PutDataStreamLifecycleAction.Request(new String[] { dataStreamName }, customLifecycle.getEffectiveDataRetention()) + new PutDataStreamLifecycleAction.Request(new String[] { dataStreamName }, customLifecycle.getDataStreamRetention()) ).actionGet(); // data stream was rolled over and has 4 indices, 2 managed by ILM, and 2 managed by the custom data stream lifecycle diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java index 55e4200a3b5ed..c38b427200744 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java @@ -23,6 +23,8 @@ import org.elasticsearch.xpack.inference.services.cohere.CohereServiceSettings; import org.elasticsearch.xpack.inference.services.cohere.embeddings.CohereEmbeddingsServiceSettings; import org.elasticsearch.xpack.inference.services.cohere.embeddings.CohereEmbeddingsTaskSettings; +import org.elasticsearch.xpack.inference.services.elasticsearch.ElasticsearchInternalServiceSettings; +import org.elasticsearch.xpack.inference.services.elasticsearch.MultilingualE5SmallInternalServiceSettings; import org.elasticsearch.xpack.inference.services.elser.ElserInternalServiceSettings; import org.elasticsearch.xpack.inference.services.elser.ElserMlNodeTaskSettings; import org.elasticsearch.xpack.inference.services.huggingface.HuggingFaceServiceSettings; @@ -31,8 +33,6 @@ import org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsServiceSettings; import org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsTaskSettings; import org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettings; -import org.elasticsearch.xpack.inference.services.textembedding.MultilingualE5SmallInternalServiceSettings; -import 
org.elasticsearch.xpack.inference.services.textembedding.TextEmbeddingInternalServiceSettings; import java.util.ArrayList; import java.util.List; @@ -96,8 +96,8 @@ public static List getNamedWriteables() { namedWriteables.add( new NamedWriteableRegistry.Entry( ServiceSettings.class, - TextEmbeddingInternalServiceSettings.NAME, - TextEmbeddingInternalServiceSettings::new + ElasticsearchInternalServiceSettings.NAME, + ElasticsearchInternalServiceSettings::new ) ); namedWriteables.add( diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java index eb43941cdcac2..c598a58d014f9 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java @@ -50,7 +50,7 @@ import org.elasticsearch.xpack.inference.external.http.HttpClientManager; import org.elasticsearch.xpack.inference.external.http.HttpSettings; import org.elasticsearch.xpack.inference.external.http.retry.RetrySettings; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.external.http.sender.RequestExecutorServiceSettings; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; import org.elasticsearch.xpack.inference.registry.ModelRegistryImpl; @@ -60,11 +60,11 @@ import org.elasticsearch.xpack.inference.rest.RestPutInferenceModelAction; import org.elasticsearch.xpack.inference.services.ServiceComponents; import org.elasticsearch.xpack.inference.services.cohere.CohereService; +import org.elasticsearch.xpack.inference.services.elasticsearch.ElasticsearchInternalService; import org.elasticsearch.xpack.inference.services.elser.ElserInternalService; import org.elasticsearch.xpack.inference.services.huggingface.HuggingFaceService; import org.elasticsearch.xpack.inference.services.huggingface.elser.HuggingFaceElserService; import org.elasticsearch.xpack.inference.services.openai.OpenAiService; -import org.elasticsearch.xpack.inference.services.textembedding.TextEmbeddingInternalService; import java.util.ArrayList; import java.util.Collection; @@ -95,7 +95,7 @@ public class InferencePlugin extends Plugin implements ActionPlugin, ExtensibleP public static final String NAME = "inference"; public static final String UTILITY_THREAD_POOL_NAME = "inference_utility"; private final Settings settings; - private final SetOnce httpFactory = new SetOnce<>(); + private final SetOnce httpFactory = new SetOnce<>(); private final SetOnce serviceComponents = new SetOnce<>(); private final SetOnce inferenceServiceRegistry = new SetOnce<>(); @@ -144,11 +144,10 @@ public Collection createComponents(PluginServices services) { var truncator = new Truncator(settings, services.clusterService()); serviceComponents.set(new ServiceComponents(services.threadPool(), throttlerManager, settings, truncator)); - var httpRequestSenderFactory = new HttpRequestSenderFactory( - services.threadPool(), + var httpRequestSenderFactory = new HttpRequestSender.Factory( + serviceComponents.get(), HttpClientManager.create(settings, services.threadPool(), services.clusterService(), throttlerManager), - services.clusterService(), - settings + services.clusterService() ); httpFactory.set(httpRequestSenderFactory); @@ -161,6 +160,8 @@ public 
Collection createComponents(PluginServices services) { inferenceServices.add(this::getInferenceServiceFactories); var factoryContext = new InferenceServiceExtension.InferenceServiceFactoryContext(services.client()); + // This must be done after the HttpRequestSenderFactory is created so that the services can get the + // reference correctly var inferenceRegistry = new InferenceServiceRegistryImpl(inferenceServices, factoryContext); inferenceRegistry.init(services.client()); inferenceServiceRegistry.set(inferenceRegistry); @@ -178,11 +179,11 @@ public void loadExtensions(ExtensionLoader loader) { public List getInferenceServiceFactories() { return List.of( ElserInternalService::new, - context -> new HuggingFaceElserService(httpFactory, serviceComponents), - context -> new HuggingFaceService(httpFactory, serviceComponents), - context -> new OpenAiService(httpFactory, serviceComponents), - context -> new CohereService(httpFactory, serviceComponents), - TextEmbeddingInternalService::new + context -> new HuggingFaceElserService(httpFactory.get(), serviceComponents.get()), + context -> new HuggingFaceService(httpFactory.get(), serviceComponents.get()), + context -> new OpenAiService(httpFactory.get(), serviceComponents.get()), + context -> new CohereService(httpFactory.get(), serviceComponents.get()), + ElasticsearchInternalService::new ); } @@ -239,7 +240,7 @@ public List> getSettings() { return Stream.of( HttpSettings.getSettings(), HttpClientManager.getSettings(), - HttpRequestSenderFactory.HttpRequestSender.getSettings(), + HttpRequestSender.getSettings(), ThrottlerManager.getSettings(), RetrySettings.getSettingsDefinitions(), Truncator.getSettings(), diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereActionCreator.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereActionCreator.java index 0fb5ca9283fae..91db5e691cb61 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereActionCreator.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereActionCreator.java @@ -32,6 +32,6 @@ public CohereActionCreator(Sender sender, ServiceComponents serviceComponents) { public ExecutableAction create(CohereEmbeddingsModel model, Map taskSettings, InputType inputType) { var overriddenModel = CohereEmbeddingsModel.of(model, taskSettings, inputType); - return new CohereEmbeddingsAction(sender, overriddenModel, serviceComponents); + return new CohereEmbeddingsAction(sender, overriddenModel); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereEmbeddingsAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereEmbeddingsAction.java index ae66496abbb1f..1f50f0ae6bc57 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereEmbeddingsAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereEmbeddingsAction.java @@ -7,21 +7,12 @@ package org.elasticsearch.xpack.inference.external.action.cohere; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.inference.InferenceServiceResults; import 
org.elasticsearch.xpack.inference.external.action.ExecutableAction; -import org.elasticsearch.xpack.inference.external.cohere.CohereAccount; -import org.elasticsearch.xpack.inference.external.cohere.CohereResponseHandler; -import org.elasticsearch.xpack.inference.external.http.retry.ResponseHandler; -import org.elasticsearch.xpack.inference.external.http.retry.RetrySettings; -import org.elasticsearch.xpack.inference.external.http.retry.RetryingHttpSender; +import org.elasticsearch.xpack.inference.external.http.sender.CohereEmbeddingsExecutableRequestCreator; import org.elasticsearch.xpack.inference.external.http.sender.Sender; -import org.elasticsearch.xpack.inference.external.request.cohere.CohereEmbeddingsRequest; -import org.elasticsearch.xpack.inference.external.response.cohere.CohereEmbeddingsResponseEntity; -import org.elasticsearch.xpack.inference.services.ServiceComponents; import org.elasticsearch.xpack.inference.services.cohere.embeddings.CohereEmbeddingsModel; import java.util.List; @@ -32,51 +23,32 @@ import static org.elasticsearch.xpack.inference.external.action.ActionUtils.wrapFailuresInElasticsearchException; public class CohereEmbeddingsAction implements ExecutableAction { - private static final Logger logger = LogManager.getLogger(CohereEmbeddingsAction.class); - private static final ResponseHandler HANDLER = createEmbeddingsHandler(); - - private final CohereAccount account; - private final CohereEmbeddingsModel model; private final String failedToSendRequestErrorMessage; - private final RetryingHttpSender sender; + private final Sender sender; + private final CohereEmbeddingsExecutableRequestCreator requestCreator; - public CohereEmbeddingsAction(Sender sender, CohereEmbeddingsModel model, ServiceComponents serviceComponents) { - this.model = Objects.requireNonNull(model); - this.account = new CohereAccount( - this.model.getServiceSettings().getCommonSettings().getUri(), - this.model.getSecretSettings().apiKey() - ); + public CohereEmbeddingsAction(Sender sender, CohereEmbeddingsModel model) { + Objects.requireNonNull(model); + this.sender = Objects.requireNonNull(sender); this.failedToSendRequestErrorMessage = constructFailedToSendRequestMessage( - this.model.getServiceSettings().getCommonSettings().getUri(), + model.getServiceSettings().getCommonSettings().getUri(), "Cohere embeddings" ); - this.sender = new RetryingHttpSender( - Objects.requireNonNull(sender), - serviceComponents.throttlerManager(), - logger, - new RetrySettings(serviceComponents.settings()), - serviceComponents.threadPool() - ); + requestCreator = new CohereEmbeddingsExecutableRequestCreator(model); } @Override public void execute(List input, ActionListener listener) { try { - CohereEmbeddingsRequest request = new CohereEmbeddingsRequest(account, input, model); ActionListener wrappedListener = wrapFailuresInElasticsearchException( failedToSendRequestErrorMessage, listener ); - - sender.send(request, HANDLER, wrappedListener); + sender.send(requestCreator, input, wrappedListener); } catch (ElasticsearchException e) { listener.onFailure(e); } catch (Exception e) { listener.onFailure(createInternalServerError(e, failedToSendRequestErrorMessage)); } } - - private static ResponseHandler createEmbeddingsHandler() { - return new CohereResponseHandler("cohere text embedding", CohereEmbeddingsResponseEntity::fromResponse); - } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceAction.java 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceAction.java index 67c5fda5f83a0..928d396c991f8 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceAction.java @@ -7,19 +7,13 @@ package org.elasticsearch.xpack.inference.external.action.huggingface; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.inference.InferenceServiceResults; -import org.elasticsearch.xpack.inference.common.Truncator; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.external.http.retry.ResponseHandler; -import org.elasticsearch.xpack.inference.external.http.retry.RetrySettings; -import org.elasticsearch.xpack.inference.external.http.retry.RetryingHttpSender; +import org.elasticsearch.xpack.inference.external.http.sender.HuggingFaceExecutableRequestCreator; import org.elasticsearch.xpack.inference.external.http.sender.Sender; -import org.elasticsearch.xpack.inference.external.huggingface.HuggingFaceAccount; -import org.elasticsearch.xpack.inference.external.request.huggingface.HuggingFaceInferenceRequest; import org.elasticsearch.xpack.inference.services.ServiceComponents; import org.elasticsearch.xpack.inference.services.huggingface.HuggingFaceModel; @@ -27,19 +21,13 @@ import java.util.Objects; import static org.elasticsearch.core.Strings.format; -import static org.elasticsearch.xpack.inference.common.Truncator.truncate; import static org.elasticsearch.xpack.inference.external.action.ActionUtils.createInternalServerError; import static org.elasticsearch.xpack.inference.external.action.ActionUtils.wrapFailuresInElasticsearchException; public class HuggingFaceAction implements ExecutableAction { - private static final Logger logger = LogManager.getLogger(HuggingFaceAction.class); - - private final HuggingFaceAccount account; private final String errorMessage; - private final RetryingHttpSender sender; - private final ResponseHandler responseHandler; - private final Truncator truncator; - private final HuggingFaceModel model; + private final Sender sender; + private final HuggingFaceExecutableRequestCreator requestCreator; public HuggingFaceAction( Sender sender, @@ -50,34 +38,20 @@ public HuggingFaceAction( ) { Objects.requireNonNull(serviceComponents); Objects.requireNonNull(requestType); - - this.model = Objects.requireNonNull(model); - this.responseHandler = Objects.requireNonNull(responseHandler); - this.sender = new RetryingHttpSender( - Objects.requireNonNull(sender), - serviceComponents.throttlerManager(), - logger, - new RetrySettings(serviceComponents.settings()), - serviceComponents.threadPool() - ); - this.account = new HuggingFaceAccount(model.getUri(), model.getApiKey()); - this.errorMessage = format( + this.sender = Objects.requireNonNull(sender); + requestCreator = new HuggingFaceExecutableRequestCreator(model, responseHandler, serviceComponents.truncator()); + errorMessage = format( "Failed to send Hugging Face %s request from inference entity id [%s]", requestType, model.getInferenceEntityId() ); - this.truncator = Objects.requireNonNull(serviceComponents.truncator()); } @Override public void execute(List input, 
ActionListener listener) { try { - var truncatedInput = truncate(input, model.getTokenLimit()); - - HuggingFaceInferenceRequest request = new HuggingFaceInferenceRequest(truncator, account, truncatedInput, model); ActionListener wrappedListener = wrapFailuresInElasticsearchException(errorMessage, listener); - - sender.send(request, responseHandler, wrappedListener); + sender.send(requestCreator, input, wrappedListener); } catch (ElasticsearchException e) { listener.onFailure(e); } catch (Exception e) { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiEmbeddingsAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiEmbeddingsAction.java index 2e804dfeb6a4f..d5f083ac8aa90 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiEmbeddingsAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiEmbeddingsAction.java @@ -10,52 +10,39 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.inference.InferenceServiceResults; -import org.elasticsearch.xpack.inference.common.Truncator; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.external.http.sender.OpenAiEmbeddingsExecutableRequestCreator; import org.elasticsearch.xpack.inference.external.http.sender.Sender; -import org.elasticsearch.xpack.inference.external.openai.OpenAiAccount; -import org.elasticsearch.xpack.inference.external.openai.OpenAiClient; -import org.elasticsearch.xpack.inference.external.request.openai.OpenAiEmbeddingsRequest; import org.elasticsearch.xpack.inference.services.ServiceComponents; import org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsModel; import java.util.List; import java.util.Objects; -import static org.elasticsearch.xpack.inference.common.Truncator.truncate; import static org.elasticsearch.xpack.inference.external.action.ActionUtils.constructFailedToSendRequestMessage; import static org.elasticsearch.xpack.inference.external.action.ActionUtils.createInternalServerError; import static org.elasticsearch.xpack.inference.external.action.ActionUtils.wrapFailuresInElasticsearchException; public class OpenAiEmbeddingsAction implements ExecutableAction { - private final OpenAiAccount account; - private final OpenAiClient client; - private final OpenAiEmbeddingsModel model; private final String errorMessage; - private final Truncator truncator; + private final OpenAiEmbeddingsExecutableRequestCreator requestCreator; + private final Sender sender; public OpenAiEmbeddingsAction(Sender sender, OpenAiEmbeddingsModel model, ServiceComponents serviceComponents) { - this.model = Objects.requireNonNull(model); - this.account = new OpenAiAccount( - this.model.getServiceSettings().uri(), - this.model.getServiceSettings().organizationId(), - this.model.getSecretSettings().apiKey() - ); - this.client = new OpenAiClient(Objects.requireNonNull(sender), Objects.requireNonNull(serviceComponents)); - this.errorMessage = constructFailedToSendRequestMessage(this.model.getServiceSettings().uri(), "OpenAI embeddings"); - this.truncator = Objects.requireNonNull(serviceComponents.truncator()); + Objects.requireNonNull(serviceComponents); + Objects.requireNonNull(model); + this.sender = Objects.requireNonNull(sender); + requestCreator = 
new OpenAiEmbeddingsExecutableRequestCreator(model, serviceComponents.truncator()); + errorMessage = constructFailedToSendRequestMessage(model.getServiceSettings().uri(), "OpenAI embeddings"); } @Override public void execute(List input, ActionListener listener) { try { - var truncatedInput = truncate(input, model.getServiceSettings().maxInputTokens()); - - OpenAiEmbeddingsRequest request = new OpenAiEmbeddingsRequest(truncator, account, truncatedInput, model); ActionListener wrappedListener = wrapFailuresInElasticsearchException(errorMessage, listener); - client.send(request, wrappedListener); + sender.send(requestCreator, input, wrappedListener); } catch (ElasticsearchException e) { listener.onFailure(e); } catch (Exception e) { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/cohere/CohereResponseHandler.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/cohere/CohereResponseHandler.java index c7e6493949400..b5af0b474834f 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/cohere/CohereResponseHandler.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/cohere/CohereResponseHandler.java @@ -59,7 +59,9 @@ void checkForFailureStatusCode(Request request, HttpResult result) throws RetryE } // handle error codes - if (statusCode >= 500) { + if (statusCode == 500) { + throw new RetryException(true, buildError(SERVER_ERROR, request, result)); + } else if (statusCode > 500) { throw new RetryException(false, buildError(SERVER_ERROR, request, result)); } else if (statusCode == 429) { throw new RetryException(true, buildError(RATE_LIMIT, request, result)); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClientManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClientManager.java index 7cc4a3cb24502..ab3a8a8c0e043 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClientManager.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClientManager.java @@ -37,7 +37,6 @@ public class HttpClientManager implements Closeable { */ public static final Setting MAX_CONNECTIONS = Setting.intSetting( "xpack.inference.http.max_connections", - // TODO pick a reasonable values here 20, // default 1, // min Setting.Property.NodeScope, diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/RequestExecutor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/RequestExecutor.java index 5c8fa62ba88f9..77b4d49d62b9f 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/RequestExecutor.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/RequestExecutor.java @@ -10,8 +10,10 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.xpack.inference.external.request.HttpRequest; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.http.sender.ExecutableRequestCreator; +import java.util.List; import java.util.concurrent.TimeUnit; public interface RequestExecutor { @@ -25,5 +27,10 @@ public interface RequestExecutor { boolean 
awaitTermination(long timeout, TimeUnit unit) throws InterruptedException; - void execute(HttpRequest request, @Nullable TimeValue timeout, ActionListener listener); + void execute( + ExecutableRequestCreator requestCreator, + List input, + @Nullable TimeValue timeout, + ActionListener listener + ); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/Retrier.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RequestSender.java similarity index 53% rename from x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/Retrier.java rename to x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RequestSender.java index 2e2ba03345a3b..8244e5ad29e95 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/Retrier.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RequestSender.java @@ -7,10 +7,21 @@ package org.elasticsearch.xpack.inference.external.http.retry; +import org.apache.http.client.protocol.HttpClientContext; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.xpack.inference.external.request.Request; -public interface Retrier { - void send(Request request, ResponseHandler responseHandler, ActionListener listener); +import java.util.function.Supplier; + +public interface RequestSender { + void send( + Logger logger, + Request request, + HttpClientContext context, + Supplier hasRequestTimedOutFunction, + ResponseHandler responseHandler, + ActionListener listener + ); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetrySettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetrySettings.java index 040903a35ab08..35e50e557cc83 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetrySettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetrySettings.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.inference.external.http.retry; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; @@ -15,43 +16,128 @@ public class RetrySettings { - public static final Setting RETRY_INITIAL_DELAY_SETTING = Setting.timeSetting( + static final Setting RETRY_INITIAL_DELAY_SETTING = Setting.timeSetting( "xpack.inference.http.retry.initial_delay", TimeValue.timeValueSeconds(1), Setting.Property.NodeScope, Setting.Property.Dynamic ); - public static final Setting RETRY_MAX_DELAY_BOUND_SETTING = Setting.timeSetting( + static final Setting RETRY_MAX_DELAY_BOUND_SETTING = Setting.timeSetting( "xpack.inference.http.retry.max_delay_bound", TimeValue.timeValueSeconds(5), Setting.Property.NodeScope, Setting.Property.Dynamic ); - public static final Setting RETRY_TIMEOUT_SETTING = Setting.timeSetting( + static final Setting RETRY_TIMEOUT_SETTING = Setting.timeSetting( "xpack.inference.http.retry.timeout", TimeValue.timeValueSeconds(30), Setting.Property.NodeScope, Setting.Property.Dynamic ); - private final InternalSettings internalSettings; + static final 
Setting RETRY_DEBUG_FREQUENCY_MODE_SETTING = Setting.enumSetting( + DebugFrequencyMode.class, + "xpack.inference.http.retry.debug_frequency_mode", + DebugFrequencyMode.OFF, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + static final Setting RETRY_DEBUG_FREQUENCY_AMOUNT_SETTING = Setting.timeSetting( + "xpack.inference.http.retry.debug_frequency_amount", + TimeValue.timeValueMinutes(5), + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + private volatile TimeValue initialDelay; + private volatile TimeValue maxDelayBound; + private volatile TimeValue timeout; + private volatile DebugFrequencyMode debugMode; + private volatile TimeValue debugFrequency; + + public RetrySettings(Settings settings, ClusterService clusterService) { + initialDelay = RETRY_INITIAL_DELAY_SETTING.get(settings); + maxDelayBound = RETRY_MAX_DELAY_BOUND_SETTING.get(settings); + timeout = RETRY_TIMEOUT_SETTING.get(settings); + debugMode = RETRY_DEBUG_FREQUENCY_MODE_SETTING.get(settings); + debugFrequency = RETRY_DEBUG_FREQUENCY_AMOUNT_SETTING.get(settings); + + addSettingsUpdateConsumers(clusterService); + } + + private void addSettingsUpdateConsumers(ClusterService clusterService) { + clusterService.getClusterSettings().addSettingsUpdateConsumer(RETRY_INITIAL_DELAY_SETTING, this::setInitialDelay); + clusterService.getClusterSettings().addSettingsUpdateConsumer(RETRY_MAX_DELAY_BOUND_SETTING, this::setMaxDelayBound); + clusterService.getClusterSettings().addSettingsUpdateConsumer(RETRY_TIMEOUT_SETTING, this::setTimeout); + clusterService.getClusterSettings().addSettingsUpdateConsumer(RETRY_DEBUG_FREQUENCY_MODE_SETTING, this::setDebugMode); + clusterService.getClusterSettings().addSettingsUpdateConsumer(RETRY_DEBUG_FREQUENCY_AMOUNT_SETTING, this::setDebugFrequencyAmount); + } + + private void setInitialDelay(TimeValue initialDelay) { + this.initialDelay = initialDelay; + } - public RetrySettings(Settings settings) { - var initialDelay = RETRY_INITIAL_DELAY_SETTING.get(settings); - var maxDelayBound = RETRY_MAX_DELAY_BOUND_SETTING.get(settings); - var timeoutValue = RETRY_TIMEOUT_SETTING.get(settings); - this.internalSettings = new InternalSettings(initialDelay, maxDelayBound, timeoutValue); + private void setMaxDelayBound(TimeValue maxDelayBound) { + this.maxDelayBound = maxDelayBound; } - public record InternalSettings(TimeValue initialDelay, TimeValue maxDelayBound, TimeValue timeoutValue) {} + private void setTimeout(TimeValue timeout) { + this.timeout = timeout; + } + + private void setDebugMode(DebugFrequencyMode debugMode) { + this.debugMode = debugMode; + } - public InternalSettings getSettings() { - return internalSettings; + private void setDebugFrequencyAmount(TimeValue debugFrequency) { + this.debugFrequency = debugFrequency; } public static List> getSettingsDefinitions() { - return List.of(RETRY_INITIAL_DELAY_SETTING, RETRY_MAX_DELAY_BOUND_SETTING, RETRY_TIMEOUT_SETTING); + return List.of( + RETRY_INITIAL_DELAY_SETTING, + RETRY_MAX_DELAY_BOUND_SETTING, + RETRY_TIMEOUT_SETTING, + RETRY_DEBUG_FREQUENCY_MODE_SETTING, + RETRY_DEBUG_FREQUENCY_AMOUNT_SETTING + ); + } + + TimeValue getInitialDelay() { + return initialDelay; + } + + TimeValue getMaxDelayBound() { + return maxDelayBound; + } + + TimeValue getTimeout() { + return timeout; + } + + DebugFrequencyMode getDebugMode() { + return debugMode; + } + + TimeValue getDebugFrequency() { + return debugFrequency; + } + + enum DebugFrequencyMode { + /** + * Indicates that the debug messages should be logged every time + */ + ON, + /** + 
* Indicates that the debug messages should never be logged + */ + OFF, + /** + * Indicates that the debug messages should be logged on an interval + */ + INTERVAL } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSender.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSender.java index d8476c7c583d5..ffe10ffe3b6ae 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSender.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSender.java @@ -7,7 +7,9 @@ package org.elasticsearch.xpack.inference.external.http.retry; +import org.apache.http.client.protocol.HttpClientContext; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; @@ -15,8 +17,8 @@ import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.inference.external.http.HttpClient; import org.elasticsearch.xpack.inference.external.http.HttpResult; -import org.elasticsearch.xpack.inference.external.http.sender.Sender; import org.elasticsearch.xpack.inference.external.request.Request; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; @@ -24,40 +26,37 @@ import java.net.UnknownHostException; import java.util.Objects; import java.util.concurrent.Executor; +import java.util.function.Supplier; import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.inference.InferencePlugin.UTILITY_THREAD_POOL_NAME; -public class RetryingHttpSender implements Retrier { - private final Sender sender; +public class RetryingHttpSender implements RequestSender { + private final HttpClient httpClient; private final ThrottlerManager throttlerManager; - private final Logger logger; private final RetrySettings retrySettings; private final ThreadPool threadPool; private final Executor executor; public RetryingHttpSender( - Sender sender, + HttpClient httpClient, ThrottlerManager throttlerManager, - Logger logger, RetrySettings retrySettings, ThreadPool threadPool ) { - this(sender, throttlerManager, logger, retrySettings, threadPool, threadPool.executor(UTILITY_THREAD_POOL_NAME)); + this(httpClient, throttlerManager, retrySettings, threadPool, threadPool.executor(UTILITY_THREAD_POOL_NAME)); } // For testing only RetryingHttpSender( - Sender sender, + HttpClient httpClient, ThrottlerManager throttlerManager, - Logger logger, RetrySettings retrySettings, ThreadPool threadPool, Executor executor ) { - this.sender = Objects.requireNonNull(sender); + this.httpClient = Objects.requireNonNull(httpClient); this.throttlerManager = Objects.requireNonNull(throttlerManager); - this.logger = Objects.requireNonNull(logger); this.retrySettings = Objects.requireNonNull(retrySettings); this.threadPool = Objects.requireNonNull(threadPool); this.executor = Objects.requireNonNull(executor); @@ -66,23 +65,41 @@ public RetryingHttpSender( private class InternalRetrier extends RetryableAction { private Request request; private final ResponseHandler responseHandler; - - InternalRetrier(Request request, ResponseHandler responseHandler, ActionListener listener) { + private final Logger 
logger; + private final HttpClientContext context; + private final Supplier hasRequestCompletedFunction; + + InternalRetrier( + Logger logger, + Request request, + HttpClientContext context, + Supplier hasRequestCompletedFunction, + ResponseHandler responseHandler, + ActionListener listener + ) { super( - logger, + Objects.requireNonNull(logger), threadPool, - retrySettings.getSettings().initialDelay(), - retrySettings.getSettings().maxDelayBound(), - retrySettings.getSettings().timeoutValue(), + retrySettings.getInitialDelay(), + retrySettings.getMaxDelayBound(), + retrySettings.getTimeout(), listener, executor ); - this.request = request; - this.responseHandler = responseHandler; + this.logger = logger; + this.request = Objects.requireNonNull(request); + this.context = Objects.requireNonNull(context); + this.responseHandler = Objects.requireNonNull(responseHandler); + this.hasRequestCompletedFunction = Objects.requireNonNull(hasRequestCompletedFunction); } @Override public void tryAction(ActionListener listener) { + // A timeout likely occurred so let's stop attempting to execute the request + if (hasRequestCompletedFunction.get()) { + return; + } + ActionListener responseListener = ActionListener.wrap(result -> { try { responseHandler.validateResponse(throttlerManager, logger, request, result); @@ -90,25 +107,21 @@ public void tryAction(ActionListener listener) { listener.onResponse(inferenceResults); } catch (Exception e) { - logException(request, result, responseHandler.getRequestType(), e); + logException(logger, request, result, responseHandler.getRequestType(), e); listener.onFailure(e); } }, e -> { - logException(request, responseHandler.getRequestType(), e); + logException(logger, request, responseHandler.getRequestType(), e); listener.onFailure(transformIfRetryable(e)); }); - sender.send(request.createHttpRequest(), responseListener); - } + try { + httpClient.send(request.createHttpRequest(), context, responseListener); + } catch (Exception e) { + logException(logger, request, responseHandler.getRequestType(), e); - @Override - public boolean shouldRetry(Exception e) { - if (e instanceof Retryable retry) { - request = retry.rebuildRequest(request); - return retry.shouldRetry(); + listener.onFailure(wrapWithElasticsearchException(e, request.getInferenceEntityId())); } - - return false; } /** @@ -135,15 +148,45 @@ private Exception transformIfRetryable(Exception e) { return exceptionToReturn; } + + private Exception wrapWithElasticsearchException(Exception e, String inferenceEntityId) { + var transformedException = transformIfRetryable(e); + + if (transformedException instanceof ElasticsearchException) { + return transformedException; + } + + return new ElasticsearchException( + format("Http client failed to send request from inference entity id [%s]", inferenceEntityId), + transformedException + ); + } + + @Override + public boolean shouldRetry(Exception e) { + if (e instanceof Retryable retry) { + request = retry.rebuildRequest(request); + return retry.shouldRetry(); + } + + return false; + } } @Override - public void send(Request request, ResponseHandler responseHandler, ActionListener listener) { - InternalRetrier retrier = new InternalRetrier(request, responseHandler, listener); + public void send( + Logger logger, + Request request, + HttpClientContext context, + Supplier hasRequestTimedOutFunction, + ResponseHandler responseHandler, + ActionListener listener + ) { + InternalRetrier retrier = new InternalRetrier(logger, request, context, hasRequestTimedOutFunction, 
responseHandler, listener); retrier.run(); } - private void logException(Request request, String requestType, Exception exception) { + private void logException(Logger logger, Request request, String requestType, Exception exception) { var causeException = ExceptionsHelper.unwrapCause(exception); throttlerManager.warn( @@ -153,7 +196,7 @@ private void logException(Request request, String requestType, Exception excepti ); } - private void logException(Request request, HttpResult result, String requestType, Exception exception) { + private void logException(Logger logger, Request request, HttpResult result, String requestType, Exception exception) { var causeException = ExceptionsHelper.unwrapCause(exception); throttlerManager.warn( diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/CohereEmbeddingsExecutableRequestCreator.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/CohereEmbeddingsExecutableRequestCreator.java new file mode 100644 index 0000000000000..b0fdc800a64da --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/CohereEmbeddingsExecutableRequestCreator.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.apache.http.client.protocol.HttpClientContext; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.cohere.CohereAccount; +import org.elasticsearch.xpack.inference.external.cohere.CohereResponseHandler; +import org.elasticsearch.xpack.inference.external.http.retry.RequestSender; +import org.elasticsearch.xpack.inference.external.http.retry.ResponseHandler; +import org.elasticsearch.xpack.inference.external.request.cohere.CohereEmbeddingsRequest; +import org.elasticsearch.xpack.inference.external.response.cohere.CohereEmbeddingsResponseEntity; +import org.elasticsearch.xpack.inference.services.cohere.embeddings.CohereEmbeddingsModel; + +import java.util.List; +import java.util.Objects; +import java.util.function.Supplier; + +public class CohereEmbeddingsExecutableRequestCreator implements ExecutableRequestCreator { + private static final Logger logger = LogManager.getLogger(CohereEmbeddingsExecutableRequestCreator.class); + private static final ResponseHandler HANDLER = createEmbeddingsHandler(); + + private static ResponseHandler createEmbeddingsHandler() { + return new CohereResponseHandler("cohere text embedding", CohereEmbeddingsResponseEntity::fromResponse); + } + + private final CohereAccount account; + private final CohereEmbeddingsModel model; + + public CohereEmbeddingsExecutableRequestCreator(CohereEmbeddingsModel model) { + this.model = Objects.requireNonNull(model); + account = new CohereAccount(this.model.getServiceSettings().getCommonSettings().getUri(), this.model.getSecretSettings().apiKey()); + } + + @Override + public Runnable create( + List input, + RequestSender requestSender, + Supplier hasRequestCompletedFunction, + HttpClientContext context, + ActionListener listener + ) { + CohereEmbeddingsRequest 
request = new CohereEmbeddingsRequest(account, input, model); + + return new ExecutableInferenceRequest(requestSender, logger, request, context, HANDLER, hasRequestCompletedFunction, listener); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/ExecutableInferenceRequest.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/ExecutableInferenceRequest.java new file mode 100644 index 0000000000000..53f30773cbfe3 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/ExecutableInferenceRequest.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.apache.http.client.protocol.HttpClientContext; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.Strings; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.http.retry.RequestSender; +import org.elasticsearch.xpack.inference.external.http.retry.ResponseHandler; +import org.elasticsearch.xpack.inference.external.request.Request; + +import java.util.function.Supplier; + +record ExecutableInferenceRequest( + RequestSender requestSender, + Logger logger, + Request request, + HttpClientContext context, + ResponseHandler responseHandler, + Supplier hasFinished, + ActionListener listener +) implements Runnable { + + @Override + public void run() { + var inferenceEntityId = request.createHttpRequest().inferenceEntityId(); + + try { + requestSender.send(logger, request, context, hasFinished, responseHandler, listener); + } catch (Exception e) { + var errorMessage = Strings.format("Failed to send request from inference entity id [%s]", inferenceEntityId); + logger.warn(errorMessage, e); + listener.onFailure(new ElasticsearchException(errorMessage, e)); + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/ExecutableRequestCreator.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/ExecutableRequestCreator.java new file mode 100644 index 0000000000000..96455ca4b1cb1 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/ExecutableRequestCreator.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.apache.http.client.protocol.HttpClientContext; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.http.retry.RequestSender; + +import java.util.List; +import java.util.function.Supplier; + +/** + * A contract for constructing a {@link Runnable} to handle sending an inference request to a 3rd party service. 
+ */ +public interface ExecutableRequestCreator { + Runnable create( + List input, + RequestSender requestSender, + Supplier hasRequestCompletedFunction, + HttpClientContext context, + ActionListener listener + ); +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSender.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSender.java new file mode 100644 index 0000000000000..0131bf2989f6f --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSender.java @@ -0,0 +1,192 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.inference.external.http.HttpClientManager; +import org.elasticsearch.xpack.inference.external.http.retry.RetrySettings; +import org.elasticsearch.xpack.inference.external.http.retry.RetryingHttpSender; +import org.elasticsearch.xpack.inference.services.ServiceComponents; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.inference.InferencePlugin.UTILITY_THREAD_POOL_NAME; + +/** + * A class for providing a more friendly interface for sending an inference request to a 3rd party service. + */ +public class HttpRequestSender implements Sender { + + /** + * A helper class for constructing a {@link HttpRequestSender}. 
+ */ + public static class Factory { + private final ServiceComponents serviceComponents; + private final HttpClientManager httpClientManager; + private final ClusterService clusterService; + private final SingleRequestManager requestManager; + + public Factory(ServiceComponents serviceComponents, HttpClientManager httpClientManager, ClusterService clusterService) { + this.serviceComponents = Objects.requireNonNull(serviceComponents); + this.httpClientManager = Objects.requireNonNull(httpClientManager); + this.clusterService = Objects.requireNonNull(clusterService); + + var requestSender = new RetryingHttpSender( + this.httpClientManager.getHttpClient(), + serviceComponents.throttlerManager(), + new RetrySettings(serviceComponents.settings(), clusterService), + serviceComponents.threadPool() + ); + requestManager = new SingleRequestManager(requestSender); + } + + public Sender createSender(String serviceName) { + return new HttpRequestSender( + serviceName, + serviceComponents.threadPool(), + httpClientManager, + clusterService, + serviceComponents.settings(), + requestManager + ); + } + } + + private static final Logger logger = LogManager.getLogger(HttpRequestSender.class); + private static final TimeValue START_COMPLETED_WAIT_TIME = TimeValue.timeValueSeconds(5); + + /** + * The maximum time a request can take. The timer starts once a request is enqueued and continues until a response is + * received from the 3rd party service. This encompasses the time the request might just sit in the queue waiting to be sent + * if another request is already waiting for a connection lease from the connection pool. + */ + public static final Setting MAX_REQUEST_TIMEOUT = Setting.timeSetting( + "xpack.inference.http.max_request_timeout", + TimeValue.timeValueSeconds(30), + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + private final ThreadPool threadPool; + private final HttpClientManager manager; + private final RequestExecutorService service; + private final AtomicBoolean started = new AtomicBoolean(false); + private volatile TimeValue maxRequestTimeout; + private final CountDownLatch startCompleted = new CountDownLatch(2); + + private HttpRequestSender( + String serviceName, + ThreadPool threadPool, + HttpClientManager httpClientManager, + ClusterService clusterService, + Settings settings, + SingleRequestManager requestManager + ) { + this.threadPool = Objects.requireNonNull(threadPool); + this.manager = Objects.requireNonNull(httpClientManager); + service = new RequestExecutorService( + serviceName, + threadPool, + startCompleted, + new RequestExecutorServiceSettings(settings, clusterService), + requestManager + ); + + this.maxRequestTimeout = MAX_REQUEST_TIMEOUT.get(settings); + addSettingsUpdateConsumers(clusterService); + } + + private void addSettingsUpdateConsumers(ClusterService clusterService) { + clusterService.getClusterSettings().addSettingsUpdateConsumer(MAX_REQUEST_TIMEOUT, this::setMaxRequestTimeout); + } + + // Default for testing + void setMaxRequestTimeout(TimeValue maxRequestTimeout) { + logger.debug(() -> format("Max request timeout updated to [%s] for service [%s]", maxRequestTimeout, service)); + this.maxRequestTimeout = maxRequestTimeout; + } + + /** + * Start various internal services. This is required before sending requests. + */ + public void start() { + if (started.compareAndSet(false, true)) { + // The manager must be started before the executor service. 
That way we guarantee that the http client + // is ready prior to the service attempting to use the http client to send a request + manager.start(); + threadPool.executor(UTILITY_THREAD_POOL_NAME).execute(service::start); + startCompleted.countDown(); + } + } + + @Override + public void close() throws IOException { + manager.close(); + service.shutdown(); + } + + /** + * Send a request at some point in the future with a timeout specified. + * @param requestCreator a factory for creating a request to be sent to a 3rd party service + * @param input the list of string input to send in the request + * @param timeout the maximum time the request should wait for a response before timing out. If null, the timeout is ignored. + * The queuing logic may still throw a timeout if it fails to send the request because it couldn't get a leased + * connection from the connection pool + * @param listener a listener to handle the response + */ + public void send( + ExecutableRequestCreator requestCreator, + List input, + @Nullable TimeValue timeout, + ActionListener listener + ) { + assert started.get() : "call start() before sending a request"; + waitForStartToComplete(); + service.execute(requestCreator, input, timeout, listener); + } + + private void waitForStartToComplete() { + try { + if (startCompleted.await(START_COMPLETED_WAIT_TIME.getSeconds(), TimeUnit.SECONDS) == false) { + throw new IllegalStateException("Http sender startup did not complete in time"); + } + } catch (InterruptedException e) { + throw new IllegalStateException("Http sender interrupted while waiting for startup to complete"); + } + } + + /** + * Send a request at some point in the future. The timeout used is retrieved from the settings. + * @param requestCreator a factory for creating a request to be sent to a 3rd party service + * @param input the list of string input to send in the request + * @param listener a listener to handle the response + */ + public void send(ExecutableRequestCreator requestCreator, List input, ActionListener listener) { + assert started.get() : "call start() before sending a request"; + waitForStartToComplete(); + service.execute(requestCreator, input, maxRequestTimeout, listener); + } + + public static List> getSettings() { + return List.of(MAX_REQUEST_TIMEOUT); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderFactory.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderFactory.java deleted file mode 100644 index c773f57933415..0000000000000 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderFactory.java +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0.
- */ - -package org.elasticsearch.xpack.inference.external.http.sender; - -import org.apache.http.client.methods.HttpUriRequest; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.Nullable; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.inference.external.http.HttpClientManager; -import org.elasticsearch.xpack.inference.external.http.HttpResult; -import org.elasticsearch.xpack.inference.external.request.HttpRequest; - -import java.io.IOException; -import java.util.List; -import java.util.Objects; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; - -import static org.elasticsearch.core.Strings.format; -import static org.elasticsearch.xpack.inference.InferencePlugin.UTILITY_THREAD_POOL_NAME; - -/** - * A helper class for constructing a {@link HttpRequestSender}. - */ -public class HttpRequestSenderFactory { - private final ThreadPool threadPool; - private final HttpClientManager httpClientManager; - private final ClusterService clusterService; - private final Settings settings; - - public HttpRequestSenderFactory( - ThreadPool threadPool, - HttpClientManager httpClientManager, - ClusterService clusterService, - Settings settings - ) { - this.threadPool = Objects.requireNonNull(threadPool); - this.httpClientManager = Objects.requireNonNull(httpClientManager); - this.clusterService = Objects.requireNonNull(clusterService); - this.settings = Objects.requireNonNull(settings); - } - - public Sender createSender(String serviceName) { - return new HttpRequestSender(serviceName, threadPool, httpClientManager, clusterService, settings); - } - - /** - * A class for providing a more friendly interface for sending an {@link HttpUriRequest}. This leverages the queuing logic for sending - * a request. - */ - public static final class HttpRequestSender implements Sender { - private static final Logger logger = LogManager.getLogger(HttpRequestSender.class); - private static final TimeValue START_COMPLETED_WAIT_TIME = TimeValue.timeValueSeconds(5); - - /** - * The maximum time a request can take. The timer starts once a request is enqueued and continues until a response is - * received from the 3rd party service. This encompasses the time the request might just sit in the queue waiting to be sent - * if another request is already waiting for a connection lease from the connection pool. 
- */ - public static final Setting MAX_REQUEST_TIMEOUT = Setting.timeSetting( - "xpack.inference.http.max_request_timeout", - TimeValue.timeValueSeconds(30), - Setting.Property.NodeScope, - Setting.Property.Dynamic - ); - - private final ThreadPool threadPool; - private final HttpClientManager manager; - private final RequestExecutorService service; - private final AtomicBoolean started = new AtomicBoolean(false); - private volatile TimeValue maxRequestTimeout; - private final CountDownLatch startCompleted = new CountDownLatch(2); - - private HttpRequestSender( - String serviceName, - ThreadPool threadPool, - HttpClientManager httpClientManager, - ClusterService clusterService, - Settings settings - ) { - this.threadPool = Objects.requireNonNull(threadPool); - this.manager = Objects.requireNonNull(httpClientManager); - service = new RequestExecutorService( - serviceName, - manager.getHttpClient(), - threadPool, - startCompleted, - new RequestExecutorServiceSettings(settings, clusterService) - ); - - this.maxRequestTimeout = MAX_REQUEST_TIMEOUT.get(settings); - addSettingsUpdateConsumers(clusterService); - } - - private void addSettingsUpdateConsumers(ClusterService clusterService) { - clusterService.getClusterSettings().addSettingsUpdateConsumer(MAX_REQUEST_TIMEOUT, this::setMaxRequestTimeout); - } - - // Default for testing - void setMaxRequestTimeout(TimeValue maxRequestTimeout) { - logger.debug(() -> format("Max request timeout updated to [%s] for service [%s]", maxRequestTimeout, service)); - this.maxRequestTimeout = maxRequestTimeout; - } - - /** - * Start various internal services. This is required before sending requests. - */ - public void start() { - if (started.compareAndSet(false, true)) { - // The manager must be started before the executor service. That way we guarantee that the http client - // is ready prior to the service attempting to use the http client to send a request - manager.start(); - threadPool.executor(UTILITY_THREAD_POOL_NAME).execute(service::start); - startCompleted.countDown(); - } - } - - @Override - public void close() throws IOException { - manager.close(); - service.shutdown(); - } - - /** - * Send a request at some point in the future with a timeout specified. - * @param request the http request to send - * @param timeout the maximum time the request should wait for a response before timing out. If null, the timeout is ignored. - * The queuing logic may still throw a timeout if it fails to send the request because it couldn't get a leased - * connection from the connection pool - * @param listener a listener to handle the response - */ - public void send(HttpRequest request, @Nullable TimeValue timeout, ActionListener listener) { - assert started.get() : "call start() before sending a request"; - waitForStartToComplete(); - service.execute(request, timeout, listener); - } - - private void waitForStartToComplete() { - try { - if (startCompleted.await(START_COMPLETED_WAIT_TIME.getSeconds(), TimeUnit.SECONDS) == false) { - throw new IllegalStateException("Http sender startup did not complete in time"); - } - } catch (InterruptedException e) { - throw new IllegalStateException("Http sender interrupted while waiting for startup to complete"); - } - } - - /** - * Send a request at some point in the future. The timeout used is retrieved from the settings. 
- * @param request the http request to send - * @param listener a listener to handle the response - */ - public void send(HttpRequest request, ActionListener listener) { - assert started.get() : "call start() before sending a request"; - waitForStartToComplete(); - service.execute(request, maxRequestTimeout, listener); - } - - public static List> getSettings() { - return List.of(MAX_REQUEST_TIMEOUT); - } - } -} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HuggingFaceExecutableRequestCreator.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HuggingFaceExecutableRequestCreator.java new file mode 100644 index 0000000000000..62558fe6071ac --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HuggingFaceExecutableRequestCreator.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.apache.http.client.protocol.HttpClientContext; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.common.Truncator; +import org.elasticsearch.xpack.inference.external.http.retry.RequestSender; +import org.elasticsearch.xpack.inference.external.http.retry.ResponseHandler; +import org.elasticsearch.xpack.inference.external.huggingface.HuggingFaceAccount; +import org.elasticsearch.xpack.inference.external.request.huggingface.HuggingFaceInferenceRequest; +import org.elasticsearch.xpack.inference.services.huggingface.HuggingFaceModel; + +import java.util.List; +import java.util.Objects; +import java.util.function.Supplier; + +import static org.elasticsearch.xpack.inference.common.Truncator.truncate; + +public class HuggingFaceExecutableRequestCreator implements ExecutableRequestCreator { + private static final Logger logger = LogManager.getLogger(HuggingFaceExecutableRequestCreator.class); + + private final HuggingFaceModel model; + private final HuggingFaceAccount account; + private final ResponseHandler responseHandler; + private final Truncator truncator; + + public HuggingFaceExecutableRequestCreator(HuggingFaceModel model, ResponseHandler responseHandler, Truncator truncator) { + this.model = Objects.requireNonNull(model); + account = new HuggingFaceAccount(model.getUri(), model.getApiKey()); + this.responseHandler = Objects.requireNonNull(responseHandler); + this.truncator = Objects.requireNonNull(truncator); + } + + @Override + public Runnable create( + List input, + RequestSender requestSender, + Supplier hasRequestCompletedFunction, + HttpClientContext context, + ActionListener listener + ) { + var truncatedInput = truncate(input, model.getTokenLimit()); + var request = new HuggingFaceInferenceRequest(truncator, account, truncatedInput, model); + + return new ExecutableInferenceRequest( + requestSender, + logger, + request, + context, + responseHandler, + hasRequestCompletedFunction, + listener + ); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/InferenceRequest.java 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/InferenceRequest.java new file mode 100644 index 0000000000000..ed77e4b207a94 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/InferenceRequest.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceServiceResults; + +import java.util.List; +import java.util.function.Supplier; + +/** + * A contract for defining a request sent to a 3rd party service. + */ +public interface InferenceRequest { + + /** + * Returns the creator that handles building an executable request based on the input provided. + */ + ExecutableRequestCreator getRequestCreator(); + + /** + * Returns the text input associated with this request. + */ + List getInput(); + + /** + * Returns the listener to notify of the results. + */ + ActionListener getListener(); + + /** + * Returns whether the request has completed. Returns true if from a failure, success, or a timeout. + */ + boolean hasCompleted(); + + /** + * Returns a {@link Supplier} to determine if the request has completed. + */ + Supplier getRequestCompletedFunction(); +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/NoopTask.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/NoopTask.java index c5e533eb7d8fe..6cdcd38d224a9 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/NoopTask.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/NoopTask.java @@ -7,13 +7,41 @@ package org.elasticsearch.xpack.inference.external.http.sender; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceServiceResults; -class NoopTask extends AbstractRunnable { +import java.util.List; +import java.util.function.Supplier; + +class NoopTask implements RejectableTask { + + @Override + public ExecutableRequestCreator getRequestCreator() { + return null; + } + + @Override + public List getInput() { + return null; + } @Override - public void onFailure(Exception e) {} + public ActionListener getListener() { + return null; + } @Override - protected void doRun() throws Exception {} + public boolean hasCompleted() { + return true; + } + + @Override + public Supplier getRequestCompletedFunction() { + return () -> true; + } + + @Override + public void onRejection(Exception e) { + + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiEmbeddingsExecutableRequestCreator.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiEmbeddingsExecutableRequestCreator.java new file mode 100644 index 0000000000000..708e67944441c --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiEmbeddingsExecutableRequestCreator.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.apache.http.client.protocol.HttpClientContext; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.common.Truncator; +import org.elasticsearch.xpack.inference.external.http.retry.RequestSender; +import org.elasticsearch.xpack.inference.external.http.retry.ResponseHandler; +import org.elasticsearch.xpack.inference.external.openai.OpenAiAccount; +import org.elasticsearch.xpack.inference.external.openai.OpenAiResponseHandler; +import org.elasticsearch.xpack.inference.external.request.openai.OpenAiEmbeddingsRequest; +import org.elasticsearch.xpack.inference.external.response.openai.OpenAiEmbeddingsResponseEntity; +import org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsModel; + +import java.util.List; +import java.util.Objects; +import java.util.function.Supplier; + +import static org.elasticsearch.xpack.inference.common.Truncator.truncate; + +public class OpenAiEmbeddingsExecutableRequestCreator implements ExecutableRequestCreator { + + private static final Logger logger = LogManager.getLogger(OpenAiEmbeddingsExecutableRequestCreator.class); + + private static final ResponseHandler HANDLER = createEmbeddingsHandler(); + + private static ResponseHandler createEmbeddingsHandler() { + return new OpenAiResponseHandler("openai text embedding", OpenAiEmbeddingsResponseEntity::fromResponse); + } + + private final Truncator truncator; + private final OpenAiEmbeddingsModel model; + private final OpenAiAccount account; + + public OpenAiEmbeddingsExecutableRequestCreator(OpenAiEmbeddingsModel model, Truncator truncator) { + this.model = Objects.requireNonNull(model); + this.account = new OpenAiAccount( + this.model.getServiceSettings().uri(), + this.model.getServiceSettings().organizationId(), + this.model.getSecretSettings().apiKey() + ); + this.truncator = Objects.requireNonNull(truncator); + } + + @Override + public Runnable create( + List input, + RequestSender requestSender, + Supplier hasRequestCompletedFunction, + HttpClientContext context, + ActionListener listener + ) { + var truncatedInput = truncate(input, model.getServiceSettings().maxInputTokens()); + OpenAiEmbeddingsRequest request = new OpenAiEmbeddingsRequest(truncator, account, truncatedInput, model); + + return new ExecutableInferenceRequest(requestSender, logger, request, context, HANDLER, hasRequestCompletedFunction, listener); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RejectableTask.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RejectableTask.java new file mode 100644 index 0000000000000..3da64d5491a60 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RejectableTask.java @@ -0,0 +1,12 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
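The two ExecutableRequestCreator implementations above (Hugging Face and OpenAI embeddings) share one shape: truncate the input to the model's limit, build the provider-specific request, and return a Runnable that executes it unless the caller has already completed, for example by timing out. A rough sketch of that creator pattern, with simplified stand-in types in place of the plugin's RequestSender, HttpClientContext, and ActionListener:

import java.util.List;
import java.util.function.Consumer;
import java.util.function.Supplier;

// Simplified stand-ins for the plugin's request/response types.
interface RequestCreator<R> {
    Runnable create(List<String> input, Supplier<Boolean> hasCompleted, Consumer<R> onResult);
}

// A provider-specific creator: truncate the input, build the provider request,
// and return a Runnable that performs the call when the queue gets to it.
final class ExampleEmbeddingsRequestCreator implements RequestCreator<float[]> {
    private final int maxLength;

    ExampleEmbeddingsRequestCreator(int maxLength) {
        this.maxLength = maxLength;
    }

    @Override
    public Runnable create(List<String> input, Supplier<Boolean> hasCompleted, Consumer<float[]> onResult) {
        // crude character-based truncation standing in for the plugin's token-aware Truncator
        List<String> truncated = input.stream().map(s -> s.length() > maxLength ? s.substring(0, maxLength) : s).toList();
        return () -> {
            if (hasCompleted.get()) {
                return; // the caller already timed out or was otherwise completed
            }
            // ... send the HTTP request for `truncated` and parse the response ...
            onResult.accept(new float[truncated.size()]);
        };
    }
}

Returning a Runnable keeps the queueing layer provider-agnostic: it only needs something it can execute, while each creator owns the provider-specific request construction.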
+ */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +interface RejectableTask extends InferenceRequest { + void onRejection(Exception e); +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorService.java index f844787455290..ecbaf26ea17f4 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorService.java @@ -11,17 +11,15 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.inference.common.AdjustableCapacityBlockingQueue; -import org.elasticsearch.xpack.inference.external.http.HttpClient; -import org.elasticsearch.xpack.inference.external.http.HttpResult; import org.elasticsearch.xpack.inference.external.http.RequestExecutor; -import org.elasticsearch.xpack.inference.external.request.HttpRequest; import java.util.ArrayList; import java.util.List; @@ -36,8 +34,7 @@ import static org.elasticsearch.core.Strings.format; /** - * An {@link java.util.concurrent.ExecutorService} for queuing and executing {@link RequestTask} containing - * {@link org.apache.http.client.methods.HttpUriRequest}. This class is useful because the + * A service for queuing and executing {@link RequestTask}. This class is useful because the * {@link org.apache.http.impl.nio.conn.PoolingNHttpClientConnectionManager} will block when leasing a connection if no * connections are available. To avoid blocking the inference transport threads, this executor will queue up the * requests until connections are available. @@ -48,12 +45,11 @@ * {@link org.apache.http.client.config.RequestConfig.Builder#setConnectionRequestTimeout} for more info. 
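That javadoc describes the core idea: callers enqueue work and return immediately, while a single dedicated thread drains the queue and executes one request at a time, so inference transport threads never block waiting for a connection lease. A bare-bones sketch of such a single-consumer loop, using plain JDK types instead of the service's adjustable-capacity queue and control channel:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicBoolean;

// Callers enqueue tasks and return immediately; one dedicated worker thread
// drains the queue and runs each task, absorbing any blocking on HTTP connections.
final class QueueingExecutor {
    private final BlockingQueue<Runnable> queue = new LinkedBlockingQueue<>();
    private final AtomicBoolean running = new AtomicBoolean(true);

    void execute(Runnable task) {
        if (running.get() == false || queue.offer(task) == false) {
            throw new IllegalStateException("rejected: executor is shutting down or the queue is full");
        }
    }

    // intended to run on a single dedicated worker thread
    void loop() throws InterruptedException {
        while (running.get()) {
            Runnable task = queue.take();
            try {
                task.run();
            } catch (Exception e) {
                // log and keep consuming; one failed request must not kill the loop
            }
        }
    }

    void shutdown() {
        running.set(false);
    }
}

The real service additionally polls a control queue between tasks so capacity changes and shutdown can interrupt the loop; that detail is omitted here.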
*/ class RequestExecutorService implements RequestExecutor { - - private static final AdjustableCapacityBlockingQueue.QueueCreator QUEUE_CREATOR = + private static final AdjustableCapacityBlockingQueue.QueueCreator QUEUE_CREATOR = new AdjustableCapacityBlockingQueue.QueueCreator<>() { @Override - public BlockingQueue create(int capacity) { - BlockingQueue queue; + public BlockingQueue create(int capacity) { + BlockingQueue queue; if (capacity <= 0) { queue = create(); } else { @@ -64,41 +60,30 @@ public BlockingQueue create(int capacity) { } @Override - public BlockingQueue create() { + public BlockingQueue create() { return new LinkedBlockingQueue<>(); } }; private static final Logger logger = LogManager.getLogger(RequestExecutorService.class); private final String serviceName; - private final AdjustableCapacityBlockingQueue queue; + private final AdjustableCapacityBlockingQueue queue; private final AtomicBoolean running = new AtomicBoolean(true); private final CountDownLatch terminationLatch = new CountDownLatch(1); private final HttpClientContext httpContext; - private final HttpClient httpClient; private final ThreadPool threadPool; private final CountDownLatch startupLatch; private final BlockingQueue controlQueue = new LinkedBlockingQueue<>(); + private final SingleRequestManager requestManager; RequestExecutorService( String serviceName, - HttpClient httpClient, ThreadPool threadPool, @Nullable CountDownLatch startupLatch, - RequestExecutorServiceSettings settings + RequestExecutorServiceSettings settings, + SingleRequestManager requestManager ) { - this(serviceName, httpClient, threadPool, QUEUE_CREATOR, startupLatch, settings); - } - - private static BlockingQueue buildQueue(int capacity) { - BlockingQueue queue; - if (capacity <= 0) { - queue = new LinkedBlockingQueue<>(); - } else { - queue = new LinkedBlockingQueue<>(capacity); - } - - return queue; + this(serviceName, threadPool, QUEUE_CREATOR, startupLatch, settings, requestManager); } /** @@ -106,18 +91,18 @@ private static BlockingQueue buildQueue(int capacity) { */ RequestExecutorService( String serviceName, - HttpClient httpClient, ThreadPool threadPool, - AdjustableCapacityBlockingQueue.QueueCreator createQueue, + AdjustableCapacityBlockingQueue.QueueCreator createQueue, @Nullable CountDownLatch startupLatch, - RequestExecutorServiceSettings settings + RequestExecutorServiceSettings settings, + SingleRequestManager requestManager ) { this.serviceName = Objects.requireNonNull(serviceName); - this.httpClient = Objects.requireNonNull(httpClient); this.threadPool = Objects.requireNonNull(threadPool); this.httpContext = HttpClientContext.create(); this.queue = new AdjustableCapacityBlockingQueue<>(createQueue, settings.getQueueCapacity()); this.startupLatch = startupLatch; + this.requestManager = Objects.requireNonNull(requestManager); Objects.requireNonNull(settings); settings.registerQueueCapacityCallback(this::onCapacityChange); @@ -179,7 +164,7 @@ private void signalStartInitiated() { */ private void handleTasks() throws InterruptedException { try { - AbstractRunnable task = queue.take(); + RejectableTask task = queue.take(); var command = controlQueue.poll(); if (command != null) { @@ -200,9 +185,9 @@ private void handleTasks() throws InterruptedException { } } - private void executeTask(AbstractRunnable task) { + private void executeTask(RejectableTask task) { try { - task.run(); + requestManager.execute(task, httpContext); } catch (Exception e) { logger.warn(format("Http executor service [%s] failed to execute request 
[%s]", serviceName, task), e); } @@ -212,7 +197,7 @@ private synchronized void notifyRequestsOfShutdown() { assert isShutdown() : "Requests should only be notified if the executor is shutting down"; try { - List notExecuted = new ArrayList<>(); + List notExecuted = new ArrayList<>(); queue.drainTo(notExecuted); rejectTasks(notExecuted, this::rejectTaskBecauseOfShutdown); @@ -221,7 +206,7 @@ private synchronized void notifyRequestsOfShutdown() { } } - private void rejectTaskBecauseOfShutdown(AbstractRunnable task) { + private void rejectTaskBecauseOfShutdown(RejectableTask task) { try { task.onRejection( new EsRejectedExecutionException( @@ -236,7 +221,7 @@ private void rejectTaskBecauseOfShutdown(AbstractRunnable task) { } } - private void rejectTasks(List tasks, Consumer rejectionFunction) { + private void rejectTasks(List tasks, Consumer rejectionFunction) { for (var task : tasks) { rejectionFunction.accept(task); } @@ -270,16 +255,30 @@ public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedE } /** - * Send the request at some point in the future. + * Execute the request at some point in the future. * - * @param request the http request to send - * @param timeout the maximum time to wait for this request to complete (failing or succeeding). Once the time elapses, the - * listener::onFailure is called with a {@link org.elasticsearch.ElasticsearchTimeoutException}. - * If null, then the request will wait forever - * @param listener an {@link ActionListener} for the response or failure + * @param requestCreator the http request to send + * @param input the text to perform inference on + * @param timeout the maximum time to wait for this request to complete (failing or succeeding). Once the time elapses, the + * listener::onFailure is called with a {@link org.elasticsearch.ElasticsearchTimeoutException}. 
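The shutdown path just shown drains whatever is still queued and rejects each task, so no caller is left waiting on a request that will never run. Roughly, with a generic element type standing in for RejectableTask and a plain comment where the real code builds an EsRejectedExecutionException:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.function.Consumer;

// Sketch of the shutdown path: drain everything still queued and notify each
// pending task of the rejection so no caller waits forever on a dropped request.
final class ShutdownNotifier<T> {
    void notifyOfShutdown(BlockingQueue<T> queue, Consumer<T> rejectTask) {
        List<T> notExecuted = new ArrayList<>();
        queue.drainTo(notExecuted);
        for (T task : notExecuted) {
            try {
                rejectTask.accept(task); // e.g. task.onRejection(new RejectedExecutionException(...))
            } catch (Exception e) {
                // a misbehaving listener must not prevent the remaining tasks from being notified
            }
        }
    }
}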
+ * If null, then the request will wait forever + * @param listener an {@link ActionListener} for the response or failure */ - public void execute(HttpRequest request, @Nullable TimeValue timeout, ActionListener listener) { - RequestTask task = new RequestTask(request, httpClient, httpContext, timeout, threadPool, listener); + public void execute( + ExecutableRequestCreator requestCreator, + List input, + @Nullable TimeValue timeout, + ActionListener listener + ) { + var task = new RequestTask( + requestCreator, + input, + timeout, + threadPool, + // TODO when multi-tenancy (as well as batching) is implemented we need to be very careful that we preserve + // the thread contexts correctly to avoid accidentally retrieving the credentials for the wrong user + ContextPreservingActionListener.wrapPreservingContext(listener, threadPool.getThreadContext()) + ); if (isShutdown()) { EsRejectedExecutionException rejected = new EsRejectedExecutionException( diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestTask.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestTask.java index cc65d16af652c..970366f7765dd 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestTask.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestTask.java @@ -7,157 +7,96 @@ package org.elasticsearch.xpack.inference.external.http.sender; -import org.apache.http.client.protocol.HttpClientContext; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.action.support.ListenerTimeouts; +import org.elasticsearch.common.Strings; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.threadpool.Scheduler; +import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.inference.external.http.HttpClient; -import org.elasticsearch.xpack.inference.external.http.HttpResult; -import org.elasticsearch.xpack.inference.external.request.HttpRequest; +import java.util.List; import java.util.Objects; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Supplier; -import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.inference.InferencePlugin.UTILITY_THREAD_POOL_NAME; -class RequestTask extends AbstractRunnable { - private static final Logger logger = LogManager.getLogger(RequestTask.class); - private static final Scheduler.Cancellable NOOP_TIMEOUT_HANDLER = createDefaultHandler(); +class RequestTask implements RejectableTask { - private final HttpRequest request; - private final ActionListener listener; - private final Scheduler.Cancellable timeoutHandler; - private final AtomicBoolean notified = new AtomicBoolean(); - private final TimeValue timeout; - private final Runnable command; + private final AtomicBoolean finished = new AtomicBoolean(); + private final ExecutableRequestCreator requestCreator; + private final List input; + private final ActionListener listener; RequestTask( - HttpRequest request, - HttpClient httpClient, - HttpClientContext context, + 
ExecutableRequestCreator requestCreator, + List input, @Nullable TimeValue timeout, ThreadPool threadPool, - ActionListener listener + ActionListener listener ) { - this.request = Objects.requireNonNull(request); - this.listener = Objects.requireNonNull(listener); - this.timeout = timeout; - this.timeoutHandler = startTimer(threadPool, timeout); - this.command = threadPool.getThreadContext() - .preserveContext( - new Command( - Objects.requireNonNull(httpClient), - this.request, - Objects.requireNonNull(context), - ActionListener.wrap(this::onSuccess, this::onFailure) - ) - ); + this.requestCreator = Objects.requireNonNull(requestCreator); + this.input = Objects.requireNonNull(input); + this.listener = getListener(Objects.requireNonNull(listener), timeout, Objects.requireNonNull(threadPool)); } - private Scheduler.Cancellable startTimer(ThreadPool threadPool, TimeValue timeout) { - Objects.requireNonNull(threadPool); + private ActionListener getListener( + ActionListener origListener, + @Nullable TimeValue timeout, + ThreadPool threadPool + ) { + ActionListener notificationListener = ActionListener.wrap(result -> { + finished.set(true); + origListener.onResponse(result); + }, e -> { + finished.set(true); + origListener.onFailure(e); + }); if (timeout == null) { - return NOOP_TIMEOUT_HANDLER; + return notificationListener; } - return threadPool.schedule(this::onTimeout, timeout, threadPool.executor(UTILITY_THREAD_POOL_NAME)); - } - - private void onTimeout() { - assert timeout != null : "timeout must be defined to use a timeout handler"; - logger.debug( - () -> format( - "Request from inference entity id [%s] timed out after [%s] while waiting to be executed", - request.inferenceEntityId(), - timeout + return ListenerTimeouts.wrapWithTimeout( + threadPool, + timeout, + threadPool.executor(UTILITY_THREAD_POOL_NAME), + notificationListener, + (ignored) -> notificationListener.onFailure( + new ElasticsearchTimeoutException(Strings.format("Request timed out waiting to be sent after [%s]", timeout)) ) ); - notifyOfResult( - () -> listener.onFailure( - new ElasticsearchTimeoutException(format("Request timed out waiting to be executed after [%s]", timeout)) - ) - ); - } - - private void notifyOfResult(Runnable runnable) { - if (notified.compareAndSet(false, true)) { - runnable.run(); - } } @Override - public void onFailure(Exception e) { - timeoutHandler.cancel(); - notifyOfResult(() -> listener.onFailure(e)); + public boolean hasCompleted() { + return finished.get(); } @Override - protected void doRun() { - try { - command.run(); - } catch (Exception e) { - String message = format("Failed while executing request from inference entity id [%s]", request.inferenceEntityId()); - logger.warn(message, e); - onFailure(new ElasticsearchException(message, e)); - } + public Supplier getRequestCompletedFunction() { + return this::hasCompleted; } - private void onSuccess(HttpResult result) { - timeoutHandler.cancel(); - notifyOfResult(() -> listener.onResponse(result)); + @Override + public List getInput() { + return input; } @Override - public String toString() { - return request.inferenceEntityId(); + public ActionListener getListener() { + return listener; } - private static Scheduler.Cancellable createDefaultHandler() { - return new Scheduler.Cancellable() { - @Override - public boolean cancel() { - return true; - } - - @Override - public boolean isCancelled() { - return true; - } - }; + @Override + public void onRejection(Exception e) { + listener.onFailure(e); } - private record Command( - HttpClient 
httpClient, - HttpRequest requestToSend, - HttpClientContext context, - ActionListener resultListener - ) implements Runnable { - - @Override - public void run() { - try { - httpClient.send(requestToSend, context, resultListener); - } catch (Exception e) { - logger.warn( - format("Failed to send request from inference entity id [%s] via the http client", requestToSend.inferenceEntityId()), - e - ); - resultListener.onFailure( - new ElasticsearchException( - format("Failed to send request from inference entity id [%s]", requestToSend.inferenceEntityId()), - e - ) - ); - } - } + @Override + public ExecutableRequestCreator getRequestCreator() { + return requestCreator; } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/Sender.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/Sender.java index f1a0e112219fd..0272f4b0e351c 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/Sender.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/Sender.java @@ -10,15 +10,20 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.xpack.inference.external.http.HttpResult; -import org.elasticsearch.xpack.inference.external.request.HttpRequest; +import org.elasticsearch.inference.InferenceServiceResults; import java.io.Closeable; +import java.util.List; public interface Sender extends Closeable { void start(); - void send(HttpRequest request, ActionListener listener); + void send( + ExecutableRequestCreator requestCreator, + List input, + @Nullable TimeValue timeout, + ActionListener listener + ); - void send(HttpRequest request, @Nullable TimeValue timeout, ActionListener listener); + void send(ExecutableRequestCreator requestCreator, List input, ActionListener listener); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/SingleRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/SingleRequestManager.java new file mode 100644 index 0000000000000..ecd12814d0877 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/SingleRequestManager.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.apache.http.client.protocol.HttpClientContext; +import org.elasticsearch.xpack.inference.external.http.retry.RetryingHttpSender; + +import java.util.Objects; + +/** + * Handles executing a single inference request at a time. 
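The new RequestTask no longer runs the HTTP call itself; it wraps the caller's listener so that success, failure, or a scheduled timeout each mark the task finished exactly once, and the manager that executes queued tasks (SingleRequestManager, shown below) checks that flag and skips work that has already completed. A simplified, self-contained version of that wrapping, assuming a ScheduledExecutorService in place of the Elasticsearch thread pool and its ListenerTimeouts helper:

import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;

// Whichever of success, failure, or timeout fires first marks the task finished;
// the timeout path fails the caller instead of leaving it waiting indefinitely.
final class CompletionTrackingListener<R> {
    private final AtomicBoolean finished = new AtomicBoolean();
    private final Consumer<R> onResponse;
    private final Consumer<Exception> onFailure;

    CompletionTrackingListener(
        Consumer<R> onResponse,
        Consumer<Exception> onFailure,
        ScheduledExecutorService scheduler,
        long timeoutMillis
    ) {
        this.onResponse = onResponse;
        this.onFailure = onFailure;
        scheduler.schedule(
            () -> failure(new RuntimeException("Request timed out waiting to be sent after [" + timeoutMillis + "ms]")),
            timeoutMillis,
            TimeUnit.MILLISECONDS
        );
    }

    boolean hasCompleted() {
        return finished.get();
    }

    void response(R result) {
        if (finished.compareAndSet(false, true)) {
            onResponse.accept(result);
        }
    }

    void failure(Exception e) {
        if (finished.compareAndSet(false, true)) {
            onFailure.accept(e);
        }
    }
}

Exposing hasCompleted() to the request creator's Runnable is what lets a request that already timed out be skipped rather than sent to the third-party service.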
+ */ +public class SingleRequestManager { + + protected RetryingHttpSender requestSender; + + public SingleRequestManager(RetryingHttpSender requestSender) { + this.requestSender = Objects.requireNonNull(requestSender); + } + + public void execute(InferenceRequest inferenceRequest, HttpClientContext context) { + if (isNoopRequest(inferenceRequest) || inferenceRequest.hasCompleted()) { + return; + } + + inferenceRequest.getRequestCreator() + .create( + inferenceRequest.getInput(), + requestSender, + inferenceRequest.getRequestCompletedFunction(), + context, + inferenceRequest.getListener() + ) + .run(); + } + + private static boolean isNoopRequest(InferenceRequest inferenceRequest) { + return inferenceRequest.getRequestCreator() == null + || inferenceRequest.getInput() == null + || inferenceRequest.getListener() == null; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiClient.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiClient.java deleted file mode 100644 index cb82616587091..0000000000000 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiClient.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.inference.external.openai; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.inference.InferenceServiceResults; -import org.elasticsearch.xpack.inference.external.http.retry.ResponseHandler; -import org.elasticsearch.xpack.inference.external.http.retry.RetrySettings; -import org.elasticsearch.xpack.inference.external.http.retry.RetryingHttpSender; -import org.elasticsearch.xpack.inference.external.http.sender.Sender; -import org.elasticsearch.xpack.inference.external.request.openai.OpenAiEmbeddingsRequest; -import org.elasticsearch.xpack.inference.external.response.openai.OpenAiEmbeddingsResponseEntity; -import org.elasticsearch.xpack.inference.services.ServiceComponents; - -import java.io.IOException; - -public class OpenAiClient { - private static final Logger logger = LogManager.getLogger(OpenAiClient.class); - private static final ResponseHandler EMBEDDINGS_HANDLER = createEmbeddingsHandler(); - - private final RetryingHttpSender sender; - - public OpenAiClient(Sender sender, ServiceComponents serviceComponents) { - this.sender = new RetryingHttpSender( - sender, - serviceComponents.throttlerManager(), - logger, - new RetrySettings(serviceComponents.settings()), - serviceComponents.threadPool() - ); - } - - public void send(OpenAiEmbeddingsRequest request, ActionListener listener) throws IOException { - sender.send(request, EMBEDDINGS_HANDLER, listener); - } - - private static ResponseHandler createEmbeddingsHandler() { - return new OpenAiResponseHandler("openai text embedding", OpenAiEmbeddingsResponseEntity::fromResponse); - } -} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java index fa329e37fb0e2..98b004cd1aa7f 100644 --- 
a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.inference.services; -import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.ActionListener; import org.elasticsearch.core.IOUtils; import org.elasticsearch.inference.ChunkedInferenceServiceResults; @@ -16,31 +15,30 @@ import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.external.http.sender.Sender; import java.io.IOException; import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.concurrent.atomic.AtomicReference; public abstract class SenderService implements InferenceService { - private final SetOnce factory; - private final SetOnce serviceComponents; - private final AtomicReference sender = new AtomicReference<>(); + private final Sender sender; + private final ServiceComponents serviceComponents; - public SenderService(SetOnce factory, SetOnce serviceComponents) { - this.factory = Objects.requireNonNull(factory); + public SenderService(HttpRequestSender.Factory factory, ServiceComponents serviceComponents) { + Objects.requireNonNull(factory); + sender = factory.createSender(name()); this.serviceComponents = Objects.requireNonNull(serviceComponents); } protected Sender getSender() { - return sender.get(); + return sender; } protected ServiceComponents getServiceComponents() { - return serviceComponents.get(); + return serviceComponents; } @Override @@ -98,12 +96,11 @@ protected void doStart(Model model, ActionListener listener) { } private void init() { - sender.updateAndGet(current -> Objects.requireNonNullElseGet(current, () -> factory.get().createSender(name()))); - sender.get().start(); + sender.start(); } @Override public void close() throws IOException { - IOUtils.closeWhileHandlingException(sender.get()); + IOUtils.closeWhileHandlingException(sender); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java index 35b245e9a657a..172a71bd45434 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.inference.services.cohere; -import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; @@ -24,7 +23,7 @@ import org.elasticsearch.inference.TaskType; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.inference.external.action.cohere.CohereActionCreator; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.services.SenderService; import 
org.elasticsearch.xpack.inference.services.ServiceComponents; import org.elasticsearch.xpack.inference.services.ServiceUtils; @@ -43,7 +42,7 @@ public class CohereService extends SenderService { public static final String NAME = "cohere"; - public CohereService(SetOnce factory, SetOnce serviceComponents) { + public CohereService(HttpRequestSender.Factory factory, ServiceComponents serviceComponents) { super(factory, serviceComponents); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/textembedding/CustomElandInternalServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandInternalServiceSettings.java similarity index 93% rename from x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/textembedding/CustomElandInternalServiceSettings.java rename to x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandInternalServiceSettings.java index 49cf3fdcd9e89..ee22d51914b15 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/textembedding/CustomElandInternalServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandInternalServiceSettings.java @@ -3,11 +3,9 @@ * or more contributor license agreements. Licensed under the Elastic License * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. - * - * this file was contributed to by a generative AI */ -package org.elasticsearch.xpack.inference.services.textembedding; +package org.elasticsearch.xpack.inference.services.elasticsearch; import org.elasticsearch.TransportVersion; import org.elasticsearch.common.ValidationException; @@ -21,7 +19,7 @@ import static org.elasticsearch.TransportVersions.ML_TEXT_EMBEDDING_INFERENCE_SERVICE_ADDED; -public class CustomElandInternalServiceSettings extends TextEmbeddingInternalServiceSettings { +public class CustomElandInternalServiceSettings extends ElasticsearchInternalServiceSettings { public static final String NAME = "custom_eland_model_internal_service_settings"; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/textembedding/CustomElandModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandModel.java similarity index 96% rename from x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/textembedding/CustomElandModel.java rename to x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandModel.java index 5d7b63431841f..aa05af9461565 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/textembedding/CustomElandModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandModel.java @@ -5,7 +5,7 @@ * 2.0. 
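The SenderService change a few hunks above replaces the lazy SetOnce/AtomicReference wiring with a sender created eagerly in the constructor, so the field is final and start()/close() become trivial. A small sketch of that lifecycle, assuming the sender's own start() is idempotent and using stand-in interfaces rather than HttpRequestSender.Factory:

import java.io.Closeable;
import java.io.IOException;
import java.util.Objects;

// The sender is created eagerly in the constructor (no lazy initialization),
// started on demand, and released when the service closes.
abstract class ExampleSenderService implements Closeable {
    interface ExampleSender extends Closeable {
        void start();
    }

    interface ExampleSenderFactory {
        ExampleSender createSender(String serviceName);
    }

    private final ExampleSender sender;

    ExampleSenderService(ExampleSenderFactory factory, String serviceName) {
        this.sender = Objects.requireNonNull(factory).createSender(serviceName);
    }

    protected ExampleSender getSender() {
        return sender;
    }

    protected void start() {
        sender.start(); // idempotent in the real sender, so repeated starts are harmless
    }

    @Override
    public void close() throws IOException {
        sender.close();
    }
}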
*/ -package org.elasticsearch.xpack.inference.services.textembedding; +package org.elasticsearch.xpack.inference.services.elasticsearch; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; @@ -17,7 +17,7 @@ import static org.elasticsearch.xpack.core.ml.inference.assignment.AllocationStatus.State.STARTED; -public class CustomElandModel extends TextEmbeddingModel { +public class CustomElandModel extends ElasticsearchModel { public CustomElandModel( String inferenceEntityId, diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/textembedding/TextEmbeddingInternalService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java similarity index 95% rename from x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/textembedding/TextEmbeddingInternalService.java rename to x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java index 06d6545a381bd..1aafa340268f3 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/textembedding/TextEmbeddingInternalService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.xpack.inference.services.textembedding; +package org.elasticsearch.xpack.inference.services.elasticsearch; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -52,9 +52,9 @@ import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwIfNotEmptyMap; import static org.elasticsearch.xpack.inference.services.settings.InternalServiceSettings.MODEL_ID; -public class TextEmbeddingInternalService implements InferenceService { +public class ElasticsearchInternalService implements InferenceService { - public static final String NAME = "text_embedding"; + public static final String NAME = "elasticsearch"; static final String MULTILINGUAL_E5_SMALL_MODEL_ID = ".multilingual-e5-small"; static final String MULTILINGUAL_E5_SMALL_MODEL_ID_LINUX_X86 = ".multilingual-e5-small_linux-x86_64"; @@ -65,9 +65,9 @@ public class TextEmbeddingInternalService implements InferenceService { private final OriginSettingClient client; - private static final Logger logger = LogManager.getLogger(TextEmbeddingInternalService.class); + private static final Logger logger = LogManager.getLogger(ElasticsearchInternalService.class); - public TextEmbeddingInternalService(InferenceServiceExtension.InferenceServiceFactoryContext context) { + public ElasticsearchInternalService(InferenceServiceExtension.InferenceServiceFactoryContext context) { this.client = new OriginSettingClient(context.client(), ClientHelper.INFERENCE_ORIGIN); } @@ -168,7 +168,7 @@ private static boolean modelVariantDoesNotMatchArchitecturesAndIsNotPlatformAgno } @Override - public TextEmbeddingModel parsePersistedConfigWithSecrets( + public ElasticsearchModel parsePersistedConfigWithSecrets( String inferenceEntityId, TaskType taskType, Map config, @@ -178,7 +178,7 @@ public TextEmbeddingModel parsePersistedConfigWithSecrets( } @Override - public TextEmbeddingModel parsePersistedConfig(String inferenceEntityId, TaskType taskType, Map config) { + public ElasticsearchModel parsePersistedConfig(String inferenceEntityId, TaskType taskType, Map config) { Map serviceSettingsMap = 
removeFromMapOrThrowIfNull(config, ModelConfigurations.SERVICE_SETTINGS); String modelId = (String) serviceSettingsMap.get(MODEL_ID); @@ -270,7 +270,7 @@ public void chunkedInfer( @Override public void start(Model model, ActionListener listener) { - if (model instanceof TextEmbeddingModel == false) { + if (model instanceof ElasticsearchModel == false) { listener.onFailure(notTextEmbeddingModelException(model)); return; } @@ -282,8 +282,8 @@ public void start(Model model, ActionListener listener) { return; } - var startRequest = ((TextEmbeddingModel) model).getStartTrainedModelDeploymentActionRequest(); - var responseListener = ((TextEmbeddingModel) model).getCreateTrainedModelAssignmentActionListener(model, listener); + var startRequest = ((ElasticsearchModel) model).getStartTrainedModelDeploymentActionRequest(); + var responseListener = ((ElasticsearchModel) model).getCreateTrainedModelAssignmentActionListener(model, listener); client.execute(StartTrainedModelDeploymentAction.INSTANCE, startRequest, responseListener); } @@ -299,7 +299,7 @@ public void stop(String inferenceEntityId, ActionListener listener) { @Override public void putModel(Model model, ActionListener listener) { - if (model instanceof TextEmbeddingModel == false) { + if (model instanceof ElasticsearchModel == false) { listener.onFailure(notTextEmbeddingModelException(model)); return; } else if (model instanceof MultilingualE5SmallModel e5Model) { @@ -347,7 +347,7 @@ public void isModelDownloaded(Model model, ActionListener listener) { } }); - if (model instanceof TextEmbeddingModel == false) { + if (model instanceof ElasticsearchModel == false) { listener.onFailure(notTextEmbeddingModelException(model)); } else if (model.getServiceSettings() instanceof InternalServiceSettings internalServiceSettings) { String modelId = internalServiceSettings.getModelId(); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/textembedding/TextEmbeddingInternalServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceSettings.java similarity index 74% rename from x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/textembedding/TextEmbeddingInternalServiceSettings.java rename to x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceSettings.java index fcc96703e221f..f6458b48f99fc 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/textembedding/TextEmbeddingInternalServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceSettings.java @@ -3,11 +3,9 @@ * or more contributor license agreements. Licensed under the Elastic License * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. 
- * - * This file was contributed to by a generative AI */ -package org.elasticsearch.xpack.inference.services.textembedding; +package org.elasticsearch.xpack.inference.services.elasticsearch; import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; @@ -17,21 +15,21 @@ import static org.elasticsearch.TransportVersions.ML_TEXT_EMBEDDING_INFERENCE_SERVICE_ADDED; -public class TextEmbeddingInternalServiceSettings extends InternalServiceSettings { +public class ElasticsearchInternalServiceSettings extends InternalServiceSettings { public static final String NAME = "text_embedding_internal_service_settings"; - public TextEmbeddingInternalServiceSettings(int numAllocations, int numThreads, String modelVariant) { + public ElasticsearchInternalServiceSettings(int numAllocations, int numThreads, String modelVariant) { super(numAllocations, numThreads, modelVariant); } - public TextEmbeddingInternalServiceSettings(StreamInput in) throws IOException { + public ElasticsearchInternalServiceSettings(StreamInput in) throws IOException { super(in.readVInt(), in.readVInt(), in.readString()); } @Override public String getWriteableName() { - return TextEmbeddingInternalServiceSettings.NAME; + return ElasticsearchInternalServiceSettings.NAME; } @Override diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/textembedding/TextEmbeddingModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchModel.java similarity index 77% rename from x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/textembedding/TextEmbeddingModel.java rename to x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchModel.java index 800e2928c7afa..954469537a4cc 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/textembedding/TextEmbeddingModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchModel.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.inference.services.textembedding; +package org.elasticsearch.xpack.inference.services.elasticsearch; import org.elasticsearch.action.ActionListener; import org.elasticsearch.inference.Model; @@ -14,20 +14,20 @@ import org.elasticsearch.xpack.core.ml.action.CreateTrainedModelAssignmentAction; import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; -public abstract class TextEmbeddingModel extends Model { +public abstract class ElasticsearchModel extends Model { - public TextEmbeddingModel( + public ElasticsearchModel( String inferenceEntityId, TaskType taskType, String service, - TextEmbeddingInternalServiceSettings serviceSettings + ElasticsearchInternalServiceSettings serviceSettings ) { super(new ModelConfigurations(inferenceEntityId, taskType, service, serviceSettings)); } @Override - public TextEmbeddingInternalServiceSettings getServiceSettings() { - return (TextEmbeddingInternalServiceSettings) super.getServiceSettings(); + public ElasticsearchInternalServiceSettings getServiceSettings() { + return (ElasticsearchInternalServiceSettings) super.getServiceSettings(); } abstract StartTrainedModelDeploymentAction.Request getStartTrainedModelDeploymentActionRequest(); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/textembedding/MultilingualE5SmallInternalServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/MultilingualE5SmallInternalServiceSettings.java similarity index 93% rename from x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/textembedding/MultilingualE5SmallInternalServiceSettings.java rename to x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/MultilingualE5SmallInternalServiceSettings.java index aa1de0e0beddc..5e93c1a46f796 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/textembedding/MultilingualE5SmallInternalServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/MultilingualE5SmallInternalServiceSettings.java @@ -3,11 +3,9 @@ * or more contributor license agreements. Licensed under the Elastic License * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. 
- * - * this file was contributed to by a generative AI */ -package org.elasticsearch.xpack.inference.services.textembedding; +package org.elasticsearch.xpack.inference.services.elasticsearch; import org.elasticsearch.TransportVersion; import org.elasticsearch.common.ValidationException; @@ -23,7 +21,7 @@ import static org.elasticsearch.TransportVersions.ML_TEXT_EMBEDDING_INFERENCE_SERVICE_ADDED; -public class MultilingualE5SmallInternalServiceSettings extends TextEmbeddingInternalServiceSettings { +public class MultilingualE5SmallInternalServiceSettings extends ElasticsearchInternalServiceSettings { public static final String NAME = "multilingual_e5_small_service_settings"; @@ -53,12 +51,12 @@ public static MultilingualE5SmallInternalServiceSettings.Builder fromMap(Map factory, SetOnce serviceComponents) { + public HuggingFaceBaseService(HttpRequestSender.Factory factory, ServiceComponents serviceComponents) { super(factory, serviceComponents); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceService.java index 60f947e22da95..838d3dc857fbc 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceService.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.inference.services.huggingface; -import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; @@ -16,7 +15,7 @@ import org.elasticsearch.inference.Model; import org.elasticsearch.inference.TaskType; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.services.ServiceComponents; import org.elasticsearch.xpack.inference.services.ServiceUtils; import org.elasticsearch.xpack.inference.services.huggingface.elser.HuggingFaceElserModel; @@ -27,7 +26,7 @@ public class HuggingFaceService extends HuggingFaceBaseService { public static final String NAME = "hugging_face"; - public HuggingFaceService(SetOnce factory, SetOnce serviceComponents) { + public HuggingFaceService(HttpRequestSender.Factory factory, ServiceComponents serviceComponents) { super(factory, serviceComponents); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java index 68407d8a2e029..2587b2737e164 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java @@ -7,14 +7,13 @@ package org.elasticsearch.xpack.inference.services.huggingface.elser; -import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.core.Nullable; import 
org.elasticsearch.inference.TaskType; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.services.ServiceComponents; import org.elasticsearch.xpack.inference.services.huggingface.HuggingFaceBaseService; import org.elasticsearch.xpack.inference.services.huggingface.HuggingFaceModel; @@ -24,7 +23,7 @@ public class HuggingFaceElserService extends HuggingFaceBaseService { public static final String NAME = "hugging_face_elser"; - public HuggingFaceElserService(SetOnce factory, SetOnce serviceComponents) { + public HuggingFaceElserService(HttpRequestSender.Factory factory, ServiceComponents serviceComponents) { super(factory, serviceComponents); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java index 03781450fc08c..234328de67efe 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.inference.services.openai; -import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; @@ -25,7 +24,7 @@ import org.elasticsearch.inference.TaskType; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.inference.external.action.openai.OpenAiActionCreator; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.services.SenderService; import org.elasticsearch.xpack.inference.services.ServiceComponents; import org.elasticsearch.xpack.inference.services.ServiceUtils; @@ -46,7 +45,7 @@ public class OpenAiService extends SenderService { public static final String NAME = "openai"; - public OpenAiService(SetOnce factory, SetOnce serviceComponents) { + public OpenAiService(HttpRequestSender.Factory factory, ServiceComponents serviceComponents) { super(factory, serviceComponents); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java index 5b7ffb3c8153e..96650bcca565e 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java @@ -16,7 +16,7 @@ import org.elasticsearch.xpack.inference.external.http.HttpClientManager; import org.elasticsearch.xpack.inference.external.http.HttpSettings; import org.elasticsearch.xpack.inference.external.http.retry.RetrySettings; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.external.http.sender.RequestExecutorServiceSettings; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; @@ -39,7 +39,7 @@ public static ClusterService mockClusterService(Settings settings) { var 
registeredSettings = Stream.of( HttpSettings.getSettings(), HttpClientManager.getSettings(), - HttpRequestSenderFactory.HttpRequestSender.getSettings(), + HttpRequestSender.getSettings(), ThrottlerManager.getSettings(), RetrySettings.getSettingsDefinitions(), Truncator.getSettings(), diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereActionCreatorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereActionCreatorTests.java index e7cfc784db117..66ef9910a2649 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereActionCreatorTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereActionCreatorTests.java @@ -19,7 +19,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderTests; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; import org.elasticsearch.xpack.inference.services.cohere.CohereTruncation; import org.elasticsearch.xpack.inference.services.cohere.embeddings.CohereEmbeddingType; @@ -67,7 +67,7 @@ public void shutdown() throws IOException { } public void testCreate_CohereEmbeddingsModel() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var sender = senderFactory.createSender("test_service")) { sender.start(); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereEmbeddingsActionTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereEmbeddingsActionTests.java index 7fd33f7bba58f..b504744bfe5f3 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereEmbeddingsActionTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereEmbeddingsActionTests.java @@ -23,7 +23,7 @@ import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; import org.elasticsearch.xpack.inference.external.http.HttpResult; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderTests; import org.elasticsearch.xpack.inference.external.http.sender.Sender; import org.elasticsearch.xpack.inference.external.request.cohere.CohereUtils; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; @@ -47,7 +47,6 @@ import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; import static org.elasticsearch.xpack.inference.external.http.Utils.getUrl; import static org.elasticsearch.xpack.inference.results.TextEmbeddingResultsTests.buildExpectation; -import static org.elasticsearch.xpack.inference.services.ServiceComponentsTests.createWithEmptySettings; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static 
org.hamcrest.Matchers.is; @@ -77,9 +76,9 @@ public void shutdown() throws IOException { } public void testExecute_ReturnsSuccessfulResponse() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); - try (var sender = senderFactory.createSender("test_service")) { + try (var sender = HttpRequestSenderTests.createSenderWithSingleRequestManager(senderFactory, "test_service")) { sender.start(); String responseJson = """ @@ -158,9 +157,9 @@ public void testExecute_ReturnsSuccessfulResponse() throws IOException { } public void testExecute_ReturnsSuccessfulResponse_ForInt8ResponseType() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); - try (var sender = senderFactory.createSender("test_service")) { + try (var sender = HttpRequestSenderTests.createSenderWithSingleRequestManager(senderFactory, "test_service")) { sender.start(); String responseJson = """ @@ -253,7 +252,7 @@ public void testExecute_ThrowsURISyntaxException_ForInvalidUrl() throws IOExcept public void testExecute_ThrowsElasticsearchException() { var sender = mock(Sender.class); - doThrow(new ElasticsearchException("failed")).when(sender).send(any(), any()); + doThrow(new ElasticsearchException("failed")).when(sender).send(any(), any(), any()); var action = createAction(getUrl(webServer), "secret", CohereEmbeddingsTaskSettings.EMPTY_SETTINGS, null, null, sender); @@ -274,7 +273,7 @@ public void testExecute_ThrowsElasticsearchException_WhenSenderOnFailureIsCalled listener.onFailure(new IllegalStateException("failed")); return Void.TYPE; - }).when(sender).send(any(), any()); + }).when(sender).send(any(), any(), any()); var action = createAction(getUrl(webServer), "secret", CohereEmbeddingsTaskSettings.EMPTY_SETTINGS, null, null, sender); @@ -298,7 +297,7 @@ public void testExecute_ThrowsElasticsearchException_WhenSenderOnFailureIsCalled listener.onFailure(new IllegalStateException("failed")); return Void.TYPE; - }).when(sender).send(any(), any()); + }).when(sender).send(any(), any(), any()); var action = createAction(null, "secret", CohereEmbeddingsTaskSettings.EMPTY_SETTINGS, null, null, sender); @@ -312,7 +311,7 @@ public void testExecute_ThrowsElasticsearchException_WhenSenderOnFailureIsCalled public void testExecute_ThrowsException() { var sender = mock(Sender.class); - doThrow(new IllegalArgumentException("failed")).when(sender).send(any(), any()); + doThrow(new IllegalArgumentException("failed")).when(sender).send(any(), any(), any()); var action = createAction(getUrl(webServer), "secret", CohereEmbeddingsTaskSettings.EMPTY_SETTINGS, null, null, sender); @@ -329,7 +328,7 @@ public void testExecute_ThrowsException() { public void testExecute_ThrowsExceptionWithNullUrl() { var sender = mock(Sender.class); - doThrow(new IllegalArgumentException("failed")).when(sender).send(any(), any()); + doThrow(new IllegalArgumentException("failed")).when(sender).send(any(), any(), any()); var action = createAction(null, "secret", CohereEmbeddingsTaskSettings.EMPTY_SETTINGS, null, null, sender); @@ -351,7 +350,7 @@ private CohereEmbeddingsAction createAction( ) { var model = CohereEmbeddingsModelTests.createModel(url, apiKey, taskSettings, 1024, 1024, modelName, embeddingType); - return 
new CohereEmbeddingsAction(sender, model, createWithEmptySettings(threadPool)); + return new CohereEmbeddingsAction(sender, model); } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceActionCreatorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceActionCreatorTests.java index 95b69f1231e9d..6334c669d0c1f 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceActionCreatorTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceActionCreatorTests.java @@ -20,7 +20,7 @@ import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.inference.common.TruncatorTests; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderTests; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; import org.elasticsearch.xpack.inference.results.SparseEmbeddingResultsTests; import org.elasticsearch.xpack.inference.results.TextEmbeddingResultsTests; @@ -31,7 +31,6 @@ import org.junit.Before; import java.io.IOException; -import java.net.URISyntaxException; import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; @@ -72,7 +71,7 @@ public void shutdown() throws IOException { @SuppressWarnings("unchecked") public void testExecute_ReturnsSuccessfulResponse_ForElserAction() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var sender = senderFactory.createSender("test_service")) { sender.start(); @@ -121,8 +120,14 @@ public void testExecute_ReturnsSuccessfulResponse_ForElserAction() throws IOExce } @SuppressWarnings("unchecked") - public void testSend_FailsFromInvalidResponseFormat_ForElserAction() throws IOException, URISyntaxException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + public void testSend_FailsFromInvalidResponseFormat_ForElserAction() throws IOException { + // timeout as zero for no retries + var settings = buildSettingsWithRetryFields( + TimeValue.timeValueMillis(1), + TimeValue.timeValueMinutes(1), + TimeValue.timeValueSeconds(0) + ); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager, settings); try (var sender = senderFactory.createSender("test_service")) { sender.start(); @@ -147,17 +152,7 @@ public void testSend_FailsFromInvalidResponseFormat_ForElserAction() throws IOEx var model = HuggingFaceElserModelTests.createModel(getUrl(webServer), "secret"); var actionCreator = new HuggingFaceActionCreator( sender, - new ServiceComponents( - threadPool, - mockThrottlerManager(), - // timeout as zero for no retries - buildSettingsWithRetryFields( - TimeValue.timeValueMillis(1), - TimeValue.timeValueMinutes(1), - TimeValue.timeValueSeconds(0) - ), - TruncatorTests.createTruncator() - ) + new ServiceComponents(threadPool, mockThrottlerManager(), settings, TruncatorTests.createTruncator()) ); var action = actionCreator.create(model); @@ -188,7 +183,7 @@ public void 
testSend_FailsFromInvalidResponseFormat_ForElserAction() throws IOEx @SuppressWarnings("unchecked") public void testExecute_ReturnsSuccessfulResponse_ForEmbeddingsAction() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var sender = senderFactory.createSender("test_service")) { sender.start(); @@ -233,8 +228,14 @@ public void testExecute_ReturnsSuccessfulResponse_ForEmbeddingsAction() throws I } @SuppressWarnings("unchecked") - public void testSend_FailsFromInvalidResponseFormat_ForEmbeddingsAction() throws IOException, URISyntaxException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + public void testSend_FailsFromInvalidResponseFormat_ForEmbeddingsAction() throws IOException { + // timeout as zero for no retries + var settings = buildSettingsWithRetryFields( + TimeValue.timeValueMillis(1), + TimeValue.timeValueMinutes(1), + TimeValue.timeValueSeconds(0) + ); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager, settings); try (var sender = senderFactory.createSender("test_service")) { sender.start(); @@ -257,17 +258,7 @@ public void testSend_FailsFromInvalidResponseFormat_ForEmbeddingsAction() throws var model = HuggingFaceEmbeddingsModelTests.createModel(getUrl(webServer), "secret"); var actionCreator = new HuggingFaceActionCreator( sender, - new ServiceComponents( - threadPool, - mockThrottlerManager(), - // timeout as zero for no retries - buildSettingsWithRetryFields( - TimeValue.timeValueMillis(1), - TimeValue.timeValueMinutes(1), - TimeValue.timeValueSeconds(0) - ), - TruncatorTests.createTruncator() - ) + new ServiceComponents(threadPool, mockThrottlerManager(), settings, TruncatorTests.createTruncator()) ); var action = actionCreator.create(model); @@ -297,7 +288,7 @@ public void testSend_FailsFromInvalidResponseFormat_ForEmbeddingsAction() throws } public void testExecute_ReturnsSuccessfulResponse_AfterTruncating() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var sender = senderFactory.createSender("test_service")) { sender.start(); @@ -362,7 +353,7 @@ public void testExecute_ReturnsSuccessfulResponse_AfterTruncating() throws IOExc } public void testExecute_TruncatesInputBeforeSending() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var sender = senderFactory.createSender("test_service")) { sender.start(); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceActionTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceActionTests.java index 25b05327a21b7..7b332e8c6634d 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceActionTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceActionTests.java @@ -16,7 +16,6 @@ 
import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.inference.common.TruncatorTests; -import org.elasticsearch.xpack.inference.external.http.HttpResult; import org.elasticsearch.xpack.inference.external.http.retry.AlwaysRetryingResponseHandler; import org.elasticsearch.xpack.inference.external.http.sender.Sender; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; @@ -54,7 +53,7 @@ public void shutdown() throws IOException { public void testExecute_ThrowsElasticsearchException_WhenSenderThrows() { var sender = mock(Sender.class); - doThrow(new ElasticsearchException("failed")).when(sender).send(any(), any()); + doThrow(new ElasticsearchException("failed")).when(sender).send(any(), any(), any()); var action = createAction(URL, sender); @@ -71,11 +70,11 @@ public void testExecute_ThrowsElasticsearchException_WhenSenderOnFailureIsCalled doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[1]; listener.onFailure(new IllegalStateException("failed")); return Void.TYPE; - }).when(sender).send(any(), any()); + }).when(sender).send(any(), any(), any()); var action = createAction(URL, sender, "inferenceEntityId"); @@ -92,7 +91,7 @@ public void testExecute_ThrowsElasticsearchException_WhenSenderOnFailureIsCalled public void testExecute_ThrowsException() { var sender = mock(Sender.class); - doThrow(new IllegalArgumentException("failed")).when(sender).send(any(), any()); + doThrow(new IllegalArgumentException("failed")).when(sender).send(any(), any(), any()); var action = createAction(URL, sender, "inferenceEntityId"); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreatorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreatorTests.java index cf1a569548143..a844061fa48e1 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreatorTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreatorTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.inference.external.action.openai; import org.apache.http.HttpHeaders; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; @@ -19,7 +20,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderTests; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; import org.junit.After; import org.junit.Before; @@ -28,10 +29,12 @@ import java.util.List; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; import static 
org.elasticsearch.xpack.inference.external.http.Utils.getUrl; +import static org.elasticsearch.xpack.inference.external.http.retry.RetrySettingsTests.buildSettingsWithRetryFields; import static org.elasticsearch.xpack.inference.external.request.openai.OpenAiUtils.ORGANIZATION_HEADER; import static org.elasticsearch.xpack.inference.results.TextEmbeddingResultsTests.buildExpectation; import static org.elasticsearch.xpack.inference.services.ServiceComponentsTests.createWithEmptySettings; @@ -63,7 +66,7 @@ public void shutdown() throws IOException { } public void testCreate_OpenAiEmbeddingsModel() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var sender = senderFactory.createSender("test_service")) { sender.start(); @@ -115,8 +118,173 @@ public void testCreate_OpenAiEmbeddingsModel() throws IOException { } } + public void testCreate_OpenAiEmbeddingsModel_WithoutUser() throws IOException { + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); + + try (var sender = senderFactory.createSender("test_service")) { + sender.start(); + + String responseJson = """ + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + 0.0123, + -0.0123 + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + + var model = createModel(getUrl(webServer), "org", "secret", "model", null); + var actionCreator = new OpenAiActionCreator(sender, createWithEmptySettings(threadPool)); + var overriddenTaskSettings = getRequestTaskSettingsMap(null); + var action = actionCreator.create(model, overriddenTaskSettings); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(List.of("abc"), listener); + + var result = listener.actionGet(TIMEOUT); + + assertThat(result.asMap(), is(buildExpectation(List.of(List.of(0.0123F, -0.0123F))))); + assertThat(webServer.requests(), hasSize(1)); + assertNull(webServer.requests().get(0).getUri().getQuery()); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer secret")); + assertThat(webServer.requests().get(0).getHeader(ORGANIZATION_HEADER), equalTo("org")); + + var requestMap = entityAsMap(webServer.requests().get(0).getBody()); + assertThat(requestMap.size(), is(2)); + assertThat(requestMap.get("input"), is(List.of("abc"))); + assertThat(requestMap.get("model"), is("model")); + } + } + + public void testCreate_OpenAiEmbeddingsModel_WithoutOrganization() throws IOException { + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); + + try (var sender = senderFactory.createSender("test_service")) { + sender.start(); + + String responseJson = """ + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + 0.0123, + -0.0123 + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + + var model = createModel(getUrl(webServer), null, "secret", "model", null); + var actionCreator = 
new OpenAiActionCreator(sender, createWithEmptySettings(threadPool)); + var overriddenTaskSettings = getRequestTaskSettingsMap("overridden_user"); + var action = actionCreator.create(model, overriddenTaskSettings); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(List.of("abc"), listener); + + var result = listener.actionGet(TIMEOUT); + + assertThat(result.asMap(), is(buildExpectation(List.of(List.of(0.0123F, -0.0123F))))); + assertThat(webServer.requests(), hasSize(1)); + assertNull(webServer.requests().get(0).getUri().getQuery()); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer secret")); + assertNull(webServer.requests().get(0).getHeader(ORGANIZATION_HEADER)); + + var requestMap = entityAsMap(webServer.requests().get(0).getBody()); + assertThat(requestMap.size(), is(3)); + assertThat(requestMap.get("input"), is(List.of("abc"))); + assertThat(requestMap.get("model"), is("model")); + assertThat(requestMap.get("user"), is("overridden_user")); + } + } + + public void testCreate_OpenAiEmbeddingsModel_FailsFromInvalidResponseFormat() throws IOException { + // timeout as zero for no retries + var settings = buildSettingsWithRetryFields( + TimeValue.timeValueMillis(1), + TimeValue.timeValueMinutes(1), + TimeValue.timeValueSeconds(0) + ); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager, settings); + + try (var sender = senderFactory.createSender("test_service")) { + sender.start(); + + String responseJson = """ + { + "object": "list", + "data_does_not_exist": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + 0.0123, + -0.0123 + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + + var model = createModel(getUrl(webServer), null, "secret", "model", null); + var actionCreator = new OpenAiActionCreator(sender, createWithEmptySettings(threadPool)); + var overriddenTaskSettings = getRequestTaskSettingsMap("overridden_user"); + var action = actionCreator.create(model, overriddenTaskSettings); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(List.of("abc"), listener); + + var thrownException = expectThrows(ElasticsearchStatusException.class, () -> listener.actionGet(TIMEOUT)); + assertThat(thrownException.getMessage(), is(format("Failed to send OpenAI embeddings request to [%s]", getUrl(webServer)))); + assertThat(thrownException.getCause().getMessage(), is("Failed to find required field [data] in OpenAI embeddings response")); + + assertThat(webServer.requests(), hasSize(1)); + assertNull(webServer.requests().get(0).getUri().getQuery()); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer secret")); + assertNull(webServer.requests().get(0).getHeader(ORGANIZATION_HEADER)); + + var requestMap = entityAsMap(webServer.requests().get(0).getBody()); + assertThat(requestMap.size(), is(3)); + assertThat(requestMap.get("input"), is(List.of("abc"))); + assertThat(requestMap.get("model"), is("model")); + assertThat(requestMap.get("user"), is("overridden_user")); + } + } + public void 
testExecute_ReturnsSuccessfulResponse_AfterTruncating_From413StatusCode() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var sender = senderFactory.createSender("test_service")) { sender.start(); @@ -199,7 +367,7 @@ public void testExecute_ReturnsSuccessfulResponse_AfterTruncating_From413StatusC } public void testExecute_ReturnsSuccessfulResponse_AfterTruncating_From400StatusCode() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var sender = senderFactory.createSender("test_service")) { sender.start(); @@ -282,7 +450,7 @@ public void testExecute_ReturnsSuccessfulResponse_AfterTruncating_From400StatusC } public void testExecute_TruncatesInputBeforeSending() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var sender = senderFactory.createSender("test_service")) { sender.start(); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiEmbeddingsActionTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiEmbeddingsActionTests.java index 6bc8e2d61d579..c803121e6da79 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiEmbeddingsActionTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiEmbeddingsActionTests.java @@ -21,10 +21,10 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; -import org.elasticsearch.xpack.inference.external.http.HttpResult; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.external.http.sender.Sender; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; +import org.elasticsearch.xpack.inference.services.ServiceComponentsTests; import org.junit.After; import org.junit.Before; @@ -70,7 +70,11 @@ public void shutdown() throws IOException { } public void testExecute_ReturnsSuccessfulResponse() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = new HttpRequestSender.Factory( + ServiceComponentsTests.createWithEmptySettings(threadPool), + clientManager, + mockClusterServiceEmpty() + ); try (var sender = senderFactory.createSender("test_service")) { sender.start(); @@ -131,7 +135,7 @@ public void testExecute_ThrowsURISyntaxException_ForInvalidUrl() throws IOExcept public void testExecute_ThrowsElasticsearchException() { var sender = mock(Sender.class); - doThrow(new ElasticsearchException("failed")).when(sender).send(any(), any()); + doThrow(new ElasticsearchException("failed")).when(sender).send(any(), any(), any()); var action = createAction(getUrl(webServer), 
"org", "secret", "model", "user", sender); @@ -148,11 +152,11 @@ public void testExecute_ThrowsElasticsearchException_WhenSenderOnFailureIsCalled doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[1]; listener.onFailure(new IllegalStateException("failed")); return Void.TYPE; - }).when(sender).send(any(), any()); + }).when(sender).send(any(), any(), any()); var action = createAction(getUrl(webServer), "org", "secret", "model", "user", sender); @@ -169,11 +173,11 @@ public void testExecute_ThrowsElasticsearchException_WhenSenderOnFailureIsCalled doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[1]; listener.onFailure(new IllegalStateException("failed")); return Void.TYPE; - }).when(sender).send(any(), any()); + }).when(sender).send(any(), any(), any()); var action = createAction(null, "org", "secret", "model", "user", sender); @@ -187,7 +191,7 @@ public void testExecute_ThrowsElasticsearchException_WhenSenderOnFailureIsCalled public void testExecute_ThrowsException() { var sender = mock(Sender.class); - doThrow(new IllegalArgumentException("failed")).when(sender).send(any(), any()); + doThrow(new IllegalArgumentException("failed")).when(sender).send(any(), any(), any()); var action = createAction(getUrl(webServer), "org", "secret", "model", "user", sender); @@ -201,7 +205,7 @@ public void testExecute_ThrowsException() { public void testExecute_ThrowsExceptionWithNullUrl() { var sender = mock(Sender.class); - doThrow(new IllegalArgumentException("failed")).when(sender).send(any(), any()); + doThrow(new IllegalArgumentException("failed")).when(sender).send(any(), any(), any()); var action = createAction(null, "org", "secret", "model", "user", sender); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/cohere/CohereResponseHandlerTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/cohere/CohereResponseHandlerTests.java index 31945d5a8b4fc..d64ac495c8c99 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/cohere/CohereResponseHandlerTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/cohere/CohereResponseHandlerTests.java @@ -44,6 +44,16 @@ public void testCheckForFailureStatusCode_ThrowsFor503() { MatcherAssert.assertThat(((ElasticsearchStatusException) exception.getCause()).status(), is(RestStatus.BAD_REQUEST)); } + public void testCheckForFailureStatusCode_ThrowsFor500_WithShouldRetryTrue() { + var exception = expectThrows(RetryException.class, () -> callCheckForFailureStatusCode(500, "id")); + assertTrue(exception.shouldRetry()); + MatcherAssert.assertThat( + exception.getCause().getMessage(), + containsString("Received a server error status code for request from inference entity id [id] status [500]") + ); + MatcherAssert.assertThat(((ElasticsearchStatusException) exception.getCause()).status(), is(RestStatus.BAD_REQUEST)); + } + public void testCheckForFailureStatusCode_ThrowsFor429() { var exception = expectThrows(RetryException.class, () -> callCheckForFailureStatusCode(429, "id")); assertTrue(exception.shouldRetry()); diff --git 
a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/retry/RetrySettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/retry/RetrySettingsTests.java index 940205a663337..2c63e085a9937 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/retry/RetrySettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/retry/RetrySettingsTests.java @@ -11,6 +11,8 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESTestCase; +import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; + public class RetrySettingsTests extends ESTestCase { /** @@ -24,7 +26,7 @@ public static RetrySettings createDefaultRetrySettings() { public static RetrySettings createRetrySettings(TimeValue initialDelay, TimeValue maxDelayBound, TimeValue timeout) { var settings = buildSettingsWithRetryFields(initialDelay, maxDelayBound, timeout); - return new RetrySettings(settings); + return new RetrySettings(settings, mockClusterServiceEmpty()); } public static Settings buildSettingsWithRetryFields(TimeValue initialDelay, TimeValue maxDelayBound, TimeValue timeout) { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSenderTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSenderTests.java index 8d60c2f5bfa48..30bd40bdcc111 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSenderTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSenderTests.java @@ -10,7 +10,9 @@ import org.apache.http.ConnectionClosedException; import org.apache.http.HttpResponse; import org.apache.http.StatusLine; +import org.apache.http.client.protocol.HttpClientContext; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; @@ -19,14 +21,15 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.external.http.HttpClient; import org.elasticsearch.xpack.inference.external.http.HttpResult; -import org.elasticsearch.xpack.inference.external.http.sender.Sender; import org.elasticsearch.xpack.inference.external.request.HttpRequestTests; import org.elasticsearch.xpack.inference.external.request.Request; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; import org.junit.Before; import org.mockito.stubbing.Answer; +import java.io.IOException; import java.net.UnknownHostException; import static org.elasticsearch.xpack.inference.external.http.retry.RetrySettingsTests.createDefaultRetrySettings; @@ -50,17 +53,17 @@ public void init() throws Exception { taskQueue = new DeterministicTaskQueue(); } - public void testSend_CallsSenderAgain_AfterValidateResponseThrowsAnException() { - var sender = mock(Sender.class); + public void testSend_CallsSenderAgain_AfterValidateResponseThrowsAnException() throws IOException { + var httpClient = mock(HttpClient.class); var httpResponse = mockHttpResponse(); doAnswer(invocation -> { @SuppressWarnings("unchecked") - 
ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onResponse(new HttpResult(httpResponse, new byte[0])); return Void.TYPE; - }).when(sender).send(any(), any()); + }).when(httpClient).send(any(), any(), any()); var inferenceResults = mock(InferenceServiceResults.class); Answer answer = (invocation) -> inferenceResults; @@ -71,72 +74,58 @@ public void testSend_CallsSenderAgain_AfterValidateResponseThrowsAnException() { // bounded wild card list, thenAnswer must be used instead. when(handler.parseResult(any(), any())).thenAnswer(answer); - var retrier = new RetryingHttpSender( - sender, - mock(ThrottlerManager.class), - mock(Logger.class), - createDefaultRetrySettings(), - taskQueue.getThreadPool(), - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + var retrier = createRetrier(httpClient); var listener = new PlainActionFuture(); - executeTasks(() -> retrier.send(mockRequest(), handler, listener), 1); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1); assertThat(listener.actionGet(TIMEOUT), is(inferenceResults)); - verify(sender, times(2)).send(any(), any()); - verifyNoMoreInteractions(sender); + verify(httpClient, times(2)).send(any(), any(), any()); + verifyNoMoreInteractions(httpClient); } - public void testSend_CallsSenderAgain_WhenAFailureStatusCodeIsReturned() { + public void testSend_CallsSenderAgain_WhenAFailureStatusCodeIsReturned() throws IOException { var statusLine = mock(StatusLine.class); when(statusLine.getStatusCode()).thenReturn(300).thenReturn(200); var httpResponse = mock(HttpResponse.class); when(httpResponse.getStatusLine()).thenReturn(statusLine); - var sender = mock(Sender.class); + var httpClient = mock(HttpClient.class); doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onResponse(new HttpResult(httpResponse, new byte[] { 'a' })); return Void.TYPE; - }).when(sender).send(any(), any()); + }).when(httpClient).send(any(), any(), any()); var inferenceResults = mock(InferenceServiceResults.class); var handler = new AlwaysRetryingResponseHandler("test", result -> inferenceResults); - var retrier = new RetryingHttpSender( - sender, - mock(ThrottlerManager.class), - mock(Logger.class), - createDefaultRetrySettings(), - taskQueue.getThreadPool(), - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + var retrier = createRetrier(httpClient); var listener = new PlainActionFuture(); - executeTasks(() -> retrier.send(mockRequest(), handler, listener), 1); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1); assertThat(listener.actionGet(TIMEOUT), is(inferenceResults)); - verify(sender, times(2)).send(any(), any()); - verifyNoMoreInteractions(sender); + verify(httpClient, times(2)).send(any(), any(), any()); + verifyNoMoreInteractions(httpClient); } - public void testSend_CallsSenderAgain_WhenParsingFailsOnce() { - var sender = mock(Sender.class); + public void testSend_CallsSenderAgain_WhenParsingFailsOnce() throws IOException { + var httpClient = mock(HttpClient.class); var httpResponse = mockHttpResponse(); doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = 
(ActionListener) invocation.getArguments()[2]; listener.onResponse(new HttpResult(httpResponse, new byte[] { 'a' })); return Void.TYPE; - }).when(sender).send(any(), any()); + }).when(httpClient).send(any(), any(), any()); var inferenceResults = mock(InferenceServiceResults.class); Answer answer = (invocation) -> inferenceResults; @@ -144,34 +133,27 @@ public void testSend_CallsSenderAgain_WhenParsingFailsOnce() { var handler = mock(ResponseHandler.class); when(handler.parseResult(any(), any())).thenThrow(new RetryException(true, "failed")).thenAnswer(answer); - var retrier = new RetryingHttpSender( - sender, - mock(ThrottlerManager.class), - mock(Logger.class), - createDefaultRetrySettings(), - taskQueue.getThreadPool(), - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + var retrier = createRetrier(httpClient); var listener = new PlainActionFuture(); - executeTasks(() -> retrier.send(mockRequest(), handler, listener), 1); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1); assertThat(listener.actionGet(TIMEOUT), is(inferenceResults)); - verify(sender, times(2)).send(any(), any()); - verifyNoMoreInteractions(sender); + verify(httpClient, times(2)).send(any(), any(), any()); + verifyNoMoreInteractions(httpClient); } - public void testSend_DoesNotCallSenderAgain_WhenParsingFailsWithNonRetryableException() { - var sender = mock(Sender.class); + public void testSend_DoesNotCallSenderAgain_WhenParsingFailsWithNonRetryableException() throws IOException { + var httpClient = mock(HttpClient.class); var httpResponse = mockHttpResponse(); doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onResponse(new HttpResult(httpResponse, new byte[] { 'a' })); return Void.TYPE; - }).when(sender).send(any(), any()); + }).when(httpClient).send(any(), any(), any()); var inferenceResults = mock(InferenceServiceResults.class); Answer answer = (invocation) -> inferenceResults; @@ -179,41 +161,34 @@ public void testSend_DoesNotCallSenderAgain_WhenParsingFailsWithNonRetryableExce var handler = mock(ResponseHandler.class); when(handler.parseResult(any(), any())).thenThrow(new IllegalStateException("failed")).thenAnswer(answer); - var retrier = new RetryingHttpSender( - sender, - mock(ThrottlerManager.class), - mock(Logger.class), - createDefaultRetrySettings(), - taskQueue.getThreadPool(), - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + var retrier = createRetrier(httpClient); var listener = new PlainActionFuture(); - executeTasks(() -> retrier.send(mockRequest(), handler, listener), 0); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 0); var thrownException = expectThrows(IllegalStateException.class, () -> listener.actionGet(TIMEOUT)); assertThat(thrownException.getMessage(), is("failed")); - verify(sender, times(1)).send(any(), any()); - verifyNoMoreInteractions(sender); + verify(httpClient, times(1)).send(any(), any(), any()); + verifyNoMoreInteractions(httpClient); } - public void testSend_CallsSenderAgain_WhenHttpResultListenerCallsOnFailureOnce() { - var sender = mock(Sender.class); + public void testSend_CallsSenderAgain_WhenHttpResultListenerCallsOnFailureOnce() throws IOException { + var httpClient = mock(HttpClient.class); doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener 
listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onFailure(new RetryException(true, "failed")); return Void.TYPE; }).doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onResponse(new HttpResult(mock(HttpResponse.class), new byte[] { 'a' })); return Void.TYPE; - }).when(sender).send(any(), any()); + }).when(httpClient).send(any(), any(), any()); var inferenceResults = mock(InferenceServiceResults.class); Answer answer = (invocation) -> inferenceResults; @@ -221,39 +196,32 @@ public void testSend_CallsSenderAgain_WhenHttpResultListenerCallsOnFailureOnce() var handler = mock(ResponseHandler.class); when(handler.parseResult(any(), any())).thenAnswer(answer); - var retrier = new RetryingHttpSender( - sender, - mock(ThrottlerManager.class), - mock(Logger.class), - createDefaultRetrySettings(), - taskQueue.getThreadPool(), - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + var retrier = createRetrier(httpClient); var listener = new PlainActionFuture(); - executeTasks(() -> retrier.send(mockRequest(), handler, listener), 1); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1); assertThat(listener.actionGet(TIMEOUT), is(inferenceResults)); - verify(sender, times(2)).send(any(), any()); - verifyNoMoreInteractions(sender); + verify(httpClient, times(2)).send(any(), any(), any()); + verifyNoMoreInteractions(httpClient); } - public void testSend_CallsSenderAgain_WhenHttpResultListenerCallsOnFailureOnce_WithContentTooLargeException() { - var sender = mock(Sender.class); + public void testSend_CallsSenderAgain_WhenHttpResultListenerCallsOnFailureOnce_WithContentTooLargeException() throws IOException { + var httpClient = mock(HttpClient.class); doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onFailure(new ContentTooLargeException(new IllegalStateException("failed"))); return Void.TYPE; }).doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onResponse(new HttpResult(mock(HttpResponse.class), new byte[] { 'a' })); return Void.TYPE; - }).when(sender).send(any(), any()); + }).when(httpClient).send(any(), any(), any()); var inferenceResults = mock(InferenceServiceResults.class); Answer answer = (invocation) -> inferenceResults; @@ -261,39 +229,32 @@ public void testSend_CallsSenderAgain_WhenHttpResultListenerCallsOnFailureOnce_W var handler = mock(ResponseHandler.class); when(handler.parseResult(any(), any())).thenAnswer(answer); - var retrier = new RetryingHttpSender( - sender, - mock(ThrottlerManager.class), - mock(Logger.class), - createDefaultRetrySettings(), - taskQueue.getThreadPool(), - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + var retrier = createRetrier(httpClient); var listener = new PlainActionFuture(); - executeTasks(() -> retrier.send(mockRequest(), handler, listener), 1); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1); assertThat(listener.actionGet(TIMEOUT), 
is(inferenceResults)); - verify(sender, times(2)).send(any(), any()); - verifyNoMoreInteractions(sender); + verify(httpClient, times(2)).send(any(), any(), any()); + verifyNoMoreInteractions(httpClient); } - public void testSend_CallsSenderAgain_WhenHttpResultListenerCallsOnFailureOnceWithConnectionClosedException() { - var sender = mock(Sender.class); + public void testSend_CallsSenderAgain_WhenHttpResultListenerCallsOnFailureOnceWithConnectionClosedException() throws IOException { + var httpClient = mock(HttpClient.class); doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onFailure(new ConnectionClosedException("failed")); return Void.TYPE; }).doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onResponse(new HttpResult(mock(HttpResponse.class), new byte[] { 'a' })); return Void.TYPE; - }).when(sender).send(any(), any()); + }).when(httpClient).send(any(), any(), any()); var inferenceResults = mock(InferenceServiceResults.class); Answer answer = (invocation) -> inferenceResults; @@ -301,33 +262,26 @@ public void testSend_CallsSenderAgain_WhenHttpResultListenerCallsOnFailureOnceWi var handler = mock(ResponseHandler.class); when(handler.parseResult(any(), any())).thenAnswer(answer); - var retrier = new RetryingHttpSender( - sender, - mock(ThrottlerManager.class), - mock(Logger.class), - createDefaultRetrySettings(), - taskQueue.getThreadPool(), - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + var retrier = createRetrier(httpClient); var listener = new PlainActionFuture(); - executeTasks(() -> retrier.send(mockRequest(), handler, listener), 1); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1); assertThat(listener.actionGet(TIMEOUT), is(inferenceResults)); - verify(sender, times(2)).send(any(), any()); - verifyNoMoreInteractions(sender); + verify(httpClient, times(2)).send(any(), any(), any()); + verifyNoMoreInteractions(httpClient); } - public void testSend_ReturnsFailure_WhenHttpResultListenerCallsOnFailureOnceWithUnknownHostException() { - var sender = mock(Sender.class); + public void testSend_ReturnsFailure_WhenHttpResultListenerCallsOnFailureOnceWithUnknownHostException() throws IOException { + var httpClient = mock(HttpClient.class); doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onFailure(new UnknownHostException("failed")); return Void.TYPE; - }).when(sender).send(any(), any()); + }).when(httpClient).send(any(), any(), any()); var inferenceResults = mock(InferenceServiceResults.class); Answer answer = (invocation) -> inferenceResults; @@ -335,37 +289,55 @@ public void testSend_ReturnsFailure_WhenHttpResultListenerCallsOnFailureOnceWith var handler = mock(ResponseHandler.class); when(handler.parseResult(any(), any())).thenAnswer(answer); - var retrier = new RetryingHttpSender( - sender, - mock(ThrottlerManager.class), - mock(Logger.class), - createDefaultRetrySettings(), - taskQueue.getThreadPool(), - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + var retrier = createRetrier(httpClient); var listener = new 
PlainActionFuture(); - executeTasks(() -> retrier.send(mockRequest(), handler, listener), 0); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 0); var thrownException = expectThrows(ElasticsearchStatusException.class, () -> listener.actionGet(TIMEOUT)); assertThat(thrownException.getMessage(), is("Invalid host [null], please check that the URL is correct.")); - verify(sender, times(1)).send(any(), any()); - verifyNoMoreInteractions(sender); + verify(httpClient, times(1)).send(any(), any(), any()); + verifyNoMoreInteractions(httpClient); + } + + public void testSend_ReturnsElasticsearchExceptionFailure_WhenTheHttpClientThrowsAnIllegalStateException() throws IOException { + var httpClient = mock(HttpClient.class); + + doAnswer(invocation -> { throw new IllegalStateException("failed"); }).when(httpClient).send(any(), any(), any()); + + var inferenceResults = mock(InferenceServiceResults.class); + Answer answer = (invocation) -> inferenceResults; + + var handler = mock(ResponseHandler.class); + when(handler.parseResult(any(), any())).thenAnswer(answer); + + var retrier = createRetrier(httpClient); + + var listener = new PlainActionFuture(); + executeTasks( + () -> retrier.send(mock(Logger.class), mockRequest("id"), HttpClientContext.create(), () -> false, handler, listener), + 0 + ); + + var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + assertThat(thrownException.getMessage(), is("Http client failed to send request from inference entity id [id]")); + verify(httpClient, times(1)).send(any(), any(), any()); + verifyNoMoreInteractions(httpClient); } - public void testSend_ReturnsFailure_WhenValidateResponseThrowsAnException_AfterOneRetry() { + public void testSend_ReturnsFailure_WhenValidateResponseThrowsAnException_AfterOneRetry() throws IOException { var httpResponse = mock(HttpResponse.class); when(httpResponse.getStatusLine()).thenReturn(mock(StatusLine.class)); - var sender = mock(Sender.class); + var sender = mock(HttpClient.class); doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onResponse(new HttpResult(httpResponse, new byte[0])); return Void.TYPE; - }).when(sender).send(any(), any()); + }).when(sender).send(any(), any(), any()); var inferenceResults = mock(InferenceServiceResults.class); Answer answer = (invocation) -> inferenceResults; @@ -376,40 +348,33 @@ public void testSend_ReturnsFailure_WhenValidateResponseThrowsAnException_AfterO .validateResponse(any(), any(), any(), any()); when(handler.parseResult(any(), any())).thenAnswer(answer); - var retrier = new RetryingHttpSender( - sender, - mock(ThrottlerManager.class), - mock(Logger.class), - createDefaultRetrySettings(), - taskQueue.getThreadPool(), - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + var retrier = createRetrier(sender); var listener = new PlainActionFuture(); - executeTasks(() -> retrier.send(mockRequest(), handler, listener), 1); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1); var thrownException = expectThrows(IllegalStateException.class, () -> listener.actionGet(TIMEOUT)); assertThat(thrownException.getMessage(), is("failed again")); assertThat(thrownException.getSuppressed().length, is(1)); 
assertThat(thrownException.getSuppressed()[0].getMessage(), is("failed")); - verify(sender, times(2)).send(any(), any()); + verify(sender, times(2)).send(any(), any(), any()); verifyNoMoreInteractions(sender); } - public void testSend_ReturnsFailure_WhenValidateResponseThrowsAnElasticsearchException_AfterOneRetry() { + public void testSend_ReturnsFailure_WhenValidateResponseThrowsAnElasticsearchException_AfterOneRetry() throws IOException { var httpResponse = mock(HttpResponse.class); when(httpResponse.getStatusLine()).thenReturn(mock(StatusLine.class)); - var sender = mock(Sender.class); + var httpClient = mock(HttpClient.class); doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onResponse(new HttpResult(httpResponse, new byte[0])); return Void.TYPE; - }).when(sender).send(any(), any()); + }).when(httpClient).send(any(), any(), any()); var inferenceResults = mock(InferenceServiceResults.class); Answer answer = (invocation) -> inferenceResults; @@ -420,101 +385,74 @@ public void testSend_ReturnsFailure_WhenValidateResponseThrowsAnElasticsearchExc .validateResponse(any(), any(), any(), any()); when(handler.parseResult(any(), any())).thenAnswer(answer); - var retrier = new RetryingHttpSender( - sender, - mock(ThrottlerManager.class), - mock(Logger.class), - createDefaultRetrySettings(), - taskQueue.getThreadPool(), - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + var retrier = createRetrier(httpClient); var listener = new PlainActionFuture(); - executeTasks(() -> retrier.send(mockRequest(), handler, listener), 1); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1); var thrownException = expectThrows(RetryException.class, () -> listener.actionGet(TIMEOUT)); assertThat(thrownException.getMessage(), is("failed again")); assertThat(thrownException.getSuppressed().length, is(1)); assertThat(thrownException.getSuppressed()[0].getMessage(), is("failed")); - verify(sender, times(2)).send(any(), any()); - verifyNoMoreInteractions(sender); + verify(httpClient, times(2)).send(any(), any(), any()); + verifyNoMoreInteractions(httpClient); } - public void testSend_ReturnsFailure_WhenHttpResultsListenerCallsOnFailure_AfterOneRetry() { - var httpResponse = mock(HttpResponse.class); - when(httpResponse.getStatusLine()).thenReturn(mock(StatusLine.class)); - - var sender = mock(Sender.class); + public void testSend_ReturnsFailure_WhenHttpResultsListenerCallsOnFailure_AfterOneRetry() throws IOException { + var httpClient = mock(HttpClient.class); doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onFailure(new RetryException(true, "failed")); return Void.TYPE; }).doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onFailure(new RetryException(false, "failed again")); return Void.TYPE; - }).when(sender).send(any(), any()); + }).when(httpClient).send(any(), any(), any()); var handler = mock(ResponseHandler.class); - var retrier = new RetryingHttpSender( - sender, - mock(ThrottlerManager.class), - mock(Logger.class), - createDefaultRetrySettings(), - 
taskQueue.getThreadPool(), - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + var retrier = createRetrier(httpClient); var listener = new PlainActionFuture(); - executeTasks(() -> retrier.send(mockRequest(), handler, listener), 1); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1); var thrownException = expectThrows(RetryException.class, () -> listener.actionGet(TIMEOUT)); assertThat(thrownException.getMessage(), is("failed again")); assertThat(thrownException.getSuppressed().length, is(1)); assertThat(thrownException.getSuppressed()[0].getMessage(), is("failed")); - verify(sender, times(2)).send(any(), any()); - verifyNoMoreInteractions(sender); + verify(httpClient, times(2)).send(any(), any(), any()); + verifyNoMoreInteractions(httpClient); } - public void testSend_ReturnsFailure_WhenHttpResultsListenerCallsOnFailure_WithNonRetryableException() { - var httpResponse = mock(HttpResponse.class); - when(httpResponse.getStatusLine()).thenReturn(mock(StatusLine.class)); - - var sender = mock(Sender.class); + public void testSend_ReturnsFailure_WhenHttpResultsListenerCallsOnFailure_WithNonRetryableException() throws IOException { + var httpClient = mock(HttpClient.class); doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onFailure(new IllegalStateException("failed")); return Void.TYPE; - }).when(sender).send(any(), any()); + }).when(httpClient).send(any(), any(), any()); var handler = mock(ResponseHandler.class); - var retrier = new RetryingHttpSender( - sender, - mock(ThrottlerManager.class), - mock(Logger.class), - createDefaultRetrySettings(), - taskQueue.getThreadPool(), - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + var retrier = createRetrier(httpClient); var listener = new PlainActionFuture(); - executeTasks(() -> retrier.send(mockRequest(), handler, listener), 0); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 0); var thrownException = expectThrows(IllegalStateException.class, () -> listener.actionGet(TIMEOUT)); assertThat(thrownException.getMessage(), is("failed")); assertThat(thrownException.getSuppressed().length, is(0)); - verify(sender, times(1)).send(any(), any()); - verifyNoMoreInteractions(sender); + verify(httpClient, times(1)).send(any(), any(), any()); + verifyNoMoreInteractions(httpClient); } private static HttpResponse mockHttpResponse() { @@ -540,10 +478,25 @@ private void executeTasks(Runnable runnable, int retries) { } private static Request mockRequest() { + return mockRequest("inferenceEntityId"); + } + + private static Request mockRequest(String inferenceEntityId) { var request = mock(Request.class); when(request.truncate()).thenReturn(request); - when(request.createHttpRequest()).thenReturn(HttpRequestTests.createMock("inferenceEntityId")); + when(request.createHttpRequest()).thenReturn(HttpRequestTests.createMock(inferenceEntityId)); + when(request.getInferenceEntityId()).thenReturn(inferenceEntityId); return request; } + + private RetryingHttpSender createRetrier(HttpClient httpClient) { + return new RetryingHttpSender( + httpClient, + mock(ThrottlerManager.class), + createDefaultRetrySettings(), + taskQueue.getThreadPool(), + EsExecutors.DIRECT_EXECUTOR_SERVICE + ); + } } diff --git 
a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/ExecutableRequestCreatorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/ExecutableRequestCreatorTests.java new file mode 100644 index 0000000000000..b4e770141939b --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/ExecutableRequestCreatorTests.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.apache.http.client.protocol.HttpClientContext; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.http.retry.RequestSender; +import org.elasticsearch.xpack.inference.external.http.retry.ResponseHandler; +import org.elasticsearch.xpack.inference.external.request.RequestTests; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class ExecutableRequestCreatorTests { + public static ExecutableRequestCreator createMock() { + var mockCreator = mock(ExecutableRequestCreator.class); + when(mockCreator.create(anyList(), any(), any(), any(), any())).thenReturn(() -> {}); + + return mockCreator; + } + + public static ExecutableRequestCreator createMock(RequestSender requestSender) { + return createMock(requestSender, "id"); + } + + public static ExecutableRequestCreator createMock(RequestSender requestSender, String modelId) { + var mockCreator = mock(ExecutableRequestCreator.class); + + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[4]; + return (Runnable) () -> requestSender.send( + mock(Logger.class), + RequestTests.mockRequest(modelId), + HttpClientContext.create(), + () -> false, + mock(ResponseHandler.class), + listener + ); + }).when(mockCreator).create(anyList(), any(), any(), any(), any()); + + return mockCreator; + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderFactoryTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderTests.java similarity index 53% rename from x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderFactoryTests.java rename to x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderTests.java index 6b085f8dd80a7..79b17f8dff29d 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderFactoryTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.TimeValue; +import 
org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.http.MockResponse; import org.elasticsearch.test.http.MockWebServer; @@ -21,14 +22,13 @@ import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.inference.external.http.HttpClient; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; -import org.elasticsearch.xpack.inference.external.http.HttpResult; -import org.elasticsearch.xpack.inference.external.request.HttpRequestTests; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; +import org.elasticsearch.xpack.inference.services.ServiceComponentsTests; import org.junit.After; import org.junit.Before; import java.io.IOException; -import java.nio.charset.StandardCharsets; +import java.util.List; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; @@ -36,7 +36,10 @@ import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; -import static org.elasticsearch.xpack.inference.external.http.HttpClientTests.createHttpPost; +import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; +import static org.elasticsearch.xpack.inference.external.http.Utils.getUrl; +import static org.elasticsearch.xpack.inference.external.request.openai.OpenAiUtils.ORGANIZATION_HEADER; +import static org.elasticsearch.xpack.inference.results.TextEmbeddingResultsTests.buildExpectation; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; @@ -47,7 +50,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -public class HttpRequestSenderFactoryTests extends ESTestCase { +public class HttpRequestSenderTests extends ESTestCase { private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); private final MockWebServer webServer = new MockWebServer(); private ThreadPool threadPool; @@ -79,36 +82,63 @@ public void testCreateSender_SendsRequestAndReceivesResponse() throws Exception try (var sender = senderFactory.createSender("test_service")) { sender.start(); - int responseCode = randomIntBetween(200, 203); - String body = randomAlphaOfLengthBetween(2, 8096); - webServer.enqueue(new MockResponse().setResponseCode(responseCode).setBody(body)); - - String paramKey = randomAlphaOfLength(3); - String paramValue = randomAlphaOfLength(3); - var httpPost = createHttpPost(webServer.getPort(), paramKey, paramValue); - - PlainActionFuture listener = new PlainActionFuture<>(); - sender.send(httpPost, null, listener); + String responseJson = """ + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + 0.0123, + -0.0123 + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + + PlainActionFuture listener = new PlainActionFuture<>(); + sender.send( + OpenAiEmbeddingsExecutableRequestCreatorTests.makeCreator(getUrl(webServer), null, "key", "model", null), + List.of("abc"), + listener + ); var result = listener.actionGet(TIMEOUT); + assertThat(result.asMap(), is(buildExpectation(List.of(List.of(0.0123F, -0.0123F))))); - 
assertThat(result.response().getStatusLine().getStatusCode(), equalTo(responseCode)); - assertThat(new String(result.body(), StandardCharsets.UTF_8), is(body)); assertThat(webServer.requests(), hasSize(1)); - assertThat(webServer.requests().get(0).getUri().getPath(), equalTo(httpPost.httpRequestBase().getURI().getPath())); - assertThat(webServer.requests().get(0).getUri().getQuery(), equalTo(paramKey + "=" + paramValue)); + assertNull(webServer.requests().get(0).getUri().getQuery()); assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer key")); + assertNull(webServer.requests().get(0).getHeader(ORGANIZATION_HEADER)); + + var requestMap = entityAsMap(webServer.requests().get(0).getBody()); + assertThat(requestMap.size(), is(2)); + assertThat(requestMap.get("input"), is(List.of("abc"))); + assertThat(requestMap.get("model"), is("model")); } } public void testHttpRequestSender_Throws_WhenCallingSendBeforeStart() throws Exception { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = new HttpRequestSender.Factory( + ServiceComponentsTests.createWithEmptySettings(threadPool), + clientManager, + mockClusterServiceEmpty() + ); try (var sender = senderFactory.createSender("test_service")) { - PlainActionFuture listener = new PlainActionFuture<>(); + PlainActionFuture listener = new PlainActionFuture<>(); var thrownException = expectThrows( AssertionError.class, - () -> sender.send(HttpRequestTests.createMock("inferenceEntityId"), listener) + () -> sender.send(ExecutableRequestCreatorTests.createMock(), List.of(), listener) ); assertThat(thrownException.getMessage(), is("call start() before sending a request")); } @@ -118,23 +148,27 @@ public void testHttpRequestSender_Throws_WhenATimeoutOccurs() throws Exception { var mockManager = mock(HttpClientManager.class); when(mockManager.getHttpClient()).thenReturn(mock(HttpClient.class)); - var senderFactory = new HttpRequestSenderFactory(threadPool, mockManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = new HttpRequestSender.Factory( + ServiceComponentsTests.createWithEmptySettings(threadPool), + mockManager, + mockClusterServiceEmpty() + ); try (var sender = senderFactory.createSender("test_service")) { - assertThat(sender, instanceOf(HttpRequestSenderFactory.HttpRequestSender.class)); + assertThat(sender, instanceOf(HttpRequestSender.class)); // hack to get around the sender interface so we can set the timeout directly - var httpSender = (HttpRequestSenderFactory.HttpRequestSender) sender; + var httpSender = (HttpRequestSender) sender; httpSender.setMaxRequestTimeout(TimeValue.timeValueNanos(1)); sender.start(); - PlainActionFuture listener = new PlainActionFuture<>(); - sender.send(HttpRequestTests.createMock("inferenceEntityId"), TimeValue.timeValueNanos(1), listener); + PlainActionFuture listener = new PlainActionFuture<>(); + sender.send(ExecutableRequestCreatorTests.createMock(), List.of(), TimeValue.timeValueNanos(1), listener); var thrownException = expectThrows(ElasticsearchTimeoutException.class, () -> listener.actionGet(TIMEOUT)); assertThat( thrownException.getMessage(), - is(format("Request timed out waiting to be executed after [%s]", TimeValue.timeValueNanos(1))) + is(format("Request timed out waiting to be sent after [%s]", TimeValue.timeValueNanos(1))) ); } } @@ 
-143,24 +177,28 @@ public void testHttpRequestSenderWithTimeout_Throws_WhenATimeoutOccurs() throws var mockManager = mock(HttpClientManager.class); when(mockManager.getHttpClient()).thenReturn(mock(HttpClient.class)); - var senderFactory = new HttpRequestSenderFactory(threadPool, mockManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = new HttpRequestSender.Factory( + ServiceComponentsTests.createWithEmptySettings(threadPool), + mockManager, + mockClusterServiceEmpty() + ); try (var sender = senderFactory.createSender("test_service")) { sender.start(); - PlainActionFuture listener = new PlainActionFuture<>(); - sender.send(HttpRequestTests.createMock("id"), TimeValue.timeValueNanos(1), listener); + PlainActionFuture listener = new PlainActionFuture<>(); + sender.send(ExecutableRequestCreatorTests.createMock(), List.of(), TimeValue.timeValueNanos(1), listener); var thrownException = expectThrows(ElasticsearchTimeoutException.class, () -> listener.actionGet(TIMEOUT)); assertThat( thrownException.getMessage(), - is(format("Request timed out waiting to be executed after [%s]", TimeValue.timeValueNanos(1))) + is(format("Request timed out waiting to be sent after [%s]", TimeValue.timeValueNanos(1))) ); } } - private static HttpRequestSenderFactory createSenderFactory(HttpClientManager clientManager, AtomicReference threadRef) { + private static HttpRequestSender.Factory createSenderFactory(HttpClientManager clientManager, AtomicReference threadRef) { var mockExecutorService = mock(ExecutorService.class); doAnswer(invocation -> { Runnable runnable = (Runnable) invocation.getArguments()[0]; @@ -175,6 +213,34 @@ private static HttpRequestSenderFactory createSenderFactory(HttpClientManager cl when(mockThreadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); when(mockThreadPool.schedule(any(Runnable.class), any(), any())).thenReturn(mock(Scheduler.ScheduledCancellable.class)); - return new HttpRequestSenderFactory(mockThreadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + return new HttpRequestSender.Factory( + ServiceComponentsTests.createWithEmptySettings(mockThreadPool), + clientManager, + mockClusterServiceEmpty() + ); + } + + public static HttpRequestSender.Factory createSenderFactory(ThreadPool threadPool, HttpClientManager httpClientManager) { + return new HttpRequestSender.Factory( + ServiceComponentsTests.createWithEmptySettings(threadPool), + httpClientManager, + mockClusterServiceEmpty() + ); + } + + public static HttpRequestSender.Factory createSenderFactory( + ThreadPool threadPool, + HttpClientManager httpClientManager, + Settings settings + ) { + return new HttpRequestSender.Factory( + ServiceComponentsTests.createWithSettings(threadPool, settings), + httpClientManager, + mockClusterServiceEmpty() + ); + } + + public static Sender createSenderWithSingleRequestManager(HttpRequestSender.Factory factory, String serviceName) { + return factory.createSender(serviceName); } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiEmbeddingsExecutableRequestCreatorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiEmbeddingsExecutableRequestCreatorTests.java new file mode 100644 index 0000000000000..53537a3ff77c2 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiEmbeddingsExecutableRequestCreatorTests.java @@ -0,0 +1,40 @@ +/* + * Copyright 
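// Reviewer note (illustrative sketch, not part of the patch): the renamed HttpRequestSenderTests now wire
// the sender through HttpRequestSender.Factory and drive it with an ExecutableRequestCreator plus an input
// list, instead of a raw HttpRequest. The snippet below restates that wiring as used in the tests above;
// it assumes the same test fixtures (threadPool, clientManager, webServer, TIMEOUT) and helper classes, and
// is not a definitive API reference.
var senderFactory = new HttpRequestSender.Factory(
    ServiceComponentsTests.createWithEmptySettings(threadPool),
    clientManager,
    mockClusterServiceEmpty()
);
try (var sender = senderFactory.createSender("test_service")) {
    sender.start();

    // The creator builds the OpenAI embeddings request; the sender only sees inputs and a listener.
    PlainActionFuture<InferenceServiceResults> listener = new PlainActionFuture<>();
    sender.send(
        OpenAiEmbeddingsExecutableRequestCreatorTests.makeCreator(getUrl(webServer), null, "key", "model", null),
        List.of("abc"),
        listener
    );
    var result = listener.actionGet(TIMEOUT);
}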
Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.elasticsearch.core.Nullable; +import org.elasticsearch.xpack.inference.common.TruncatorTests; + +import static org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsModelTests.createModel; + +public class OpenAiEmbeddingsExecutableRequestCreatorTests { + public static OpenAiEmbeddingsExecutableRequestCreator makeCreator( + String url, + @Nullable String org, + String apiKey, + String modelName, + @Nullable String user + ) { + var model = createModel(url, org, apiKey, modelName, user); + + return new OpenAiEmbeddingsExecutableRequestCreator(model, TruncatorTests.createTruncator()); + } + + public static OpenAiEmbeddingsExecutableRequestCreator makeCreator( + String url, + @Nullable String org, + String apiKey, + String modelName, + @Nullable String user, + String inferenceEntityId + ) { + var model = createModel(url, org, apiKey, modelName, user, inferenceEntityId); + + return new OpenAiEmbeddingsExecutableRequestCreator(model, TruncatorTests.createTruncator()); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorServiceTests.java index ef8731746e187..5e88c3f1bb8f5 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorServiceTests.java @@ -9,22 +9,23 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.inference.external.http.HttpClient; -import org.elasticsearch.xpack.inference.external.http.HttpResult; -import org.elasticsearch.xpack.inference.external.request.HttpRequestTests; +import org.elasticsearch.xpack.inference.external.http.retry.RetryingHttpSender; import org.junit.After; import org.junit.Before; import org.mockito.ArgumentCaptor; import java.io.IOException; +import java.util.List; import java.util.concurrent.BlockingQueue; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; @@ -36,7 +37,6 @@ import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.common.AdjustableCapacityBlockingQueueTests.mockQueueCreator; -import static org.elasticsearch.xpack.inference.external.http.HttpClientTests.createHttpPost; import static 
org.elasticsearch.xpack.inference.external.http.sender.RequestExecutorServiceSettingsTests.createRequestExecutorServiceSettings; import static org.elasticsearch.xpack.inference.external.http.sender.RequestExecutorServiceSettingsTests.createRequestExecutorServiceSettingsEmpty; import static org.hamcrest.Matchers.instanceOf; @@ -70,7 +70,7 @@ public void testQueueSize_IsEmpty() { public void testQueueSize_IsOne() { var service = createRequestExecutorServiceWithMocks(); - service.execute(HttpRequestTests.createMock("inferenceEntityId"), null, new PlainActionFuture<>()); + service.execute(ExecutableRequestCreatorTests.createMock(), List.of(), null, new PlainActionFuture<>()); assertThat(service.queueSize(), is(1)); } @@ -83,7 +83,7 @@ public void testIsTerminated_IsFalse() { public void testIsTerminated_IsTrue() throws InterruptedException { var latch = new CountDownLatch(1); - var service = createRequestExecutorService(null, latch); + var service = createRequestExecutorService(latch, mock(RetryingHttpSender.class)); service.shutdown(); service.start(); @@ -96,19 +96,24 @@ public void testIsTerminated_AfterStopFromSeparateThread() throws Exception { var waitToShutdown = new CountDownLatch(1); var waitToReturnFromSend = new CountDownLatch(1); - var mockHttpClient = mock(HttpClient.class); + var requestSender = mock(RetryingHttpSender.class); doAnswer(invocation -> { waitToShutdown.countDown(); waitToReturnFromSend.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS); return Void.TYPE; - }).when(mockHttpClient).send(any(), any(), any()); + }).when(requestSender).send(any(), any(), any(), any(), any(), any()); - var service = createRequestExecutorService(mockHttpClient, null); + var service = createRequestExecutorService(null, requestSender); Future executorTermination = submitShutdownRequest(waitToShutdown, waitToReturnFromSend, service); - PlainActionFuture listener = new PlainActionFuture<>(); - service.execute(HttpRequestTests.createMock("inferenceEntityId"), null, listener); + PlainActionFuture listener = new PlainActionFuture<>(); + service.execute( + OpenAiEmbeddingsExecutableRequestCreatorTests.makeCreator("url", null, "key", "id", null), + List.of(), + null, + listener + ); service.start(); @@ -127,8 +132,8 @@ public void testSend_AfterShutdown_Throws() { service.shutdown(); - var listener = new PlainActionFuture(); - service.execute(HttpRequestTests.createMock("inferenceEntityId"), null, listener); + var listener = new PlainActionFuture(); + service.execute(ExecutableRequestCreatorTests.createMock(), List.of(), null, listener); var thrownException = expectThrows(EsRejectedExecutionException.class, () -> listener.actionGet(TIMEOUT)); @@ -142,15 +147,15 @@ public void testSend_AfterShutdown_Throws() { public void testSend_Throws_WhenQueueIsFull() { var service = new RequestExecutorService( "test_service", - mock(HttpClient.class), threadPool, null, - RequestExecutorServiceSettingsTests.createRequestExecutorServiceSettings(1) + RequestExecutorServiceSettingsTests.createRequestExecutorServiceSettings(1), + new SingleRequestManager(mock(RetryingHttpSender.class)) ); - service.execute(HttpRequestTests.createMock("inferenceEntityId"), null, new PlainActionFuture<>()); - var listener = new PlainActionFuture(); - service.execute(HttpRequestTests.createMock("inferenceEntityId"), null, listener); + service.execute(ExecutableRequestCreatorTests.createMock(), List.of(), null, new PlainActionFuture<>()); + var listener = new PlainActionFuture(); + service.execute(ExecutableRequestCreatorTests.createMock(), 
List.of(), null, listener); var thrownException = expectThrows(EsRejectedExecutionException.class, () -> listener.actionGet(TIMEOUT)); @@ -161,27 +166,28 @@ public void testSend_Throws_WhenQueueIsFull() { assertFalse(thrownException.isExecutorShutdown()); } - public void testTaskThrowsError_CallsOnFailure() throws Exception { - var httpClient = mock(HttpClient.class); + public void testTaskThrowsError_CallsOnFailure() { + var requestSender = mock(RetryingHttpSender.class); - var service = createRequestExecutorService(httpClient, null); + var service = createRequestExecutorService(null, requestSender); doAnswer(invocation -> { service.shutdown(); throw new IllegalArgumentException("failed"); - }).when(httpClient).send(any(), any(), any()); + }).when(requestSender).send(any(), any(), any(), any(), any(), any()); - PlainActionFuture listener = new PlainActionFuture<>(); + PlainActionFuture listener = new PlainActionFuture<>(); - var request = createHttpPost(0, "a", "b"); - service.execute(request, null, listener); + service.execute( + OpenAiEmbeddingsExecutableRequestCreatorTests.makeCreator("url", null, "key", "id", null), + List.of(), + null, + listener + ); service.start(); var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); - assertThat( - thrownException.getMessage(), - is(format("Failed to send request from inference entity id [%s]", request.inferenceEntityId())) - ); + assertThat(thrownException.getMessage(), is(format("Failed to send request from inference entity id [%s]", "id"))); assertThat(thrownException.getCause(), instanceOf(IllegalArgumentException.class)); assertTrue(service.isTerminated()); } @@ -200,22 +206,81 @@ public void testShutdown_AllowsMultipleCalls() { public void testSend_CallsOnFailure_WhenRequestTimesOut() { var service = createRequestExecutorServiceWithMocks(); - var listener = new PlainActionFuture(); - service.execute(HttpRequestTests.createMock("inferenceEntityId"), TimeValue.timeValueNanos(1), listener); + var listener = new PlainActionFuture(); + service.execute(ExecutableRequestCreatorTests.createMock(), List.of(), TimeValue.timeValueNanos(1), listener); var thrownException = expectThrows(ElasticsearchTimeoutException.class, () -> listener.actionGet(TIMEOUT)); assertThat( thrownException.getMessage(), - is(format("Request timed out waiting to be executed after [%s]", TimeValue.timeValueNanos(1))) + is(format("Request timed out waiting to be sent after [%s]", TimeValue.timeValueNanos(1))) ); } + public void testSend_PreservesThreadContext() throws InterruptedException, ExecutionException, TimeoutException { + var headerKey = "not empty"; + var headerValue = "value"; + + var service = createRequestExecutorServiceWithMocks(); + + // starting this on a separate thread to ensure we aren't using the same thread context that the rest of the test will execute with + threadPool.generic().execute(service::start); + + ThreadContext threadContext = threadPool.getThreadContext(); + threadContext.putHeader(headerKey, headerValue); + + var requestSender = mock(RetryingHttpSender.class); + + var waitToShutdown = new CountDownLatch(1); + var waitToReturnFromSend = new CountDownLatch(1); + + // this code will be executed by the queue's thread + doAnswer(invocation -> { + var serviceThreadContext = threadPool.getThreadContext(); + // ensure that the spawned thread didn't pick up the header that was set initially on a separate thread + assertNull(serviceThreadContext.getHeader(headerKey)); + + @SuppressWarnings("unchecked") + 
ActionListener listener = (ActionListener) invocation.getArguments()[5]; + listener.onResponse(null); + + waitToShutdown.countDown(); + waitToReturnFromSend.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS); + return Void.TYPE; + }).when(requestSender).send(any(), any(), any(), any(), any(), any()); + + var finishedOnResponse = new CountDownLatch(1); + ActionListener listener = new ActionListener<>() { + @Override + public void onResponse(InferenceServiceResults ignore) { + // if we've preserved the thread context correctly then the header should still exist + ThreadContext listenerContext = threadPool.getThreadContext(); + assertThat(listenerContext.getHeader(headerKey), is(headerValue)); + finishedOnResponse.countDown(); + } + + @Override + public void onFailure(Exception e) { + throw new RuntimeException("onFailure shouldn't be called", e); + } + }; + + service.execute(ExecutableRequestCreatorTests.createMock(requestSender), List.of(), null, listener); + + Future executorTermination = submitShutdownRequest(waitToShutdown, waitToReturnFromSend, service); + + executorTermination.get(TIMEOUT.millis(), TimeUnit.MILLISECONDS); + assertTrue(service.isTerminated()); + + finishedOnResponse.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS); + } + public void testSend_NotifiesTasksOfShutdown() { var service = createRequestExecutorServiceWithMocks(); - var listener = new PlainActionFuture(); - service.execute(HttpRequestTests.createMock("inferenceEntityId"), null, listener); + var listener = new PlainActionFuture(); + service.execute(ExecutableRequestCreatorTests.createMock(), List.of(), null, listener); + service.shutdown(); service.start(); @@ -231,15 +296,15 @@ public void testSend_NotifiesTasksOfShutdown() { public void testQueueTake_DoesNotCauseServiceToTerminate_WhenItThrows() throws InterruptedException { @SuppressWarnings("unchecked") - BlockingQueue queue = mock(LinkedBlockingQueue.class); + BlockingQueue queue = mock(LinkedBlockingQueue.class); var service = new RequestExecutorService( getTestName(), - mock(HttpClient.class), threadPool, mockQueueCreator(queue), null, - createRequestExecutorServiceSettingsEmpty() + createRequestExecutorServiceSettingsEmpty(), + new SingleRequestManager(mock(RetryingHttpSender.class)) ); when(queue.take()).thenThrow(new ElasticsearchException("failed")).thenAnswer(invocation -> { @@ -254,16 +319,16 @@ public void testQueueTake_DoesNotCauseServiceToTerminate_WhenItThrows() throws I public void testQueueTake_ThrowingInterruptedException_TerminatesService() throws Exception { @SuppressWarnings("unchecked") - BlockingQueue queue = mock(LinkedBlockingQueue.class); + BlockingQueue queue = mock(LinkedBlockingQueue.class); when(queue.take()).thenThrow(new InterruptedException("failed")); var service = new RequestExecutorService( getTestName(), - mock(HttpClient.class), threadPool, mockQueueCreator(queue), null, - createRequestExecutorServiceSettingsEmpty() + createRequestExecutorServiceSettingsEmpty(), + new SingleRequestManager(mock(RetryingHttpSender.class)) ); Future executorTermination = threadPool.generic().submit(() -> { @@ -281,17 +346,17 @@ public void testQueueTake_ThrowingInterruptedException_TerminatesService() throw } public void testQueueTake_RejectsTask_WhenServiceShutsDown() throws Exception { - var mockTask = mock(AbstractRunnable.class); + var mockTask = mock(RejectableTask.class); @SuppressWarnings("unchecked") - BlockingQueue queue = mock(LinkedBlockingQueue.class); + BlockingQueue queue = mock(LinkedBlockingQueue.class); var service = new 
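// Reviewer note (illustrative sketch, not part of the patch): testSend_PreservesThreadContext above asserts
// that headers set on the caller's thread context are absent on the queue's worker thread but restored when
// the listener is notified. One common way Elasticsearch code achieves this is by wrapping the listener
// before queuing; whether RequestExecutorService uses exactly this helper is an assumption — the test only
// checks the observable behaviour.
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ContextPreservingActionListener;
import org.elasticsearch.common.util.concurrent.ThreadContext;

final class ContextPreservationSketch {
    static <T> ActionListener<T> preserve(ThreadContext threadContext, ActionListener<T> listener) {
        // Captures the current thread context; onResponse/onFailure later run with it restored,
        // regardless of which thread invokes them.
        return ContextPreservingActionListener.wrapPreservingContext(listener, threadContext);
    }
}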
RequestExecutorService( "test_service", - mock(HttpClient.class), threadPool, mockQueueCreator(queue), null, - createRequestExecutorServiceSettingsEmpty() + createRequestExecutorServiceSettingsEmpty(), + new SingleRequestManager(mock(RetryingHttpSender.class)) ); doAnswer(invocation -> { @@ -316,17 +381,17 @@ public void testQueueTake_RejectsTask_WhenServiceShutsDown() throws Exception { assertTrue(rejectionException.isExecutorShutdown()); } - public void testChangingCapacity_SetsCapacityToTwo() throws ExecutionException, InterruptedException, TimeoutException, IOException { - var httpClient = mock(HttpClient.class); + public void testChangingCapacity_SetsCapacityToTwo() throws ExecutionException, InterruptedException, TimeoutException { + var requestSender = mock(RetryingHttpSender.class); var settings = createRequestExecutorServiceSettings(1); - var service = new RequestExecutorService("test_service", httpClient, threadPool, null, settings); + var service = new RequestExecutorService("test_service", threadPool, null, settings, new SingleRequestManager(requestSender)); - service.execute(HttpRequestTests.createMock("inferenceEntityId"), null, new PlainActionFuture<>()); + service.execute(ExecutableRequestCreatorTests.createMock(requestSender), List.of(), null, new PlainActionFuture<>()); assertThat(service.queueSize(), is(1)); - PlainActionFuture listener = new PlainActionFuture<>(); - service.execute(HttpRequestTests.createMock("inferenceEntityId"), null, listener); + PlainActionFuture listener = new PlainActionFuture<>(); + service.execute(ExecutableRequestCreatorTests.createMock(requestSender), List.of(), null, listener); var thrownException = expectThrows(EsRejectedExecutionException.class, () -> listener.actionGet(TIMEOUT)); assertThat( @@ -343,7 +408,7 @@ public void testChangingCapacity_SetsCapacityToTwo() throws ExecutionException, waitToShutdown.countDown(); waitToReturnFromSend.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS); return Void.TYPE; - }).when(httpClient).send(any(), any(), any()); + }).when(requestSender).send(any(), any(), any(), any(), any(), any()); Future executorTermination = submitShutdownRequest(waitToShutdown, waitToReturnFromSend, service); @@ -354,18 +419,18 @@ public void testChangingCapacity_SetsCapacityToTwo() throws ExecutionException, assertThat(service.remainingQueueCapacity(), is(2)); } - public void testChangingCapacity_DoesNotRejectsOverflowTasks_BecauseOfQueueFull() throws IOException, ExecutionException, - InterruptedException, TimeoutException { - var httpClient = mock(HttpClient.class); + public void testChangingCapacity_DoesNotRejectsOverflowTasks_BecauseOfQueueFull() throws ExecutionException, InterruptedException, + TimeoutException { + var requestSender = mock(RetryingHttpSender.class); var settings = createRequestExecutorServiceSettings(3); - var service = new RequestExecutorService("test_service", httpClient, threadPool, null, settings); + var service = new RequestExecutorService("test_service", threadPool, null, settings, new SingleRequestManager(requestSender)); - service.execute(HttpRequestTests.createMock("inferenceEntityId"), null, new PlainActionFuture<>()); - service.execute(HttpRequestTests.createMock("inferenceEntityId"), null, new PlainActionFuture<>()); + service.execute(ExecutableRequestCreatorTests.createMock(requestSender), List.of(), null, new PlainActionFuture<>()); + service.execute(ExecutableRequestCreatorTests.createMock(requestSender), List.of(), null, new PlainActionFuture<>()); - PlainActionFuture listener = new 
PlainActionFuture<>(); - service.execute(HttpRequestTests.createMock("inferenceEntityId"), null, listener); + PlainActionFuture listener = new PlainActionFuture<>(); + service.execute(ExecutableRequestCreatorTests.createMock(requestSender), List.of(), null, listener); assertThat(service.queueSize(), is(3)); settings.setQueueCapacity(1); @@ -377,7 +442,7 @@ public void testChangingCapacity_DoesNotRejectsOverflowTasks_BecauseOfQueueFull( waitToShutdown.countDown(); waitToReturnFromSend.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS); return Void.TYPE; - }).when(httpClient).send(any(), any(), any()); + }).when(requestSender).send(any(), any(), any(), any(), any(), any()); Future executorTermination = submitShutdownRequest(waitToShutdown, waitToReturnFromSend, service); @@ -401,16 +466,16 @@ public void testChangingCapacity_DoesNotRejectsOverflowTasks_BecauseOfQueueFull( public void testChangingCapacity_ToZero_SetsQueueCapacityToUnbounded() throws IOException, ExecutionException, InterruptedException, TimeoutException { - var httpClient = mock(HttpClient.class); + var requestSender = mock(RetryingHttpSender.class); var settings = createRequestExecutorServiceSettings(1); - var service = new RequestExecutorService("test_service", httpClient, threadPool, null, settings); + var service = new RequestExecutorService("test_service", threadPool, null, settings, new SingleRequestManager(requestSender)); - service.execute(HttpRequestTests.createMock("inferenceEntityId"), null, new PlainActionFuture<>()); + service.execute(ExecutableRequestCreatorTests.createMock(requestSender), List.of(), null, new PlainActionFuture<>()); assertThat(service.queueSize(), is(1)); - PlainActionFuture listener = new PlainActionFuture<>(); - service.execute(HttpRequestTests.createMock("inferenceEntityId"), null, listener); + PlainActionFuture listener = new PlainActionFuture<>(); + service.execute(ExecutableRequestCreatorTests.createMock(requestSender), List.of(), null, listener); var thrownException = expectThrows(EsRejectedExecutionException.class, () -> listener.actionGet(TIMEOUT)); assertThat( @@ -427,7 +492,7 @@ public void testChangingCapacity_ToZero_SetsQueueCapacityToUnbounded() throws IO waitToShutdown.countDown(); waitToReturnFromSend.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS); return Void.TYPE; - }).when(httpClient).send(any(), any(), any()); + }).when(requestSender).send(any(), any(), any(), any(), any(), any()); Future executorTermination = submitShutdownRequest(waitToShutdown, waitToReturnFromSend, service); @@ -458,17 +523,16 @@ private Future submitShutdownRequest( } private RequestExecutorService createRequestExecutorServiceWithMocks() { - return createRequestExecutorService(null, null); + return createRequestExecutorService(null, mock(RetryingHttpSender.class)); } - private RequestExecutorService createRequestExecutorService(@Nullable HttpClient httpClient, @Nullable CountDownLatch startupLatch) { - var httpClientToUse = httpClient == null ? 
mock(HttpClient.class) : httpClient; + private RequestExecutorService createRequestExecutorService(@Nullable CountDownLatch startupLatch, RetryingHttpSender requestSender) { return new RequestExecutorService( "test_service", - httpClientToUse, threadPool, startupLatch, - createRequestExecutorServiceSettingsEmpty() + createRequestExecutorServiceSettingsEmpty(), + new SingleRequestManager(requestSender) ); } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestTaskTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestTaskTests.java index eaf1a0ac267cf..5c35d8ce49b60 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestTaskTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestTaskTests.java @@ -7,30 +7,19 @@ package org.elasticsearch.xpack.inference.external.http.sender; -import org.apache.http.HttpHeaders; -import org.apache.http.HttpResponse; -import org.apache.http.client.protocol.HttpClientContext; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.http.MockResponse; -import org.elasticsearch.test.http.MockWebServer; import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xcontent.XContentType; -import org.elasticsearch.xpack.inference.external.http.HttpClient; -import org.elasticsearch.xpack.inference.external.http.HttpResult; -import org.elasticsearch.xpack.inference.external.request.HttpRequestTests; -import org.elasticsearch.xpack.inference.logging.ThrottlerManager; import org.junit.After; import org.junit.Before; import org.mockito.ArgumentCaptor; -import java.io.IOException; -import java.nio.charset.StandardCharsets; +import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; @@ -38,16 +27,9 @@ import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; -import static org.elasticsearch.xpack.inference.external.http.HttpClientTests.createConnectionManager; -import static org.elasticsearch.xpack.inference.external.http.HttpClientTests.createHttpPost; -import static org.elasticsearch.xpack.inference.external.http.HttpClientTests.emptyHttpSettings; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -57,134 +39,65 @@ public class RequestTaskTests extends ESTestCase { private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); - private final MockWebServer webServer = new MockWebServer(); private ThreadPool threadPool; @Before public void init() throws Exception { - webServer.start(); threadPool 
= createThreadPool(inferenceUtilityPool()); } @After public void shutdown() { terminate(threadPool); - webServer.close(); } - public void testDoRun_SendsRequestAndReceivesResponse() throws Exception { - int responseCode = randomIntBetween(200, 203); - String body = randomAlphaOfLengthBetween(2, 8096); - webServer.enqueue(new MockResponse().setResponseCode(responseCode).setBody(body)); - - String paramKey = randomAlphaOfLength(3); - String paramValue = randomAlphaOfLength(3); - var httpPost = createHttpPost(webServer.getPort(), paramKey, paramValue); - - try (var httpClient = HttpClient.create(emptyHttpSettings(), threadPool, createConnectionManager(), mock(ThrottlerManager.class))) { - httpClient.start(); - - PlainActionFuture listener = new PlainActionFuture<>(); - var requestTask = new RequestTask(httpPost, httpClient, HttpClientContext.create(), null, threadPool, listener); - requestTask.doRun(); - var result = listener.actionGet(TIMEOUT); - - assertThat(result.response().getStatusLine().getStatusCode(), equalTo(responseCode)); - assertThat(new String(result.body(), StandardCharsets.UTF_8), is(body)); - assertThat(webServer.requests(), hasSize(1)); - assertThat(webServer.requests().get(0).getUri().getPath(), equalTo(httpPost.httpRequestBase().getURI().getPath())); - assertThat(webServer.requests().get(0).getUri().getQuery(), equalTo(paramKey + "=" + paramValue)); - assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); - } - } - - public void testDoRun_SendThrowsIOException() throws Exception { - var httpClient = mock(HttpClient.class); - doThrow(new IOException("exception")).when(httpClient).send(any(), any(), any()); - - String paramKey = randomAlphaOfLength(3); - String paramValue = randomAlphaOfLength(3); - var httpPost = createHttpPost(webServer.getPort(), paramKey, paramValue); - - PlainActionFuture listener = new PlainActionFuture<>(); - var requestTask = new RequestTask(httpPost, httpClient, HttpClientContext.create(), null, threadPool, listener); - requestTask.doRun(); - - var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); - assertThat( - thrownException.getMessage(), - is(format("Failed to send request from inference entity id [%s]", httpPost.inferenceEntityId())) - ); - } - - public void testRequest_DoesNotCallOnFailureForTimeout_AfterSendThrowsIllegalArgumentException() throws Exception { + public void testExecuting_DoesNotCallOnFailureForTimeout_AfterIllegalArgumentException() { AtomicReference onTimeout = new AtomicReference<>(); var mockThreadPool = mockThreadPoolForTimeout(onTimeout); - var httpClient = mock(HttpClient.class); - doThrow(new IllegalArgumentException("failed")).when(httpClient).send(any(), any(), any()); - - var httpPost = createHttpPost(webServer.getPort(), "a", "b"); - @SuppressWarnings("unchecked") - ActionListener listener = mock(ActionListener.class); + ActionListener listener = mock(ActionListener.class); var requestTask = new RequestTask( - httpPost, - httpClient, - HttpClientContext.create(), + OpenAiEmbeddingsExecutableRequestCreatorTests.makeCreator("url", null, "key", "model", null, "id"), + List.of("abc"), TimeValue.timeValueMillis(1), mockThreadPool, listener ); - requestTask.doRun(); - - ArgumentCaptor argument = ArgumentCaptor.forClass(Exception.class); - verify(listener, times(1)).onFailure(argument.capture()); - assertThat( - argument.getValue().getMessage(), - is(format("Failed to send request from inference entity id [%s]", 
httpPost.inferenceEntityId())) - ); - assertThat(argument.getValue(), instanceOf(ElasticsearchException.class)); - assertThat(argument.getValue().getCause(), instanceOf(IllegalArgumentException.class)); + requestTask.getListener().onFailure(new IllegalArgumentException("failed")); + verify(listener, times(1)).onFailure(any()); + assertTrue(requestTask.hasCompleted()); + assertTrue(requestTask.getRequestCompletedFunction().get()); onTimeout.get().run(); verifyNoMoreInteractions(listener); } public void testRequest_ReturnsTimeoutException() { - var httpClient = mock(HttpClient.class); - PlainActionFuture listener = new PlainActionFuture<>(); + PlainActionFuture listener = new PlainActionFuture<>(); var requestTask = new RequestTask( - HttpRequestTests.createMock("inferenceEntityId"), - httpClient, - HttpClientContext.create(), + OpenAiEmbeddingsExecutableRequestCreatorTests.makeCreator("url", null, "key", "model", null, "id"), + List.of("abc"), TimeValue.timeValueMillis(1), threadPool, listener ); - requestTask.doRun(); var thrownException = expectThrows(ElasticsearchTimeoutException.class, () -> listener.actionGet(TIMEOUT)); assertThat( thrownException.getMessage(), - is(format("Request timed out waiting to be executed after [%s]", TimeValue.timeValueMillis(1))) + is(format("Request timed out waiting to be sent after [%s]", TimeValue.timeValueMillis(1))) ); + assertTrue(requestTask.hasCompleted()); + assertTrue(requestTask.getRequestCompletedFunction().get()); } public void testRequest_DoesNotCallOnFailureTwiceWhenTimingOut() throws Exception { - var httpClient = mock(HttpClient.class); - doAnswer(invocation -> { - @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[2]; - listener.onFailure(new ElasticsearchException("failed")); - return Void.TYPE; - }).when(httpClient).send(any(), any(), any()); - @SuppressWarnings("unchecked") - ActionListener listener = mock(ActionListener.class); + ActionListener listener = mock(ActionListener.class); var calledOnFailureLatch = new CountDownLatch(1); doAnswer(invocation -> { calledOnFailureLatch.countDown(); @@ -192,9 +105,8 @@ public void testRequest_DoesNotCallOnFailureTwiceWhenTimingOut() throws Exceptio }).when(listener).onFailure(any()); var requestTask = new RequestTask( - HttpRequestTests.createMock("inferenceEntityId"), - httpClient, - HttpClientContext.create(), + OpenAiEmbeddingsExecutableRequestCreatorTests.makeCreator("url", null, "key", "model", null, "id"), + List.of("abc"), TimeValue.timeValueMillis(1), threadPool, listener @@ -206,25 +118,18 @@ public void testRequest_DoesNotCallOnFailureTwiceWhenTimingOut() throws Exceptio verify(listener, times(1)).onFailure(argument.capture()); assertThat( argument.getValue().getMessage(), - is(format("Request timed out waiting to be executed after [%s]", TimeValue.timeValueMillis(1))) + is(format("Request timed out waiting to be sent after [%s]", TimeValue.timeValueMillis(1))) ); + assertTrue(requestTask.hasCompleted()); + assertTrue(requestTask.getRequestCompletedFunction().get()); - requestTask.doRun(); + requestTask.getListener().onFailure(new IllegalArgumentException("failed")); verifyNoMoreInteractions(listener); } public void testRequest_DoesNotCallOnResponseAfterTimingOut() throws Exception { - var httpClient = mock(HttpClient.class); - doAnswer(invocation -> { - @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[2]; - var result = new HttpResult(mock(HttpResponse.class), new byte[0]); - 
listener.onResponse(result); - return Void.TYPE; - }).when(httpClient).send(any(), any(), any()); - @SuppressWarnings("unchecked") - ActionListener listener = mock(ActionListener.class); + ActionListener listener = mock(ActionListener.class); var calledOnFailureLatch = new CountDownLatch(1); doAnswer(invocation -> { calledOnFailureLatch.countDown(); @@ -232,9 +137,8 @@ public void testRequest_DoesNotCallOnResponseAfterTimingOut() throws Exception { }).when(listener).onFailure(any()); var requestTask = new RequestTask( - HttpRequestTests.createMock("inferenceEntityId"), - httpClient, - HttpClientContext.create(), + OpenAiEmbeddingsExecutableRequestCreatorTests.makeCreator("url", null, "key", "model", null, "id"), + List.of("abc"), TimeValue.timeValueMillis(1), threadPool, listener @@ -246,44 +150,12 @@ public void testRequest_DoesNotCallOnResponseAfterTimingOut() throws Exception { verify(listener, times(1)).onFailure(argument.capture()); assertThat( argument.getValue().getMessage(), - is(format("Request timed out waiting to be executed after [%s]", TimeValue.timeValueMillis(1))) - ); - - requestTask.doRun(); - verifyNoMoreInteractions(listener); - } - - public void testRequest_DoesNotCallOnFailureForTimeout_AfterAlreadyCallingOnFailure() throws Exception { - AtomicReference onTimeout = new AtomicReference<>(); - var mockThreadPool = mockThreadPoolForTimeout(onTimeout); - - var httpClient = mock(HttpClient.class); - doAnswer(invocation -> { - @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[2]; - listener.onFailure(new ElasticsearchException("failed")); - return Void.TYPE; - }).when(httpClient).send(any(), any(), any()); - - @SuppressWarnings("unchecked") - ActionListener listener = mock(ActionListener.class); - - var requestTask = new RequestTask( - HttpRequestTests.createMock("inferenceEntityId"), - httpClient, - HttpClientContext.create(), - TimeValue.timeValueMillis(1), - mockThreadPool, - listener + is(format("Request timed out waiting to be sent after [%s]", TimeValue.timeValueMillis(1))) ); + assertTrue(requestTask.hasCompleted()); + assertTrue(requestTask.getRequestCompletedFunction().get()); - requestTask.doRun(); - - ArgumentCaptor argument = ArgumentCaptor.forClass(Exception.class); - verify(listener, times(1)).onFailure(argument.capture()); - assertThat(argument.getValue().getMessage(), is("failed")); - - onTimeout.get().run(); + requestTask.getListener().onResponse(mock(InferenceServiceResults.class)); verifyNoMoreInteractions(listener); } @@ -291,29 +163,21 @@ public void testRequest_DoesNotCallOnFailureForTimeout_AfterAlreadyCallingOnResp AtomicReference onTimeout = new AtomicReference<>(); var mockThreadPool = mockThreadPoolForTimeout(onTimeout); - var httpClient = mock(HttpClient.class); - doAnswer(invocation -> { - @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[2]; - listener.onResponse(new HttpResult(mock(HttpResponse.class), new byte[0])); - return Void.TYPE; - }).when(httpClient).send(any(), any(), any()); - @SuppressWarnings("unchecked") - ActionListener listener = mock(ActionListener.class); + ActionListener listener = mock(ActionListener.class); var requestTask = new RequestTask( - HttpRequestTests.createMock("inferenceEntityId"), - httpClient, - HttpClientContext.create(), + OpenAiEmbeddingsExecutableRequestCreatorTests.makeCreator("url", null, "key", "model", null, "id"), + List.of("abc"), TimeValue.timeValueMillis(1), mockThreadPool, listener ); - 
requestTask.doRun(); - + requestTask.getListener().onResponse(mock(InferenceServiceResults.class)); verify(listener, times(1)).onResponse(any()); + assertTrue(requestTask.hasCompleted()); + assertTrue(requestTask.getRequestCompletedFunction().get()); onTimeout.get().run(); verifyNoMoreInteractions(listener); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/SingleRequestManagerTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/SingleRequestManagerTests.java new file mode 100644 index 0000000000000..ab8bf244a4d2c --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/SingleRequestManagerTests.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.apache.http.client.protocol.HttpClientContext; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.external.http.retry.RetryingHttpSender; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verifyNoInteractions; +import static org.mockito.Mockito.when; + +public class SingleRequestManagerTests extends ESTestCase { + public void testExecute_DoesNotCallRequestCreatorCreate_WhenInputIsNull() { + var requestCreator = mock(ExecutableRequestCreator.class); + var request = mock(InferenceRequest.class); + when(request.getRequestCreator()).thenReturn(requestCreator); + + new SingleRequestManager(mock(RetryingHttpSender.class)).execute(mock(InferenceRequest.class), HttpClientContext.create()); + verifyNoInteractions(requestCreator); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/openai/OpenAiClientTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/openai/OpenAiClientTests.java deleted file mode 100644 index bb9612f01d8ff..0000000000000 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/openai/OpenAiClientTests.java +++ /dev/null @@ -1,297 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
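// Reviewer note (illustrative sketch, not part of the patch): the RequestTask tests above verify
// "notify at most once" semantics — after the timeout fires, a late onResponse/onFailure must not reach the
// caller, and hasCompleted()/getRequestCompletedFunction() report completion. A minimal guard with that
// behaviour is sketched below; the class and method names are hypothetical, not the production RequestTask.
import java.util.concurrent.atomic.AtomicBoolean;

import org.elasticsearch.action.ActionListener;

final class CompleteOnceListenerSketch<T> implements ActionListener<T> {
    private final ActionListener<T> delegate;
    private final AtomicBoolean completed = new AtomicBoolean();

    CompleteOnceListenerSketch(ActionListener<T> delegate) {
        this.delegate = delegate;
    }

    @Override
    public void onResponse(T result) {
        if (completed.compareAndSet(false, true)) {
            delegate.onResponse(result); // first completion wins; later calls are ignored
        }
    }

    @Override
    public void onFailure(Exception e) {
        if (completed.compareAndSet(false, true)) {
            delegate.onFailure(e); // e.g. the timeout path; a late response is then dropped
        }
    }

    boolean hasCompleted() {
        return completed.get();
    }
}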
- */ - -package org.elasticsearch.xpack.inference.external.openai; - -import org.apache.http.HttpHeaders; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.inference.InferenceServiceResults; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.http.MockResponse; -import org.elasticsearch.test.http.MockWebServer; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xcontent.XContentType; -import org.elasticsearch.xpack.inference.common.TruncatorTests; -import org.elasticsearch.xpack.inference.external.http.HttpClientManager; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; -import org.elasticsearch.xpack.inference.external.http.sender.Sender; -import org.elasticsearch.xpack.inference.services.ServiceComponents; -import org.junit.After; -import org.junit.Before; - -import java.io.IOException; -import java.net.URISyntaxException; -import java.util.List; -import java.util.concurrent.TimeUnit; - -import static org.elasticsearch.core.Strings.format; -import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; -import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; -import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; -import static org.elasticsearch.xpack.inference.external.http.Utils.getUrl; -import static org.elasticsearch.xpack.inference.external.http.retry.RetrySettingsTests.buildSettingsWithRetryFields; -import static org.elasticsearch.xpack.inference.external.request.openai.OpenAiEmbeddingsRequestTests.createRequest; -import static org.elasticsearch.xpack.inference.external.request.openai.OpenAiUtils.ORGANIZATION_HEADER; -import static org.elasticsearch.xpack.inference.logging.ThrottlerManagerTests.mockThrottlerManager; -import static org.elasticsearch.xpack.inference.results.TextEmbeddingResultsTests.buildExpectation; -import static org.elasticsearch.xpack.inference.services.ServiceComponentsTests.createWithEmptySettings; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.is; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.mock; - -public class OpenAiClientTests extends ESTestCase { - private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); - private final MockWebServer webServer = new MockWebServer(); - private ThreadPool threadPool; - private HttpClientManager clientManager; - - @Before - public void init() throws Exception { - webServer.start(); - threadPool = createThreadPool(inferenceUtilityPool()); - clientManager = HttpClientManager.create(Settings.EMPTY, threadPool, mockClusterServiceEmpty(), mockThrottlerManager()); - } - - @After - public void shutdown() throws IOException { - clientManager.close(); - terminate(threadPool); - webServer.close(); - } - - public void testSend_SuccessfulResponse() throws IOException, URISyntaxException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); - - try (var sender = senderFactory.createSender("test_service")) { - sender.start(); - - String responseJson = """ - { - "object": "list", - "data": [ - { - "object": "embedding", - "index": 0, - "embedding": [ - 0.0123, - 
-0.0123 - ] - } - ], - "model": "text-embedding-ada-002-v2", - "usage": { - "prompt_tokens": 8, - "total_tokens": 8 - } - } - """; - webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); - - OpenAiClient openAiClient = new OpenAiClient(sender, createWithEmptySettings(threadPool)); - - PlainActionFuture listener = new PlainActionFuture<>(); - openAiClient.send(createRequest(getUrl(webServer), "org", "secret", "abc", "model", "user"), listener); - - var result = listener.actionGet(TIMEOUT); - - assertThat(result.asMap(), is(buildExpectation(List.of(List.of(0.0123F, -0.0123F))))); - - assertThat(webServer.requests(), hasSize(1)); - assertNull(webServer.requests().get(0).getUri().getQuery()); - assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); - assertThat(webServer.requests().get(0).getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer secret")); - assertThat(webServer.requests().get(0).getHeader(ORGANIZATION_HEADER), equalTo("org")); - - var requestMap = entityAsMap(webServer.requests().get(0).getBody()); - assertThat(requestMap.size(), is(3)); - assertThat(requestMap.get("input"), is(List.of("abc"))); - assertThat(requestMap.get("model"), is("model")); - assertThat(requestMap.get("user"), is("user")); - } - } - - public void testSend_SuccessfulResponse_WithoutUser() throws IOException, URISyntaxException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); - - try (var sender = senderFactory.createSender("test_service")) { - sender.start(); - - String responseJson = """ - { - "object": "list", - "data": [ - { - "object": "embedding", - "index": 0, - "embedding": [ - 0.0123, - -0.0123 - ] - } - ], - "model": "text-embedding-ada-002-v2", - "usage": { - "prompt_tokens": 8, - "total_tokens": 8 - } - } - """; - webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); - - OpenAiClient openAiClient = new OpenAiClient(sender, createWithEmptySettings(threadPool)); - - PlainActionFuture listener = new PlainActionFuture<>(); - openAiClient.send(createRequest(getUrl(webServer), "org", "secret", "abc", "model", null), listener); - - var result = listener.actionGet(TIMEOUT); - - assertThat(result.asMap(), is(buildExpectation(List.of(List.of(0.0123F, -0.0123F))))); - - assertThat(webServer.requests(), hasSize(1)); - assertNull(webServer.requests().get(0).getUri().getQuery()); - assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); - assertThat(webServer.requests().get(0).getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer secret")); - assertThat(webServer.requests().get(0).getHeader(ORGANIZATION_HEADER), equalTo("org")); - - var requestMap = entityAsMap(webServer.requests().get(0).getBody()); - assertThat(requestMap.size(), is(2)); - assertThat(requestMap.get("input"), is(List.of("abc"))); - assertThat(requestMap.get("model"), is("model")); - } - } - - public void testSend_SuccessfulResponse_WithoutOrganization() throws IOException, URISyntaxException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); - - try (var sender = senderFactory.createSender("test_service")) { - sender.start(); - - String responseJson = """ - { - "object": "list", - "data": [ - { - "object": "embedding", - "index": 0, - "embedding": [ - 0.0123, - -0.0123 - ] - } - ], - "model": "text-embedding-ada-002-v2", - 
"usage": { - "prompt_tokens": 8, - "total_tokens": 8 - } - } - """; - webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); - - OpenAiClient openAiClient = new OpenAiClient(sender, createWithEmptySettings(threadPool)); - - PlainActionFuture listener = new PlainActionFuture<>(); - openAiClient.send(createRequest(getUrl(webServer), null, "secret", "abc", "model", null), listener); - - var result = listener.actionGet(TIMEOUT); - - assertThat(result.asMap(), is(buildExpectation(List.of(List.of(0.0123F, -0.0123F))))); - - assertThat(webServer.requests(), hasSize(1)); - assertNull(webServer.requests().get(0).getUri().getQuery()); - assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); - assertThat(webServer.requests().get(0).getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer secret")); - assertNull(webServer.requests().get(0).getHeader(ORGANIZATION_HEADER)); - - var requestMap = entityAsMap(webServer.requests().get(0).getBody()); - assertThat(requestMap.size(), is(2)); - assertThat(requestMap.get("input"), is(List.of("abc"))); - assertThat(requestMap.get("model"), is("model")); - } - } - - public void testSend_FailsFromInvalidResponseFormat() throws IOException, URISyntaxException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); - - try (var sender = senderFactory.createSender("test_service")) { - sender.start(); - - String responseJson = """ - { - "object": "list", - "data_does_not_exist": [ - { - "object": "embedding", - "index": 0, - "embedding": [ - 0.0123, - -0.0123 - ] - } - ], - "model": "text-embedding-ada-002-v2", - "usage": { - "prompt_tokens": 8, - "total_tokens": 8 - } - } - """; - webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); - - OpenAiClient openAiClient = new OpenAiClient( - sender, - new ServiceComponents( - threadPool, - mockThrottlerManager(), - // timeout as zero for no retries - buildSettingsWithRetryFields( - TimeValue.timeValueMillis(1), - TimeValue.timeValueMinutes(1), - TimeValue.timeValueSeconds(0) - ), - TruncatorTests.createTruncator() - ) - ); - - PlainActionFuture listener = new PlainActionFuture<>(); - openAiClient.send(createRequest(getUrl(webServer), "org", "secret", "abc", "model", "user"), listener); - - var thrownException = expectThrows(IllegalStateException.class, () -> listener.actionGet(TIMEOUT)); - assertThat(thrownException.getMessage(), is(format("Failed to find required field [data] in OpenAI embeddings response"))); - - assertThat(webServer.requests(), hasSize(1)); - assertNull(webServer.requests().get(0).getUri().getQuery()); - assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); - assertThat(webServer.requests().get(0).getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer secret")); - assertThat(webServer.requests().get(0).getHeader(ORGANIZATION_HEADER), equalTo("org")); - - var requestMap = entityAsMap(webServer.requests().get(0).getBody()); - assertThat(requestMap.size(), is(3)); - assertThat(requestMap.get("input"), is(List.of("abc"))); - assertThat(requestMap.get("model"), is("model")); - assertThat(requestMap.get("user"), is("user")); - } - } - - public void testSend_ThrowsException() throws URISyntaxException, IOException { - var sender = mock(Sender.class); - doThrow(new ElasticsearchException("failed")).when(sender).send(any(), any()); - - OpenAiClient openAiClient = new 
OpenAiClient(sender, createWithEmptySettings(threadPool)); - PlainActionFuture listener = new PlainActionFuture<>(); - openAiClient.send(createRequest(getUrl(webServer), "org", "secret", "abc", "model", "user"), listener); - - var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); - assertThat(thrownException.getMessage(), is("failed")); - } -} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequestTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequestTests.java index 4c4c40e9c1056..ebff1c5e096e8 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequestTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequestTests.java @@ -116,7 +116,7 @@ public static OpenAiEmbeddingsRequest createRequest( String model, @Nullable String user ) { - var embeddingsModel = OpenAiEmbeddingsModelTests.createModel(url, org, apiKey, model, user, null); + var embeddingsModel = OpenAiEmbeddingsModelTests.createModel(url, org, apiKey, model, user, (Integer) null); var account = new OpenAiAccount(embeddingsModel.getServiceSettings().uri(), org, embeddingsModel.getSecretSettings().apiKey()); return new OpenAiEmbeddingsRequest( diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/SenderServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/SenderServiceTests.java index 22a7224d73549..5c438644a18c5 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/SenderServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/SenderServiceTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.inference.services; -import org.apache.lucene.util.SetOnce; import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; @@ -20,7 +19,7 @@ import org.elasticsearch.inference.TaskType; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.external.http.sender.Sender; import org.junit.After; import org.junit.Before; @@ -58,10 +57,10 @@ public void shutdown() throws IOException { public void testStart_InitializesTheSender() throws IOException { var sender = mock(Sender.class); - var factory = mock(HttpRequestSenderFactory.class); + var factory = mock(HttpRequestSender.Factory.class); when(factory.createSender(anyString())).thenReturn(sender); - try (var service = new TestSenderService(new SetOnce<>(factory), new SetOnce<>(createWithEmptySettings(threadPool)))) { + try (var service = new TestSenderService(factory, createWithEmptySettings(threadPool))) { PlainActionFuture listener = new PlainActionFuture<>(); service.start(mock(Model.class), listener); @@ -78,10 +77,10 @@ public void testStart_InitializesTheSender() throws IOException { public void testStart_CallingStartTwiceKeepsSameSenderReference() throws IOException { var sender = mock(Sender.class); - var factory = 
mock(HttpRequestSenderFactory.class); + var factory = mock(HttpRequestSender.Factory.class); when(factory.createSender(anyString())).thenReturn(sender); - try (var service = new TestSenderService(new SetOnce<>(factory), new SetOnce<>(createWithEmptySettings(threadPool)))) { + try (var service = new TestSenderService(factory, createWithEmptySettings(threadPool))) { PlainActionFuture listener = new PlainActionFuture<>(); service.start(mock(Model.class), listener); listener.actionGet(TIMEOUT); @@ -99,7 +98,7 @@ public void testStart_CallingStartTwiceKeepsSameSenderReference() throws IOExcep } private static final class TestSenderService extends SenderService { - TestSenderService(SetOnce factory, SetOnce serviceComponents) { + TestSenderService(HttpRequestSender.Factory factory, ServiceComponents serviceComponents) { super(factory, serviceComponents); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ServiceComponentsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ServiceComponentsTests.java index 77713fbfc30a5..fd568bf7f15da 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ServiceComponentsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ServiceComponentsTests.java @@ -18,4 +18,8 @@ public class ServiceComponentsTests extends ESTestCase { public static ServiceComponents createWithEmptySettings(ThreadPool threadPool) { return new ServiceComponents(threadPool, mockThrottlerManager(), Settings.EMPTY, TruncatorTests.createTruncator()); } + + public static ServiceComponents createWithSettings(ThreadPool threadPool, Settings settings) { + return new ServiceComponents(threadPool, mockThrottlerManager(), settings, TruncatorTests.createTruncator()); + } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java index f9b76dfcf2528..356da0ece08af 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java @@ -10,7 +10,6 @@ package org.elasticsearch.xpack.inference.services.cohere; import org.apache.http.HttpHeaders; -import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; @@ -29,7 +28,8 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderTests; import org.elasticsearch.xpack.inference.external.http.sender.Sender; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; import org.elasticsearch.xpack.inference.services.cohere.embeddings.CohereEmbeddingType; @@ -93,13 +93,7 @@ public void shutdown() throws IOException { } public void testParseRequestConfig_CreatesACohereEmbeddingsModel() throws IOException { - try ( - var service = new CohereService( - new 
SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { - + try (var service = createCohereService()) { ActionListener modelListener = ActionListener.wrap(model -> { MatcherAssert.assertThat(model, instanceOf(CohereEmbeddingsModel.class)); @@ -130,13 +124,7 @@ public void testParseRequestConfig_CreatesACohereEmbeddingsModel() throws IOExce } public void testParseRequestConfig_ThrowsUnsupportedModelType() throws IOException { - try ( - var service = new CohereService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { - + try (var service = createCohereService()) { var failureListener = getModelListenerForException( ElasticsearchStatusException.class, "The [cohere] service does not support task type [sparse_embedding]" @@ -164,12 +152,7 @@ private static ActionListener getModelListenerForException(Class excep } public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInConfig() throws IOException { - try ( - var service = new CohereService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createCohereService()) { var config = getRequestConfigMap( CohereEmbeddingsServiceSettingsTests.getServiceSettingsMap("url", null, null), getTaskSettingsMapEmpty(), @@ -186,12 +169,7 @@ public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInConfig() throws I } public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInServiceSettingsMap() throws IOException { - try ( - var service = new CohereService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createCohereService()) { var serviceSettings = CohereEmbeddingsServiceSettingsTests.getServiceSettingsMap("url", "model", null); serviceSettings.put("extra_key", "value"); @@ -206,12 +184,7 @@ public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInServiceSettingsMa } public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInTaskSettingsMap() throws IOException { - try ( - var service = new CohereService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createCohereService()) { var taskSettingsMap = getTaskSettingsMap(InputType.INGEST, null); taskSettingsMap.put("extra_key", "value"); @@ -231,12 +204,7 @@ public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInTaskSettingsMap() } public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInSecretSettingsMap() throws IOException { - try ( - var service = new CohereService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createCohereService()) { var secretSettingsMap = getSecretSettingsMap("secret"); secretSettingsMap.put("extra_key", "value"); @@ -255,12 +223,7 @@ public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInSecretSettingsMap } public void testParseRequestConfig_CreatesACohereEmbeddingsModelWithoutUrl() throws IOException { - try ( - var service = new CohereService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createCohereService()) { var modelListener = ActionListener.wrap((model) -> { MatcherAssert.assertThat(model, instanceOf(CohereEmbeddingsModel.class)); @@ -286,12 +249,7 @@ public void 
testParseRequestConfig_CreatesACohereEmbeddingsModelWithoutUrl() thr } public void testParsePersistedConfigWithSecrets_CreatesACohereEmbeddingsModel() throws IOException { - try ( - var service = new CohereService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createCohereService()) { var persistedConfig = getPersistedConfigMap( CohereEmbeddingsServiceSettingsTests.getServiceSettingsMap("url", "model", null), getTaskSettingsMap(null, null), @@ -316,12 +274,7 @@ public void testParsePersistedConfigWithSecrets_CreatesACohereEmbeddingsModel() } public void testParsePersistedConfigWithSecrets_ThrowsErrorTryingToParseInvalidModel() throws IOException { - try ( - var service = new CohereService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createCohereService()) { var persistedConfig = getPersistedConfigMap( CohereEmbeddingsServiceSettingsTests.getServiceSettingsMap("url", null, null), getTaskSettingsMapEmpty(), @@ -346,12 +299,7 @@ public void testParsePersistedConfigWithSecrets_ThrowsErrorTryingToParseInvalidM } public void testParsePersistedConfigWithSecrets_CreatesACohereEmbeddingsModelWithoutUrl() throws IOException { - try ( - var service = new CohereService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createCohereService()) { var persistedConfig = getPersistedConfigMap( CohereEmbeddingsServiceSettingsTests.getServiceSettingsMap(null, null, null), getTaskSettingsMap(InputType.INGEST, null), @@ -375,12 +323,7 @@ public void testParsePersistedConfigWithSecrets_CreatesACohereEmbeddingsModelWit } public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExistsInConfig() throws IOException { - try ( - var service = new CohereService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createCohereService()) { var persistedConfig = getPersistedConfigMap( CohereEmbeddingsServiceSettingsTests.getServiceSettingsMap("url", "model", CohereEmbeddingType.INT8), getTaskSettingsMap(InputType.SEARCH, CohereTruncation.NONE), @@ -410,12 +353,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists } public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExistsInSecretsSettings() throws IOException { - try ( - var service = new CohereService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createCohereService()) { var secretSettingsMap = getSecretSettingsMap("secret"); secretSettingsMap.put("extra_key", "value"); @@ -442,12 +380,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists } public void testParsePersistedConfigWithSecrets_NotThrowWhenAnExtraKeyExistsInSecrets() throws IOException { - try ( - var service = new CohereService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createCohereService()) { var persistedConfig = getPersistedConfigMap( CohereEmbeddingsServiceSettingsTests.getServiceSettingsMap("url", "model", null), getTaskSettingsMap(null, null), @@ -473,12 +406,7 @@ public void testParsePersistedConfigWithSecrets_NotThrowWhenAnExtraKeyExistsInSe } public void 
testParsePersistedConfigWithSecrets_NotThrowWhenAnExtraKeyExistsInServiceSettings() throws IOException { - try ( - var service = new CohereService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createCohereService()) { var serviceSettingsMap = CohereEmbeddingsServiceSettingsTests.getServiceSettingsMap("url", null, null); serviceSettingsMap.put("extra_key", "value"); @@ -501,12 +429,7 @@ public void testParsePersistedConfigWithSecrets_NotThrowWhenAnExtraKeyExistsInSe } public void testParsePersistedConfigWithSecrets_NotThrowWhenAnExtraKeyExistsInTaskSettings() throws IOException { - try ( - var service = new CohereService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createCohereService()) { var taskSettingsMap = getTaskSettingsMap(InputType.SEARCH, null); taskSettingsMap.put("extra_key", "value"); @@ -534,12 +457,7 @@ public void testParsePersistedConfigWithSecrets_NotThrowWhenAnExtraKeyExistsInTa } public void testParsePersistedConfig_CreatesACohereEmbeddingsModel() throws IOException { - try ( - var service = new CohereService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createCohereService()) { var persistedConfig = getPersistedConfigMap( CohereEmbeddingsServiceSettingsTests.getServiceSettingsMap("url", "model", null), getTaskSettingsMap(null, CohereTruncation.NONE) @@ -558,12 +476,7 @@ public void testParsePersistedConfig_CreatesACohereEmbeddingsModel() throws IOEx } public void testParsePersistedConfig_ThrowsErrorTryingToParseInvalidModel() throws IOException { - try ( - var service = new CohereService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createCohereService()) { var persistedConfig = getPersistedConfigMap( CohereEmbeddingsServiceSettingsTests.getServiceSettingsMap("url", null, null), getTaskSettingsMapEmpty() @@ -582,12 +495,7 @@ public void testParsePersistedConfig_ThrowsErrorTryingToParseInvalidModel() thro } public void testParsePersistedConfig_CreatesACohereEmbeddingsModelWithoutUrl() throws IOException { - try ( - var service = new CohereService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createCohereService()) { var persistedConfig = getPersistedConfigMap( CohereEmbeddingsServiceSettingsTests.getServiceSettingsMap(null, "model", CohereEmbeddingType.FLOAT), getTaskSettingsMap(null, null) @@ -607,12 +515,7 @@ public void testParsePersistedConfig_CreatesACohereEmbeddingsModelWithoutUrl() t } public void testParsePersistedConfig_DoesNotThrowWhenAnExtraKeyExistsInConfig() throws IOException { - try ( - var service = new CohereService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createCohereService()) { var persistedConfig = getPersistedConfigMap( CohereEmbeddingsServiceSettingsTests.getServiceSettingsMap("url", null, null), getTaskSettingsMapEmpty() @@ -631,12 +534,7 @@ public void testParsePersistedConfig_DoesNotThrowWhenAnExtraKeyExistsInConfig() } public void testParsePersistedConfig_NotThrowWhenAnExtraKeyExistsInServiceSettings() throws IOException { - try ( - var service = new CohereService( - new 
SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createCohereService()) { var serviceSettingsMap = CohereEmbeddingsServiceSettingsTests.getServiceSettingsMap("url", null, null); serviceSettingsMap.put("extra_key", "value"); @@ -654,12 +552,7 @@ public void testParsePersistedConfig_NotThrowWhenAnExtraKeyExistsInServiceSettin } public void testParsePersistedConfig_NotThrowWhenAnExtraKeyExistsInTaskSettings() throws IOException { - try ( - var service = new CohereService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createCohereService()) { var taskSettingsMap = getTaskSettingsMap(InputType.INGEST, null); taskSettingsMap.put("extra_key", "value"); @@ -683,12 +576,12 @@ public void testParsePersistedConfig_NotThrowWhenAnExtraKeyExistsInTaskSettings( public void testInfer_ThrowsErrorWhenModelIsNotCohereModel() throws IOException { var sender = mock(Sender.class); - var factory = mock(HttpRequestSenderFactory.class); + var factory = mock(HttpRequestSender.Factory.class); when(factory.createSender(anyString())).thenReturn(sender); var mockModel = getInvalidModel("model_id", "service_name"); - try (var service = new CohereService(new SetOnce<>(factory), new SetOnce<>(createWithEmptySettings(threadPool)))) { + try (var service = new CohereService(factory, createWithEmptySettings(threadPool))) { PlainActionFuture listener = new PlainActionFuture<>(); service.infer(mockModel, List.of(""), new HashMap<>(), InputType.INGEST, listener); @@ -708,9 +601,9 @@ public void testInfer_ThrowsErrorWhenModelIsNotCohereModel() throws IOException } public void testInfer_SendsRequest() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); - try (var service = new CohereService(new SetOnce<>(senderFactory), new SetOnce<>(createWithEmptySettings(threadPool)))) { + try (var service = new CohereService(senderFactory, createWithEmptySettings(threadPool))) { String responseJson = """ { @@ -768,9 +661,9 @@ public void testInfer_SendsRequest() throws IOException { } public void testCheckModelConfig_UpdatesDimensions() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); - try (var service = new CohereService(new SetOnce<>(senderFactory), new SetOnce<>(createWithEmptySettings(threadPool)))) { + try (var service = new CohereService(senderFactory, createWithEmptySettings(threadPool))) { String responseJson = """ { @@ -831,9 +724,9 @@ public void testCheckModelConfig_UpdatesDimensions() throws IOException { } public void testInfer_UnauthorisedResponse() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); - try (var service = new CohereService(new SetOnce<>(senderFactory), new SetOnce<>(createWithEmptySettings(threadPool)))) { + try (var service = new CohereService(senderFactory, createWithEmptySettings(threadPool))) { String responseJson = """ { @@ -862,9 +755,9 @@ public void testInfer_UnauthorisedResponse() 
throws IOException { } public void testInfer_SetsInputTypeToIngest_FromInferParameter_WhenTaskSettingsAreEmpty() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); - try (var service = new CohereService(new SetOnce<>(senderFactory), new SetOnce<>(createWithEmptySettings(threadPool)))) { + try (var service = new CohereService(senderFactory, createWithEmptySettings(threadPool))) { String responseJson = """ { @@ -923,9 +816,9 @@ public void testInfer_SetsInputTypeToIngest_FromInferParameter_WhenTaskSettingsA public void testInfer_SetsInputTypeToIngestFromInferParameter_WhenModelSettingIsNull_AndRequestTaskSettingsIsSearch() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); - try (var service = new CohereService(new SetOnce<>(senderFactory), new SetOnce<>(createWithEmptySettings(threadPool)))) { + try (var service = new CohereService(senderFactory, createWithEmptySettings(threadPool))) { String responseJson = """ { @@ -989,9 +882,9 @@ public void testInfer_SetsInputTypeToIngestFromInferParameter_WhenModelSettingIs } public void testInfer_DoesNotSetInputType_WhenNotPresentInTaskSettings_AndUnspecifiedIsPassedInRequest() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); - try (var service = new CohereService(new SetOnce<>(senderFactory), new SetOnce<>(createWithEmptySettings(threadPool)))) { + try (var service = new CohereService(senderFactory, createWithEmptySettings(threadPool))) { String responseJson = """ { @@ -1062,6 +955,10 @@ private Map getRequestConfigMap( ); } + private CohereService createCohereService() { + return new CohereService(mock(HttpRequestSender.Factory.class), createWithEmptySettings(threadPool)); + } + private PeristedConfig getPersistedConfigMap( Map serviceSettings, Map taskSettings, diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/textembedding/TextEmbeddingInternalServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java similarity index 86% rename from x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/textembedding/TextEmbeddingInternalServiceTests.java rename to x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java index 42586b0b15f4e..4f0deaceb17da 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/textembedding/TextEmbeddingInternalServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java @@ -7,7 +7,7 @@ * This file was contributed to by a Generative AI */ -package org.elasticsearch.xpack.inference.services.textembedding; +package org.elasticsearch.xpack.inference.services.elasticsearch; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; @@ -45,7 +45,7 @@ import static 
org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -public class TextEmbeddingInternalServiceTests extends ESTestCase { +public class ElasticsearchInternalServiceTests extends ESTestCase { TaskType taskType = TaskType.TEXT_EMBEDDING; String randomInferenceEntityId = randomAlphaOfLength(10); @@ -59,7 +59,7 @@ public void testParseRequestConfig() { settings.put( ModelConfigurations.SERVICE_SETTINGS, new HashMap<>( - Map.of(TextEmbeddingInternalServiceSettings.NUM_ALLOCATIONS, 1, TextEmbeddingInternalServiceSettings.NUM_THREADS, 4) + Map.of(ElasticsearchInternalServiceSettings.NUM_ALLOCATIONS, 1, ElasticsearchInternalServiceSettings.NUM_THREADS, 4) ) ); @@ -79,12 +79,12 @@ public void testParseRequestConfig() { ModelConfigurations.SERVICE_SETTINGS, new HashMap<>( Map.of( - TextEmbeddingInternalServiceSettings.NUM_ALLOCATIONS, + ElasticsearchInternalServiceSettings.NUM_ALLOCATIONS, 1, - TextEmbeddingInternalServiceSettings.NUM_THREADS, + ElasticsearchInternalServiceSettings.NUM_THREADS, 4, InternalServiceSettings.MODEL_ID, - TextEmbeddingInternalService.MULTILINGUAL_E5_SMALL_MODEL_ID + ElasticsearchInternalService.MULTILINGUAL_E5_SMALL_MODEL_ID ) ) ); @@ -92,7 +92,7 @@ public void testParseRequestConfig() { var e5ServiceSettings = new MultilingualE5SmallInternalServiceSettings( 1, 4, - TextEmbeddingInternalService.MULTILINGUAL_E5_SMALL_MODEL_ID + ElasticsearchInternalService.MULTILINGUAL_E5_SMALL_MODEL_ID ); service.parseRequestConfig( @@ -111,7 +111,7 @@ public void testParseRequestConfig() { settings.put( ModelConfigurations.SERVICE_SETTINGS, new HashMap<>( - Map.of(TextEmbeddingInternalServiceSettings.NUM_ALLOCATIONS, 1, TextEmbeddingInternalServiceSettings.NUM_THREADS, 4) + Map.of(ElasticsearchInternalServiceSettings.NUM_ALLOCATIONS, 1, ElasticsearchInternalServiceSettings.NUM_THREADS, 4) ) ); settings.put("not_a_valid_config_setting", randomAlphaOfLength(10)); @@ -132,12 +132,12 @@ public void testParseRequestConfig() { ModelConfigurations.SERVICE_SETTINGS, new HashMap<>( Map.of( - TextEmbeddingInternalServiceSettings.NUM_ALLOCATIONS, + ElasticsearchInternalServiceSettings.NUM_ALLOCATIONS, 1, - TextEmbeddingInternalServiceSettings.NUM_THREADS, + ElasticsearchInternalServiceSettings.NUM_THREADS, 4, InternalServiceSettings.MODEL_ID, - TextEmbeddingInternalService.MULTILINGUAL_E5_SMALL_MODEL_ID, // we can't directly test the eland case until we mock + ElasticsearchInternalService.MULTILINGUAL_E5_SMALL_MODEL_ID, // we can't directly test the eland case until we mock // the threadpool within the client "not_a_valid_service_setting", randomAlphaOfLength(10) @@ -161,12 +161,12 @@ public void testParseRequestConfig() { ModelConfigurations.SERVICE_SETTINGS, new HashMap<>( Map.of( - TextEmbeddingInternalServiceSettings.NUM_ALLOCATIONS, + ElasticsearchInternalServiceSettings.NUM_ALLOCATIONS, 1, - TextEmbeddingInternalServiceSettings.NUM_THREADS, + ElasticsearchInternalServiceSettings.NUM_THREADS, 4, InternalServiceSettings.MODEL_ID, - TextEmbeddingInternalService.MULTILINGUAL_E5_SMALL_MODEL_ID, // we can't directly test the eland case until we mock + ElasticsearchInternalService.MULTILINGUAL_E5_SMALL_MODEL_ID, // we can't directly test the eland case until we mock // the threadpool within the client "extra_setting_that_should_not_be_here", randomAlphaOfLength(10) @@ -190,12 +190,12 @@ public void testParseRequestConfig() { ModelConfigurations.SERVICE_SETTINGS, new HashMap<>( Map.of( - TextEmbeddingInternalServiceSettings.NUM_ALLOCATIONS, + 
ElasticsearchInternalServiceSettings.NUM_ALLOCATIONS, 1, - TextEmbeddingInternalServiceSettings.NUM_THREADS, + ElasticsearchInternalServiceSettings.NUM_THREADS, 4, InternalServiceSettings.MODEL_ID, - TextEmbeddingInternalService.MULTILINGUAL_E5_SMALL_MODEL_ID // we can't directly test the eland case until we mock + ElasticsearchInternalService.MULTILINGUAL_E5_SMALL_MODEL_ID // we can't directly test the eland case until we mock // the threadpool within the client ) ) @@ -214,7 +214,7 @@ public void testParseRequestConfig() { private ActionListener getModelVerificationActionListener(MultilingualE5SmallInternalServiceSettings e5ServiceSettings) { return ActionListener.wrap(model -> { assertEquals( - new MultilingualE5SmallModel(randomInferenceEntityId, taskType, TextEmbeddingInternalService.NAME, e5ServiceSettings), + new MultilingualE5SmallModel(randomInferenceEntityId, taskType, ElasticsearchInternalService.NAME, e5ServiceSettings), model ); }, e -> { fail("Model parsing failed " + e.getMessage()); }); @@ -229,14 +229,14 @@ public void testParsePersistedConfig() { settings.put( ModelConfigurations.SERVICE_SETTINGS, new HashMap<>( - Map.of(TextEmbeddingInternalServiceSettings.NUM_ALLOCATIONS, 1, TextEmbeddingInternalServiceSettings.NUM_THREADS, 4) + Map.of(ElasticsearchInternalServiceSettings.NUM_ALLOCATIONS, 1, ElasticsearchInternalServiceSettings.NUM_THREADS, 4) ) ); var e5ServiceSettings = new MultilingualE5SmallInternalServiceSettings( 1, 4, - TextEmbeddingInternalService.MULTILINGUAL_E5_SMALL_MODEL_ID + ElasticsearchInternalService.MULTILINGUAL_E5_SMALL_MODEL_ID ); expectThrows(IllegalArgumentException.class, () -> service.parsePersistedConfig(randomInferenceEntityId, taskType, settings)); @@ -253,9 +253,9 @@ public void testParsePersistedConfig() { ModelConfigurations.SERVICE_SETTINGS, new HashMap<>( Map.of( - TextEmbeddingInternalServiceSettings.NUM_ALLOCATIONS, + ElasticsearchInternalServiceSettings.NUM_ALLOCATIONS, 1, - TextEmbeddingInternalServiceSettings.NUM_THREADS, + ElasticsearchInternalServiceSettings.NUM_THREADS, 4, InternalServiceSettings.MODEL_ID, "invalid" @@ -266,7 +266,7 @@ public void testParsePersistedConfig() { CustomElandModel parsedModel = (CustomElandModel) service.parsePersistedConfig(randomInferenceEntityId, taskType, settings); var elandServiceSettings = new CustomElandInternalServiceSettings(1, 4, "invalid"); assertEquals( - new CustomElandModel(randomInferenceEntityId, taskType, TextEmbeddingInternalService.NAME, elandServiceSettings), + new CustomElandModel(randomInferenceEntityId, taskType, ElasticsearchInternalService.NAME, elandServiceSettings), parsedModel ); } @@ -279,12 +279,12 @@ public void testParsePersistedConfig() { ModelConfigurations.SERVICE_SETTINGS, new HashMap<>( Map.of( - TextEmbeddingInternalServiceSettings.NUM_ALLOCATIONS, + ElasticsearchInternalServiceSettings.NUM_ALLOCATIONS, 1, - TextEmbeddingInternalServiceSettings.NUM_THREADS, + ElasticsearchInternalServiceSettings.NUM_THREADS, 4, InternalServiceSettings.MODEL_ID, - TextEmbeddingInternalService.MULTILINGUAL_E5_SMALL_MODEL_ID + ElasticsearchInternalService.MULTILINGUAL_E5_SMALL_MODEL_ID ) ) ); @@ -292,7 +292,7 @@ public void testParsePersistedConfig() { var e5ServiceSettings = new MultilingualE5SmallInternalServiceSettings( 1, 4, - TextEmbeddingInternalService.MULTILINGUAL_E5_SMALL_MODEL_ID + ElasticsearchInternalService.MULTILINGUAL_E5_SMALL_MODEL_ID ); MultilingualE5SmallModel parsedModel = (MultilingualE5SmallModel) service.parsePersistedConfig( @@ -301,7 +301,7 @@ public void 
testParsePersistedConfig() { settings ); assertEquals( - new MultilingualE5SmallModel(randomInferenceEntityId, taskType, TextEmbeddingInternalService.NAME, e5ServiceSettings), + new MultilingualE5SmallModel(randomInferenceEntityId, taskType, ElasticsearchInternalService.NAME, e5ServiceSettings), parsedModel ); } @@ -313,7 +313,7 @@ public void testParsePersistedConfig() { settings.put( ModelConfigurations.SERVICE_SETTINGS, new HashMap<>( - Map.of(TextEmbeddingInternalServiceSettings.NUM_ALLOCATIONS, 1, TextEmbeddingInternalServiceSettings.NUM_THREADS, 4) + Map.of(ElasticsearchInternalServiceSettings.NUM_ALLOCATIONS, 1, ElasticsearchInternalServiceSettings.NUM_THREADS, 4) ) ); settings.put("not_a_valid_config_setting", randomAlphaOfLength(10)); @@ -328,9 +328,9 @@ public void testParsePersistedConfig() { ModelConfigurations.SERVICE_SETTINGS, new HashMap<>( Map.of( - TextEmbeddingInternalServiceSettings.NUM_ALLOCATIONS, + ElasticsearchInternalServiceSettings.NUM_ALLOCATIONS, 1, - TextEmbeddingInternalServiceSettings.NUM_THREADS, + ElasticsearchInternalServiceSettings.NUM_THREADS, 4, "not_a_valid_service_setting", randomAlphaOfLength(10) @@ -403,9 +403,9 @@ public void testChunkInfer() { assertTrue("Listener not called", gotResults.get()); } - private TextEmbeddingInternalService createService(Client client) { + private ElasticsearchInternalService createService(Client client) { var context = new InferenceServiceExtension.InferenceServiceFactoryContext(client); - return new TextEmbeddingInternalService(context); + return new ElasticsearchInternalService(context); } public static Model randomModelConfig(String inferenceEntityId) { @@ -417,7 +417,7 @@ public static Model randomModelConfig(String inferenceEntityId) { case "MultilingualE5SmallModel" -> new MultilingualE5SmallModel( inferenceEntityId, TaskType.TEXT_EMBEDDING, - TextEmbeddingInternalService.NAME, + ElasticsearchInternalService.NAME, MultilingualE5SmallInternalServiceSettingsTests.createRandom() ); default -> throw new IllegalArgumentException("model " + model + " is not supported for testing"); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/textembedding/MultilingualE5SmallInternalServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/MultilingualE5SmallInternalServiceSettingsTests.java similarity index 95% rename from x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/textembedding/MultilingualE5SmallInternalServiceSettingsTests.java rename to x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/MultilingualE5SmallInternalServiceSettingsTests.java index 10e34a277eea3..fbff04efe6883 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/textembedding/MultilingualE5SmallInternalServiceSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/MultilingualE5SmallInternalServiceSettingsTests.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.inference.services.textembedding; +package org.elasticsearch.xpack.inference.services.elasticsearch; import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.io.stream.Writeable; @@ -24,7 +24,7 @@ public static MultilingualE5SmallInternalServiceSettings createRandom() { return new MultilingualE5SmallInternalServiceSettings( randomIntBetween(1, 4), randomIntBetween(1, 4), - randomFrom(TextEmbeddingInternalService.MULTILINGUAL_E5_SMALL_VALID_IDS) + randomFrom(ElasticsearchInternalService.MULTILINGUAL_E5_SMALL_VALID_IDS) ); } @@ -43,7 +43,7 @@ public void testFromMap_DefaultModelVersion() { } public void testFromMap() { - String randomModelVariant = randomFrom(TextEmbeddingInternalService.MULTILINGUAL_E5_SMALL_VALID_IDS); + String randomModelVariant = randomFrom(ElasticsearchInternalService.MULTILINGUAL_E5_SMALL_VALID_IDS); var serviceSettings = MultilingualE5SmallInternalServiceSettings.fromMap( new HashMap<>( Map.of( @@ -138,7 +138,7 @@ protected MultilingualE5SmallInternalServiceSettings mutateInstance(Multilingual instance.getModelId() ); case 2 -> { - var versions = new HashSet<>(TextEmbeddingInternalService.MULTILINGUAL_E5_SMALL_VALID_IDS); + var versions = new HashSet<>(ElasticsearchInternalService.MULTILINGUAL_E5_SMALL_VALID_IDS); versions.remove(instance.getModelId()); yield new MultilingualE5SmallInternalServiceSettings( instance.getNumAllocations(), diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceBaseServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceBaseServiceTests.java index dcf8b3a900a22..cd896cb18440a 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceBaseServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceBaseServiceTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.inference.services.huggingface; -import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.TransportVersion; import org.elasticsearch.action.support.PlainActionFuture; @@ -17,7 +16,7 @@ import org.elasticsearch.inference.TaskType; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.external.http.sender.Sender; import org.elasticsearch.xpack.inference.services.ServiceComponents; import org.junit.After; @@ -58,12 +57,12 @@ public void shutdown() throws IOException { public void testInfer_ThrowsErrorWhenModelIsNotHuggingFaceModel() throws IOException { var sender = mock(Sender.class); - var factory = mock(HttpRequestSenderFactory.class); + var factory = mock(HttpRequestSender.Factory.class); when(factory.createSender(anyString())).thenReturn(sender); var mockModel = getInvalidModel("model_id", "service_name"); - try (var service = new TestService(new SetOnce<>(factory), new SetOnce<>(createWithEmptySettings(threadPool)))) { + try (var service = new TestService(factory, createWithEmptySettings(threadPool))) { PlainActionFuture listener = new PlainActionFuture<>(); service.infer(mockModel, List.of(""), new HashMap<>(), InputType.INGEST, listener); @@ -83,8 +82,7 @@ 
public void testInfer_ThrowsErrorWhenModelIsNotHuggingFaceModel() throws IOExcep } private static final class TestService extends HuggingFaceBaseService { - - TestService(SetOnce factory, SetOnce serviceComponents) { + TestService(HttpRequestSender.Factory factory, ServiceComponents serviceComponents) { super(factory, serviceComponents); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceTests.java index b34a8ad8a3d65..c4c49065cd79c 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceTests.java @@ -10,7 +10,6 @@ package org.elasticsearch.xpack.inference.services.huggingface; import org.apache.http.HttpHeaders; -import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; @@ -29,7 +28,8 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderTests; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; import org.elasticsearch.xpack.inference.results.SparseEmbeddingResultsTests; import org.elasticsearch.xpack.inference.services.huggingface.elser.HuggingFaceElserModel; @@ -83,13 +83,7 @@ public void shutdown() throws IOException { } public void testParseRequestConfig_CreatesAnEmbeddingsModel() throws IOException { - try ( - var service = new HuggingFaceService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { - + try (var service = createHuggingFaceService()) { ActionListener modelVerificationActionListener = ActionListener.wrap((model) -> { assertThat(model, instanceOf(HuggingFaceEmbeddingsModel.class)); @@ -109,12 +103,7 @@ public void testParseRequestConfig_CreatesAnEmbeddingsModel() throws IOException } public void testParseRequestConfig_CreatesAnElserModel() throws IOException { - try ( - var service = new HuggingFaceService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createHuggingFaceService()) { ActionListener modelVerificationActionListener = ActionListener.wrap((model) -> { assertThat(model, instanceOf(HuggingFaceElserModel.class)); @@ -134,12 +123,7 @@ public void testParseRequestConfig_CreatesAnElserModel() throws IOException { } public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInConfig() throws IOException { - try ( - var service = new HuggingFaceService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createHuggingFaceService()) { var config = getRequestConfigMap(getServiceSettingsMap("url"), getSecretSettingsMap("secret")); config.put("extra_key", "value"); @@ -159,12 +143,7 @@ public void 
testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInConfig() throws I } public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInServiceSettingsMap() throws IOException { - try ( - var service = new HuggingFaceService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createHuggingFaceService()) { var serviceSettings = getServiceSettingsMap("url"); serviceSettings.put("extra_key", "value"); @@ -186,12 +165,7 @@ public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInServiceSettingsMa } public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInSecretSettingsMap() throws IOException { - try ( - var service = new HuggingFaceService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createHuggingFaceService()) { var secretSettingsMap = getSecretSettingsMap("secret"); secretSettingsMap.put("extra_key", "value"); @@ -213,12 +187,7 @@ public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInSecretSettingsMap } public void testParsePersistedConfigWithSecrets_CreatesAnEmbeddingsModel() throws IOException { - try ( - var service = new HuggingFaceService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createHuggingFaceService()) { var persistedConfig = getPersistedConfigMap(getServiceSettingsMap("url"), getSecretSettingsMap("secret")); var model = service.parsePersistedConfigWithSecrets( @@ -237,12 +206,7 @@ public void testParsePersistedConfigWithSecrets_CreatesAnEmbeddingsModel() throw } public void testParsePersistedConfigWithSecrets_CreatesAnElserModel() throws IOException { - try ( - var service = new HuggingFaceService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createHuggingFaceService()) { var persistedConfig = getPersistedConfigMap(getServiceSettingsMap("url"), getSecretSettingsMap("secret")); var model = service.parsePersistedConfigWithSecrets( @@ -261,12 +225,7 @@ public void testParsePersistedConfigWithSecrets_CreatesAnElserModel() throws IOE } public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExistsInConfig() throws IOException { - try ( - var service = new HuggingFaceService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createHuggingFaceService()) { var persistedConfig = getPersistedConfigMap(getServiceSettingsMap("url"), getSecretSettingsMap("secret")); persistedConfig.config().put("extra_key", "value"); @@ -286,12 +245,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists } public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExistsInSecretsSettings() throws IOException { - try ( - var service = new HuggingFaceService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createHuggingFaceService()) { var secretSettingsMap = getSecretSettingsMap("secret"); secretSettingsMap.put("extra_key", "value"); @@ -313,12 +267,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists } public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExistsInSecrets() throws IOException { - try ( - var service = new 
HuggingFaceService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createHuggingFaceService()) { var persistedConfig = getPersistedConfigMap(getServiceSettingsMap("url"), getSecretSettingsMap("secret")); persistedConfig.secrets.put("extra_key", "value"); @@ -338,12 +287,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists } public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExistsInServiceSettings() throws IOException { - try ( - var service = new HuggingFaceService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createHuggingFaceService()) { var serviceSettingsMap = getServiceSettingsMap("url"); serviceSettingsMap.put("extra_key", "value"); @@ -365,12 +309,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists } public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExistsInTaskSettings() throws IOException { - try ( - var service = new HuggingFaceService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createHuggingFaceService()) { var taskSettingsMap = new HashMap(); taskSettingsMap.put("extra_key", "value"); @@ -392,12 +331,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists } public void testParsePersistedConfig_CreatesAnEmbeddingsModel() throws IOException { - try ( - var service = new HuggingFaceService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createHuggingFaceService()) { var persistedConfig = getPersistedConfigMap(getServiceSettingsMap("url")); var model = service.parsePersistedConfig("id", TaskType.TEXT_EMBEDDING, persistedConfig.config()); @@ -411,12 +345,7 @@ public void testParsePersistedConfig_CreatesAnEmbeddingsModel() throws IOExcepti } public void testParsePersistedConfig_CreatesAnElserModel() throws IOException { - try ( - var service = new HuggingFaceService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createHuggingFaceService()) { var persistedConfig = getPersistedConfigMap(getServiceSettingsMap("url")); var model = service.parsePersistedConfig("id", TaskType.SPARSE_EMBEDDING, persistedConfig.config()); @@ -430,12 +359,7 @@ public void testParsePersistedConfig_CreatesAnElserModel() throws IOException { } public void testParsePersistedConfig_DoesNotThrowWhenAnExtraKeyExistsInConfig() throws IOException { - try ( - var service = new HuggingFaceService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createHuggingFaceService()) { var persistedConfig = getPersistedConfigMap(getServiceSettingsMap("url")); persistedConfig.config().put("extra_key", "value"); @@ -450,12 +374,7 @@ public void testParsePersistedConfig_DoesNotThrowWhenAnExtraKeyExistsInConfig() } public void testParsePersistedConfig_DoesNotThrowWhenAnExtraKeyExistsInServiceSettings() throws IOException { - try ( - var service = new HuggingFaceService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createHuggingFaceService()) { var 
serviceSettingsMap = getServiceSettingsMap("url"); serviceSettingsMap.put("extra_key", "value"); @@ -472,12 +391,7 @@ public void testParsePersistedConfig_DoesNotThrowWhenAnExtraKeyExistsInServiceSe } public void testParsePersistedConfig_DoesNotThrowWhenAnExtraKeyExistsInTaskSettings() throws IOException { - try ( - var service = new HuggingFaceService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createHuggingFaceService()) { var taskSettingsMap = new HashMap(); taskSettingsMap.put("extra_key", "value"); @@ -494,9 +408,9 @@ public void testParsePersistedConfig_DoesNotThrowWhenAnExtraKeyExistsInTaskSetti } public void testInfer_SendsEmbeddingsRequest() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); - try (var service = new HuggingFaceService(new SetOnce<>(senderFactory), new SetOnce<>(createWithEmptySettings(threadPool)))) { + try (var service = new HuggingFaceService(senderFactory, createWithEmptySettings(threadPool))) { String responseJson = """ { @@ -532,9 +446,9 @@ public void testInfer_SendsEmbeddingsRequest() throws IOException { } public void testInfer_SendsElserRequest() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); - try (var service = new HuggingFaceService(new SetOnce<>(senderFactory), new SetOnce<>(createWithEmptySettings(threadPool)))) { + try (var service = new HuggingFaceService(senderFactory, createWithEmptySettings(threadPool))) { String responseJson = """ [ @@ -574,9 +488,9 @@ public void testInfer_SendsElserRequest() throws IOException { } public void testCheckModelConfig_IncludesMaxTokens() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); - try (var service = new HuggingFaceService(new SetOnce<>(senderFactory), new SetOnce<>(createWithEmptySettings(threadPool)))) { + try (var service = new HuggingFaceService(senderFactory, createWithEmptySettings(threadPool))) { String responseJson = """ { @@ -598,6 +512,10 @@ public void testCheckModelConfig_IncludesMaxTokens() throws IOException { } } + private HuggingFaceService createHuggingFaceService() { + return new HuggingFaceService(mock(HttpRequestSender.Factory.class), createWithEmptySettings(threadPool)); + } + private Map getRequestConfigMap(Map serviceSettings, Map secretSettings) { var builtServiceSettings = new HashMap<>(); builtServiceSettings.putAll(serviceSettings); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java index b3d9a98bad189..d819b2b243872 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java @@ -10,7 +10,6 @@ package org.elasticsearch.xpack.inference.services.openai; import 
org.apache.http.HttpHeaders; -import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; @@ -30,7 +29,8 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderTests; import org.elasticsearch.xpack.inference.external.http.sender.Sender; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; import org.elasticsearch.xpack.inference.services.ServiceFields; @@ -93,12 +93,7 @@ public void shutdown() throws IOException { } public void testParseRequestConfig_CreatesAnOpenAiEmbeddingsModel() throws IOException { - try ( - var service = new OpenAiService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createOpenAiService()) { ActionListener modelVerificationListener = ActionListener.wrap(model -> { assertThat(model, instanceOf(OpenAiEmbeddingsModel.class)); @@ -125,12 +120,7 @@ public void testParseRequestConfig_CreatesAnOpenAiEmbeddingsModel() throws IOExc } public void testParseRequestConfig_ThrowsUnsupportedModelType() throws IOException { - try ( - var service = new OpenAiService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createOpenAiService()) { ActionListener modelVerificationListener = ActionListener.wrap( model -> fail("Expected exception, but got model: " + model), exception -> { @@ -154,12 +144,7 @@ public void testParseRequestConfig_ThrowsUnsupportedModelType() throws IOExcepti } public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInConfig() throws IOException { - try ( - var service = new OpenAiService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createOpenAiService()) { var config = getRequestConfigMap( getServiceSettingsMap("model", "url", "org"), getTaskSettingsMap("user"), @@ -183,12 +168,7 @@ public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInConfig() throws I } public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInServiceSettingsMap() throws IOException { - try ( - var service = new OpenAiService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createOpenAiService()) { var serviceSettings = getServiceSettingsMap("model", "url", "org"); serviceSettings.put("extra_key", "value"); @@ -206,12 +186,7 @@ public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInServiceSettingsMa } public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInTaskSettingsMap() throws IOException { - try ( - var service = new OpenAiService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createOpenAiService()) { var taskSettingsMap = getTaskSettingsMap("user"); taskSettingsMap.put("extra_key", "value"); @@ -229,12 +204,7 @@ public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInTaskSettingsMap() } 
public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInSecretSettingsMap() throws IOException { - try ( - var service = new OpenAiService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createOpenAiService()) { var secretSettingsMap = getSecretSettingsMap("secret"); secretSettingsMap.put("extra_key", "value"); @@ -252,13 +222,7 @@ public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInSecretSettingsMap } public void testParseRequestConfig_CreatesAnOpenAiEmbeddingsModelWithoutUserUrlOrganization() throws IOException { - try ( - var service = new OpenAiService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { - + try (var service = createOpenAiService()) { ActionListener modelVerificationListener = ActionListener.wrap(model -> { assertThat(model, instanceOf(OpenAiEmbeddingsModel.class)); @@ -281,12 +245,7 @@ public void testParseRequestConfig_CreatesAnOpenAiEmbeddingsModelWithoutUserUrlO } public void testParseRequestConfig_MovesModel() throws IOException { - try ( - var service = new OpenAiService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createOpenAiService()) { ActionListener modelVerificationListener = ActionListener.wrap(model -> { assertThat(model, instanceOf(OpenAiEmbeddingsModel.class)); @@ -313,12 +272,7 @@ public void testParseRequestConfig_MovesModel() throws IOException { } public void testParsePersistedConfigWithSecrets_CreatesAnOpenAiEmbeddingsModel() throws IOException { - try ( - var service = new OpenAiService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createOpenAiService()) { var persistedConfig = getPersistedConfigMap( getServiceSettingsMap("model", "url", "org", 100, false), getTaskSettingsMap("user"), @@ -344,12 +298,7 @@ public void testParsePersistedConfigWithSecrets_CreatesAnOpenAiEmbeddingsModel() } public void testParsePersistedConfigWithSecrets_ThrowsErrorTryingToParseInvalidModel() throws IOException { - try ( - var service = new OpenAiService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createOpenAiService()) { var persistedConfig = getPersistedConfigMap( getServiceSettingsMap("model", "url", "org"), getTaskSettingsMap("user"), @@ -374,12 +323,7 @@ public void testParsePersistedConfigWithSecrets_ThrowsErrorTryingToParseInvalidM } public void testParsePersistedConfigWithSecrets_CreatesAnOpenAiEmbeddingsModelWithoutUserUrlOrganization() throws IOException { - try ( - var service = new OpenAiService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createOpenAiService()) { var persistedConfig = getPersistedConfigMap( getServiceSettingsMap("model", null, null, null, true), getTaskSettingsMap(null), @@ -405,12 +349,7 @@ public void testParsePersistedConfigWithSecrets_CreatesAnOpenAiEmbeddingsModelWi } public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExistsInConfig() throws IOException { - try ( - var service = new OpenAiService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createOpenAiService()) { var 
persistedConfig = getPersistedConfigMap( getServiceSettingsMap("model", "url", "org", null, true), getTaskSettingsMap("user"), @@ -438,12 +377,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists } public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExistsInSecretsSettings() throws IOException { - try ( - var service = new OpenAiService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createOpenAiService()) { var secretSettingsMap = getSecretSettingsMap("secret"); secretSettingsMap.put("extra_key", "value"); @@ -472,12 +406,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists } public void testParsePersistedConfigWithSecrets_NotThrowWhenAnExtraKeyExistsInSecrets() throws IOException { - try ( - var service = new OpenAiService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createOpenAiService()) { var persistedConfig = getPersistedConfigMap( getServiceSettingsMap("model", "url", "org", null, true), getTaskSettingsMap("user"), @@ -505,12 +434,7 @@ public void testParsePersistedConfigWithSecrets_NotThrowWhenAnExtraKeyExistsInSe } public void testParsePersistedConfigWithSecrets_NotThrowWhenAnExtraKeyExistsInServiceSettings() throws IOException { - try ( - var service = new OpenAiService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createOpenAiService()) { var serviceSettingsMap = getServiceSettingsMap("model", "url", "org", null, true); serviceSettingsMap.put("extra_key", "value"); @@ -535,12 +459,7 @@ public void testParsePersistedConfigWithSecrets_NotThrowWhenAnExtraKeyExistsInSe } public void testParsePersistedConfigWithSecrets_NotThrowWhenAnExtraKeyExistsInTaskSettings() throws IOException { - try ( - var service = new OpenAiService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createOpenAiService()) { var taskSettingsMap = getTaskSettingsMap("user"); taskSettingsMap.put("extra_key", "value"); @@ -569,12 +488,7 @@ public void testParsePersistedConfigWithSecrets_NotThrowWhenAnExtraKeyExistsInTa } public void testParsePersistedConfig_CreatesAnOpenAiEmbeddingsModel() throws IOException { - try ( - var service = new OpenAiService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createOpenAiService()) { var persistedConfig = getPersistedConfigMap( getServiceSettingsMap("model", "url", "org", null, true), getTaskSettingsMap("user") @@ -594,12 +508,7 @@ public void testParsePersistedConfig_CreatesAnOpenAiEmbeddingsModel() throws IOE } public void testParsePersistedConfig_ThrowsErrorTryingToParseInvalidModel() throws IOException { - try ( - var service = new OpenAiService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createOpenAiService()) { var persistedConfig = getPersistedConfigMap(getServiceSettingsMap("model", "url", "org"), getTaskSettingsMap("user")); var thrownException = expectThrows( @@ -615,12 +524,7 @@ public void testParsePersistedConfig_ThrowsErrorTryingToParseInvalidModel() thro } public void 
testParsePersistedConfig_CreatesAnOpenAiEmbeddingsModelWithoutUserUrlOrganization() throws IOException { - try ( - var service = new OpenAiService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createOpenAiService()) { var persistedConfig = getPersistedConfigMap(getServiceSettingsMap("model", null, null, null, true), getTaskSettingsMap(null)); var model = service.parsePersistedConfig("id", TaskType.TEXT_EMBEDDING, persistedConfig.config()); @@ -637,12 +541,7 @@ public void testParsePersistedConfig_CreatesAnOpenAiEmbeddingsModelWithoutUserUr } public void testParsePersistedConfig_DoesNotThrowWhenAnExtraKeyExistsInConfig() throws IOException { - try ( - var service = new OpenAiService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createOpenAiService()) { var persistedConfig = getPersistedConfigMap( getServiceSettingsMap("model", "url", "org", null, true), getTaskSettingsMap("user") @@ -663,12 +562,7 @@ public void testParsePersistedConfig_DoesNotThrowWhenAnExtraKeyExistsInConfig() } public void testParsePersistedConfig_NotThrowWhenAnExtraKeyExistsInServiceSettings() throws IOException { - try ( - var service = new OpenAiService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createOpenAiService()) { var serviceSettingsMap = getServiceSettingsMap("model", "url", "org", null, true); serviceSettingsMap.put("extra_key", "value"); @@ -688,12 +582,7 @@ public void testParsePersistedConfig_NotThrowWhenAnExtraKeyExistsInServiceSettin } public void testParsePersistedConfig_NotThrowWhenAnExtraKeyExistsInTaskSettings() throws IOException { - try ( - var service = new OpenAiService( - new SetOnce<>(mock(HttpRequestSenderFactory.class)), - new SetOnce<>(createWithEmptySettings(threadPool)) - ) - ) { + try (var service = createOpenAiService()) { var taskSettingsMap = getTaskSettingsMap("user"); taskSettingsMap.put("extra_key", "value"); @@ -716,12 +605,12 @@ public void testParsePersistedConfig_NotThrowWhenAnExtraKeyExistsInTaskSettings( public void testInfer_ThrowsErrorWhenModelIsNotOpenAiModel() throws IOException { var sender = mock(Sender.class); - var factory = mock(HttpRequestSenderFactory.class); + var factory = mock(HttpRequestSender.Factory.class); when(factory.createSender(anyString())).thenReturn(sender); var mockModel = getInvalidModel("model_id", "service_name"); - try (var service = new OpenAiService(new SetOnce<>(factory), new SetOnce<>(createWithEmptySettings(threadPool)))) { + try (var service = new OpenAiService(factory, createWithEmptySettings(threadPool))) { PlainActionFuture listener = new PlainActionFuture<>(); service.infer(mockModel, List.of(""), new HashMap<>(), InputType.INGEST, listener); @@ -741,9 +630,9 @@ public void testInfer_ThrowsErrorWhenModelIsNotOpenAiModel() throws IOException } public void testInfer_SendsRequest() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); - try (var service = new OpenAiService(new SetOnce<>(senderFactory), new SetOnce<>(createWithEmptySettings(threadPool)))) { + try (var service = new OpenAiService(senderFactory, createWithEmptySettings(threadPool))) { String responseJson = """ { @@ 
-789,9 +678,9 @@ public void testInfer_SendsRequest() throws IOException { } public void testCheckModelConfig_IncludesMaxTokens() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); - try (var service = new OpenAiService(new SetOnce<>(senderFactory), new SetOnce<>(createWithEmptySettings(threadPool)))) { + try (var service = new OpenAiService(senderFactory, createWithEmptySettings(threadPool))) { String responseJson = """ { @@ -830,9 +719,9 @@ public void testCheckModelConfig_IncludesMaxTokens() throws IOException { } public void testCheckModelConfig_ThrowsIfEmbeddingSizeDoesNotMatchValueSetByUser() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); - try (var service = new OpenAiService(new SetOnce<>(senderFactory), new SetOnce<>(createWithEmptySettings(threadPool)))) { + try (var service = new OpenAiService(senderFactory, createWithEmptySettings(threadPool))) { String responseJson = """ { @@ -881,9 +770,9 @@ public void testCheckModelConfig_ThrowsIfEmbeddingSizeDoesNotMatchValueSetByUser public void testCheckModelConfig_ReturnsModelWithDimensionsSetTo2_AndDocProductSet_IfDimensionsSetByUser_ButSetToNull() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); - try (var service = new OpenAiService(new SetOnce<>(senderFactory), new SetOnce<>(createWithEmptySettings(threadPool)))) { + try (var service = new OpenAiService(senderFactory, createWithEmptySettings(threadPool))) { String responseJson = """ { @@ -939,9 +828,9 @@ public void testCheckModelConfig_ReturnsModelWithDimensionsSetTo2_AndDocProductS public void testCheckModelConfig_ReturnsModelWithSameDimensions_AndDocProductSet_IfDimensionsSetByUser_AndTheyMatchReturnedSize() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); - try (var service = new OpenAiService(new SetOnce<>(senderFactory), new SetOnce<>(createWithEmptySettings(threadPool)))) { + try (var service = new OpenAiService(senderFactory, createWithEmptySettings(threadPool))) { String responseJson = """ { @@ -998,9 +887,9 @@ public void testCheckModelConfig_ReturnsModelWithSameDimensions_AndDocProductSet } public void testCheckModelConfig_ReturnsNewModelReference_AndDoesNotSendDimensionsField_WhenNotSetByUser() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); - try (var service = new OpenAiService(new SetOnce<>(senderFactory), new SetOnce<>(createWithEmptySettings(threadPool)))) { + try (var service = new OpenAiService(senderFactory, createWithEmptySettings(threadPool))) { String responseJson = """ { @@ -1064,9 +953,9 @@ public void testCheckModelConfig_ReturnsNewModelReference_AndDoesNotSendDimensio } public void testInfer_UnauthorisedResponse() throws IOException { - 
var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); - try (var service = new OpenAiService(new SetOnce<>(senderFactory), new SetOnce<>(createWithEmptySettings(threadPool)))) { + try (var service = new OpenAiService(senderFactory, createWithEmptySettings(threadPool))) { String responseJson = """ { @@ -1118,6 +1007,10 @@ public void testMoveModelFromTaskToServiceSettings_AlreadyMoved() { assertEquals("model", serviceSettings.get(ServiceFields.MODEL_ID)); } + private OpenAiService createOpenAiService() { + return new OpenAiService(mock(HttpRequestSender.Factory.class), createWithEmptySettings(threadPool)); + } + private Map getRequestConfigMap( Map serviceSettings, Map taskSettings, diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsModelTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsModelTests.java index 01b60fdb896d0..db5febef1dab2 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsModelTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsModelTests.java @@ -47,6 +47,24 @@ public void testOverrideWith_NullMap() { assertThat(overriddenModel, sameInstance(model)); } + public static OpenAiEmbeddingsModel createModel( + String url, + @Nullable String org, + String apiKey, + String modelName, + @Nullable String user, + String inferenceEntityId + ) { + return new OpenAiEmbeddingsModel( + inferenceEntityId, + TaskType.TEXT_EMBEDDING, + "service", + new OpenAiEmbeddingsServiceSettings(modelName, url, org, SimilarityMeasure.DOT_PRODUCT, 1536, null, false), + new OpenAiEmbeddingsTaskSettings(user), + new DefaultSecretSettings(new SecureString(apiKey.toCharArray())) + ); + } + public static OpenAiEmbeddingsModel createModel( String url, @Nullable String org, diff --git a/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java b/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java index 1581803920cdc..03f1aaf8577cf 100644 --- a/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java +++ b/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java @@ -15,6 +15,8 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.util.NumericUtils; +import org.elasticsearch.common.logging.DeprecationCategory; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.time.DateMathParser; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.IndexMode; @@ -74,6 +76,8 @@ /** A {@link FieldMapper} for a field containing aggregate metrics such as min/max/value_count etc. 
*/ public class AggregateDoubleMetricFieldMapper extends FieldMapper { + private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(AggregateDoubleMetricFieldMapper.class); + public static final String CONTENT_TYPE = "aggregate_metric_double"; public static final String SUBFIELD_SEPARATOR = "."; @@ -187,6 +191,13 @@ public Builder metric(MetricType metric) { @Override public AggregateDoubleMetricFieldMapper build(MapperBuilderContext context) { + if (multiFieldsBuilder.hasMultiFields()) { + DEPRECATION_LOGGER.warn( + DeprecationCategory.MAPPINGS, + CONTENT_TYPE + "_multifields", + "Adding multifields to [" + CONTENT_TYPE + "] mappers has no effect and will be forbidden in future" + ); + } if (defaultMetric.isConfigured() == false) { // If a single metric is contained, this should be the default if (metrics.getValue().size() == 1) { diff --git a/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java b/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java index f2b1f013212db..ebf060f520c5a 100644 --- a/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java +++ b/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java @@ -21,6 +21,8 @@ import org.apache.lucene.util.automaton.LevenshteinAutomata; import org.apache.lucene.util.automaton.RegExp; import org.elasticsearch.common.geo.ShapeRelation; +import org.elasticsearch.common.logging.DeprecationCategory; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.time.DateMathParser; @@ -62,6 +64,8 @@ */ public class ConstantKeywordFieldMapper extends FieldMapper { + private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(ConstantKeywordFieldMapper.class); + public static final String CONTENT_TYPE = "constant_keyword"; private static ConstantKeywordFieldMapper toType(FieldMapper in) { @@ -98,6 +102,13 @@ protected Parameter[] getParameters() { @Override public ConstantKeywordFieldMapper build(MapperBuilderContext context) { + if (multiFieldsBuilder.hasMultiFields()) { + DEPRECATION_LOGGER.warn( + DeprecationCategory.MAPPINGS, + CONTENT_TYPE + "_multifields", + "Adding multifields to [" + CONTENT_TYPE + "] mappers has no effect and will be forbidden in future" + ); + } return new ConstantKeywordFieldMapper( name(), new ConstantKeywordFieldType(context.buildFullName(name()), value.getValue(), meta.getValue()) diff --git a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java index 955d658b01bab..52424956ef53e 100644 --- a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java +++ b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java @@ -333,7 +333,7 @@ protected Object parseSourceValue(Object value) { if (value.equals("")) { return nullValueFormatted; } - return parseUnsignedLong(value); + return unsignedToSortableSignedLong(parseUnsignedLong(value)); } }; 
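// Editor's note, not part of the patch: the aggregate_metric_double and constant_keyword builders
// in the two mapper hunks above now share the same multi-fields guard. Condensed sketch of the
// added behaviour (CONTENT_TYPE and the DEPRECATION_LOGGER instance differ per mapper class):
if (multiFieldsBuilder.hasMultiFields()) {
    DEPRECATION_LOGGER.warn(
        DeprecationCategory.MAPPINGS,
        CONTENT_TYPE + "_multifields",
        "Adding multifields to [" + CONTENT_TYPE + "] mappers has no effect and will be forbidden in future"
    );
}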
BlockSourceReader.LeafIteratorLookup lookup = isStored() || isIndexed() diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java index f8542d316dac5..ea97e08dce990 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java @@ -181,7 +181,6 @@ public void testTooManyByAndOverFields() throws Exception { assertThat(modelSizeStats.getMemoryStatus(), equalTo(ModelSizeStats.MemoryStatus.HARD_LIMIT)); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105347") public void testManyDistinctOverFields() throws Exception { Detector.Builder detector = new Detector.Builder("sum", "value"); detector.setOverFieldName("user"); @@ -226,7 +225,7 @@ public void testManyDistinctOverFields() throws Exception { // Assert we haven't violated the limit too much GetJobsStatsAction.Response.JobStats jobStats = getJobStats(job.getId()).get(0); ModelSizeStats modelSizeStats = jobStats.getModelSizeStats(); - assertThat(modelSizeStats.getModelBytes(), lessThan(120000000L)); + assertThat(modelSizeStats.getModelBytes(), lessThan(120500000L)); assertThat(modelSizeStats.getModelBytes(), greaterThan(90000000L)); assertThat(modelSizeStats.getMemoryStatus(), equalTo(ModelSizeStats.MemoryStatus.HARD_LIMIT)); } diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/InferenceIngestIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/InferenceIngestIT.java index 84c5ed9f934bb..0544534501ab2 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/InferenceIngestIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/InferenceIngestIT.java @@ -33,7 +33,6 @@ import org.junit.Before; import java.io.IOException; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; @@ -539,15 +538,17 @@ private static Request searchRequest(String index, QueryBuilder queryBuilder) th return request; } - private Map generateSourceDoc() { - return new HashMap<>() { - { - put("col1", randomFrom("female", "male")); - put("col2", randomFrom("S", "M", "L", "XL")); - put("col3", randomFrom("true", "false", "none", "other")); - put("col4", randomIntBetween(0, 10)); - } - }; + private static Map generateSourceDoc() { + return Map.of( + "col1", + randomFrom("female", "male"), + "col2", + randomFrom("S", "M", "L", "XL"), + "col3", + randomFrom("true", "false", "none", "other"), + "col4", + randomIntBetween(0, 10) + ); } private static final String REGRESSION_DEFINITION = """ diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ResetJobIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ResetJobIT.java index 4d336daa4924c..61ce2224c1ed9 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ResetJobIT.java +++ 
b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ResetJobIT.java @@ -8,11 +8,13 @@ package org.elasticsearch.xpack.ml.integration; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.tasks.TaskId; import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; import org.elasticsearch.xpack.core.ml.job.config.Blocked; import org.elasticsearch.xpack.core.ml.job.config.DataDescription; import org.elasticsearch.xpack.core.ml.job.config.Detector; import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.config.JobUpdate; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; import org.elasticsearch.xpack.core.ml.job.results.Bucket; import org.junit.After; @@ -34,10 +36,18 @@ public void tearDownData() { } public void testReset() throws Exception { + testReset(false); + } + + public void testReset_previousResetFailed() throws Exception { + testReset(true); + } + + private void testReset(boolean previousResetFailed) throws Exception { TimeValue bucketSpan = TimeValue.timeValueMinutes(30); long startTime = 1514764800000L; final int bucketCount = 100; - Job.Builder job = createJob("test-reset", bucketSpan); + Job.Builder job = createJob("test-reset-" + previousResetFailed, bucketSpan); openJob(job.getId()); postData( @@ -53,6 +63,13 @@ public void testReset() throws Exception { DataCounts dataCounts = getJobStats(job.getId()).get(0).getDataCounts(); assertThat(dataCounts.getProcessedRecordCount(), greaterThan(0L)); + if (previousResetFailed) { + JobUpdate jobUpdate = new JobUpdate.Builder(job.getId()).setBlocked( + new Blocked(Blocked.Reason.RESET, new TaskId(randomIdentifier(), randomInt())) + ).build(); + updateJob(job.getId(), jobUpdate); + } + resetJob(job.getId()); buckets = getBuckets(job.getId()); diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/DataFrameAnalyticsConfigProviderIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/DataFrameAnalyticsConfigProviderIT.java index 3b84c5d86c00c..e29cd4545846c 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/DataFrameAnalyticsConfigProviderIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/DataFrameAnalyticsConfigProviderIT.java @@ -332,7 +332,6 @@ public void testUpdate_ConfigDoesNotExist() throws InterruptedException { assertThat(exceptionHolder.get(), is(instanceOf(ResourceNotFoundException.class))); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/58814") public void testUpdate_UpdateCannotBeAppliedWhenTaskIsRunning() throws InterruptedException { String configId = "config-id"; DataFrameAnalyticsConfig initialConfig = DataFrameAnalyticsConfigTests.createRandom(configId); @@ -354,8 +353,10 @@ public void testUpdate_UpdateCannotBeAppliedWhenTaskIsRunning() throws Interrupt AtomicReference updatedConfigHolder = new AtomicReference<>(); AtomicReference exceptionHolder = new AtomicReference<>(); + // Important: the new value specified here must be one that it's impossible for DataFrameAnalyticsConfigTests.createRandom + // to have used originally. If the update is a no-op then the test fails. 
DataFrameAnalyticsConfigUpdate configUpdate = new DataFrameAnalyticsConfigUpdate.Builder(configId).setModelMemoryLimit( - ByteSizeValue.ofMb(2048) + ByteSizeValue.ofMb(1234) ).build(); ClusterState clusterState = clusterStateWithRunningAnalyticsTask(configId, DataFrameAnalyticsState.ANALYZING); diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java index 942729bb81c64..33fd7c108863b 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java @@ -541,7 +541,6 @@ public void testClusterWithTwoMlNodes_RunsDatafeed_GivenOriginalNodeGoesDown() t }); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103108") public void testClusterWithTwoMlNodes_StopsDatafeed_GivenJobFailsOnReassign() throws Exception { internalCluster().ensureAtMostNumDataNodes(0); logger.info("Starting dedicated master node..."); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceService.java index 8c46f7229c655..47f0fde838b8e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceService.java @@ -10,9 +10,11 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction; +import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; @@ -22,16 +24,20 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.core.Predicates; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.MlMetadata; +import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.DeleteExpiredDataAction; import org.elasticsearch.xpack.core.ml.action.DeleteJobAction; import org.elasticsearch.xpack.core.ml.action.GetJobsAction; +import org.elasticsearch.xpack.core.ml.action.ResetJobAction; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.ml.utils.TypedChainTaskExecutor; @@ -41,6 +47,8 @@ import java.util.Objects; import java.util.Random; import java.util.Set; +import java.util.function.Function; +import java.util.function.Predicate; import java.util.function.Supplier; import 
static java.util.stream.Collectors.toList; @@ -205,24 +213,34 @@ private void triggerTasks() { } private void triggerAnomalyDetectionMaintenance() { - // Step 3: Log any error that could have happened + // Step 4: Log any error that could have happened ActionListener finalListener = ActionListener.wrap( unused -> {}, - e -> logger.error("An error occurred during [ML] maintenance tasks execution", e) + e -> logger.warn("An error occurred during [ML] maintenance tasks execution", e) ); - // Step 2: Delete expired data + // Step 3: Delete expired data ActionListener deleteJobsListener = ActionListener.wrap( unused -> triggerDeleteExpiredDataTask(finalListener), e -> { - logger.info("[ML] maintenance task: triggerDeleteJobsInStateDeletingWithoutDeletionTask failed", e); - // Note: Steps 1 and 2 are independent of each other and step 2 is executed even if step 1 failed. + logger.warn("[ML] maintenance task: triggerResetJobsInStateResetWithoutResetTask failed", e); + // Note: Steps 1-3 are independent, so continue upon errors. triggerDeleteExpiredDataTask(finalListener); } ); - // Step 1: Delete jobs that are in deleting state - triggerDeleteJobsInStateDeletingWithoutDeletionTask(deleteJobsListener); + // Step 2: Reset jobs that are in resetting state without task + ActionListener resetJobsListener = ActionListener.wrap( + unused -> triggerResetJobsInStateResetWithoutResetTask(deleteJobsListener), + e -> { + logger.warn("[ML] maintenance task: triggerDeleteJobsInStateDeletingWithoutDeletionTask failed", e); + // Note: Steps 1-3 are independent, so continue upon errors. + triggerResetJobsInStateResetWithoutResetTask(deleteJobsListener); + } + ); + + // Step 1: Delete jobs that are in deleting state without task + triggerDeleteJobsInStateDeletingWithoutDeletionTask(resetJobsListener); } private void triggerDataFrameAnalyticsMaintenance() { @@ -256,73 +274,111 @@ private void triggerDeleteExpiredDataTask(ActionListener f // Visible for testing public void triggerDeleteJobsInStateDeletingWithoutDeletionTask(ActionListener finalListener) { - SetOnce> jobsInStateDeletingHolder = new SetOnce<>(); - - ActionListener>> deleteJobsActionListener = finalListener - .delegateFailureAndWrap((delegate, deleteJobsResponses) -> { - List jobIds = deleteJobsResponses.stream() - .filter(t -> t.v2().isAcknowledged() == false) - .map(Tuple::v1) - .map(DeleteJobAction.Request::getJobId) - .collect(toList()); + triggerJobsInStateWithoutMatchingTask( + "triggerDeleteJobsInStateDeletingWithoutDeletionTask", + Job::isDeleting, + DeleteJobAction.NAME, + taskInfo -> stripPrefixOrNull(taskInfo.description(), DeleteJobAction.DELETION_TASK_DESCRIPTION_PREFIX), + DeleteJobAction.INSTANCE, + DeleteJobAction.Request::new, + finalListener + ); + } + + public void triggerResetJobsInStateResetWithoutResetTask(ActionListener finalListener) { + triggerJobsInStateWithoutMatchingTask( + "triggerResetJobsInStateResetWithoutResetTask", + Job::isResetting, + ResetJobAction.NAME, + taskInfo -> stripPrefixOrNull(taskInfo.description(), MlTasks.JOB_TASK_ID_PREFIX), + ResetJobAction.INSTANCE, + ResetJobAction.Request::new, + finalListener + ); + } + + /** + * @return If the string starts with the prefix, this returns the string without the prefix. + * Otherwise, this return null. + */ + private static String stripPrefixOrNull(String str, String prefix) { + return str == null || str.startsWith(prefix) == false ? 
null : str.substring(prefix.length()); + } + + /** + * Executes a request for each job in a state, while missing the corresponding task. This + * usually indicates the node originally executing the task has died, so retry the request. + * + * @param maintenanceTaskName Name of ML maintenance task; used only for logging. + * @param jobFilter Predicate for filtering the jobs. + * @param taskActionName Action name of the tasks corresponding to the jobs. + * @param jobIdExtractor Function to extract the job ID from the task info (in order to match to the job). + * @param actionType Action type of the request that should be (re)executed. + * @param requestCreator Function to create the request from the job ID. + * @param finalListener Listener that captures the final response. + */ + private void triggerJobsInStateWithoutMatchingTask( + String maintenanceTaskName, + Predicate jobFilter, + String taskActionName, + Function jobIdExtractor, + ActionType actionType, + Function> requestCreator, + ActionListener finalListener + ) { + SetOnce> jobsInStateHolder = new SetOnce<>(); + + ActionListener>> jobsActionListener = finalListener.delegateFailureAndWrap( + (delegate, jobsResponses) -> { + List jobIds = jobsResponses.stream().filter(t -> t.v2().isAcknowledged() == false).map(Tuple::v1).collect(toList()); if (jobIds.isEmpty()) { - logger.info("Successfully completed [ML] maintenance task: triggerDeleteJobsInStateDeletingWithoutDeletionTask"); + logger.info("Successfully completed [ML] maintenance task: {}", maintenanceTaskName); } else { - logger.info("The following ML jobs could not be deleted: [" + String.join(",", jobIds) + "]"); + logger.info("[ML] maintenance task {} failed for jobs: {}", maintenanceTaskName, jobIds); } delegate.onResponse(AcknowledgedResponse.TRUE); - }); + } + ); ActionListener listTasksActionListener = ActionListener.wrap(listTasksResponse -> { - Set jobsInStateDeleting = jobsInStateDeletingHolder.get(); - Set jobsWithDeletionTask = listTasksResponse.getTasks() - .stream() - .filter(t -> t.description() != null) - .filter(t -> t.description().startsWith(DeleteJobAction.DELETION_TASK_DESCRIPTION_PREFIX)) - .map(t -> t.description().substring(DeleteJobAction.DELETION_TASK_DESCRIPTION_PREFIX.length())) - .collect(toSet()); - Set jobsInStateDeletingWithoutDeletionTask = Sets.difference(jobsInStateDeleting, jobsWithDeletionTask); - if (jobsInStateDeletingWithoutDeletionTask.isEmpty()) { + Set jobsInState = jobsInStateHolder.get(); + Set jobsWithTask = listTasksResponse.getTasks().stream().map(jobIdExtractor).filter(Objects::nonNull).collect(toSet()); + Set jobsInStateWithoutTask = Sets.difference(jobsInState, jobsWithTask); + if (jobsInStateWithoutTask.isEmpty()) { finalListener.onResponse(AcknowledgedResponse.TRUE); return; } - TypedChainTaskExecutor> chainTaskExecutor = new TypedChainTaskExecutor<>( + TypedChainTaskExecutor> chainTaskExecutor = new TypedChainTaskExecutor<>( EsExecutors.DIRECT_EXECUTOR_SERVICE, - unused -> true, - unused -> true + Predicates.always(), + Predicates.always() ); - for (String jobId : jobsInStateDeletingWithoutDeletionTask) { - DeleteJobAction.Request request = new DeleteJobAction.Request(jobId); + for (String jobId : jobsInStateWithoutTask) { chainTaskExecutor.add( listener -> executeAsyncWithOrigin( client, ML_ORIGIN, - DeleteJobAction.INSTANCE, - request, - listener.delegateFailureAndWrap((l, response) -> l.onResponse(Tuple.tuple(request, response))) + actionType, + requestCreator.apply(jobId), + listener.delegateFailureAndWrap((l, response) -> 
l.onResponse(Tuple.tuple(jobId, response))) ) ); } - chainTaskExecutor.execute(deleteJobsActionListener); + chainTaskExecutor.execute(jobsActionListener); }, finalListener::onFailure); ActionListener getJobsActionListener = ActionListener.wrap(getJobsResponse -> { - Set jobsInStateDeleting = getJobsResponse.getResponse() - .results() - .stream() - .filter(Job::isDeleting) - .map(Job::getId) - .collect(toSet()); - if (jobsInStateDeleting.isEmpty()) { + Set jobsInState = getJobsResponse.getResponse().results().stream().filter(jobFilter).map(Job::getId).collect(toSet()); + if (jobsInState.isEmpty()) { finalListener.onResponse(AcknowledgedResponse.TRUE); return; } - jobsInStateDeletingHolder.set(jobsInStateDeleting); + jobsInStateHolder.set(jobsInState); executeAsyncWithOrigin( client, ML_ORIGIN, TransportListTasksAction.TYPE, - new ListTasksRequest().setActions(DeleteJobAction.NAME), + new ListTasksRequest().setActions(taskActionName), listTasksActionListener ); }, finalListener::onFailure); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCancelJobModelSnapshotUpgradeAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCancelJobModelSnapshotUpgradeAction.java index 1d6692f533b9c..41b146f1d9adb 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCancelJobModelSnapshotUpgradeAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCancelJobModelSnapshotUpgradeAction.java @@ -103,7 +103,7 @@ private void removePersistentTasks( final AtomicArray failures = new AtomicArray<>(numberOfTasks); for (PersistentTasksCustomMetadata.PersistentTask task : upgradeTasksToCancel) { - persistentTasksService.sendRemoveRequest(task.getId(), new ActionListener<>() { + persistentTasksService.sendRemoveRequest(task.getId(), null, new ActionListener<>() { @Override public void onResponse(PersistentTasksCustomMetadata.PersistentTask task) { if (counter.incrementAndGet() == numberOfTasks) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java index 02801864a3e78..6b605e0438b43 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java @@ -209,6 +209,7 @@ protected void doExecute(Task task, CloseJobAction.Request request, ActionListen // these persistent tasks to disappear. 
persistentTasksService.sendRemoveRequest( jobTask.getId(), + null, ActionListener.wrap( r -> logger.trace( () -> format("[%s] removed task to close unassigned job", resolvedJobId) @@ -519,7 +520,7 @@ private void forceCloseJob( PersistentTasksCustomMetadata.PersistentTask jobTask = MlTasks.getJobTask(jobId, tasks); if (jobTask != null) { auditor.info(jobId, Messages.JOB_AUDIT_FORCE_CLOSING); - persistentTasksService.sendRemoveRequest(jobTask.getId(), new ActionListener<>() { + persistentTasksService.sendRemoveRequest(jobTask.getId(), null, new ActionListener<>() { @Override public void onResponse(PersistentTasksCustomMetadata.PersistentTask task) { if (counter.incrementAndGet() == numberOfJobs) { @@ -590,6 +591,7 @@ private void normalCloseJob( PersistentTasksCustomMetadata.PersistentTask jobTask = MlTasks.getJobTask(jobId, tasks); persistentTasksService.sendRemoveRequest( jobTask.getId(), + null, ActionListener.wrap(r -> logger.trace("[{}] removed persistent task for relocated job", jobId), e -> { if (ExceptionsHelper.unwrapCause(e) instanceof ResourceNotFoundException) { logger.debug("[{}] relocated job task already removed", jobId); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDatafeedAction.java index 49c6021a6ed8b..cddddc8d3c245 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDatafeedAction.java @@ -103,7 +103,7 @@ private void removeDatafeedTask(DeleteDatafeedAction.Request request, ClusterSta if (datafeedTask == null) { listener.onResponse(true); } else { - persistentTasksService.sendRemoveRequest(datafeedTask.getId(), new ActionListener<>() { + persistentTasksService.sendRemoveRequest(datafeedTask.getId(), null, new ActionListener<>() { @Override public void onResponse(PersistentTasksCustomMetadata.PersistentTask persistentTask) { listener.onResponse(Boolean.TRUE); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java index f694e85144b48..19f99a329d309 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java @@ -291,7 +291,7 @@ private void removePersistentTask(String jobId, ClusterState currentState, Actio if (jobTask == null) { listener.onResponse(null); } else { - persistentTasksService.sendRemoveRequest(jobTask.getId(), listener.safeMap(task -> true)); + persistentTasksService.sendRemoveRequest(jobTask.getId(), null, listener.safeMap(task -> true)); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportEvaluateDataFrameAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportEvaluateDataFrameAction.java index 61db7f683f0f3..92c9909441b14 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportEvaluateDataFrameAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportEvaluateDataFrameAction.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.Predicates; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; @@ -128,7 +129,7 @@ private static final class EvaluationExecutor extends TypedChainTaskExecutor true, unused -> true); + super(threadPool.generic(), Predicates.always(), Predicates.always()); this.client = client; this.parameters = parameters; this.request = request; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java index 6a8dca8e2776b..d54cac9dca496 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.Predicates; import org.elasticsearch.core.Tuple; import org.elasticsearch.inference.InferenceResults; import org.elasticsearch.license.License; @@ -175,9 +176,9 @@ private void getModelAndInfer( TypedChainTaskExecutor typedChainTaskExecutor = new TypedChainTaskExecutor<>( EsExecutors.DIRECT_EXECUTOR_SERVICE, // run through all tasks - r -> true, + Predicates.always(), // Always fail immediately and return an error - ex -> true + Predicates.always() ); request.getObjectsToInfer().forEach(stringObjectMap -> typedChainTaskExecutor.add(chainedTask -> { if (task.isCancelled()) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java index c527c00a738a2..52e9f93d7d31f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java @@ -166,6 +166,7 @@ public void onFailure(Exception e) { MlTasks.jobTaskId(jobParams.getJobId()), MlTasks.JOB_TASK_NAME, jobParams, + null, waitForJobToStart ), listener::onFailure @@ -324,7 +325,7 @@ private void cancelJobStart( Exception exception, ActionListener listener ) { - persistentTasksService.sendRemoveRequest(persistentTask.getId(), new ActionListener<>() { + persistentTasksService.sendRemoveRequest(persistentTask.getId(), null, new ActionListener<>() { @Override public void onResponse(PersistentTasksCustomMetadata.PersistentTask task) { // We succeeded in cancelling the persistent task, but the diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportResetJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportResetJobAction.java index 35a80876ea763..030e25ea7797a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportResetJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportResetJobAction.java @@ -9,6 +9,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import 
org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskAction; import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskRequest; @@ -124,7 +125,14 @@ protected void masterOperation( waitExistingResetTaskToComplete( job.getBlocked().getTaskId(), request, - ActionListener.wrap(r -> resetIfJobIsStillBlockedOnReset(task, request, listener), listener::onFailure) + ActionListener.wrap(r -> resetIfJobIsStillBlockedOnReset(task, request, listener), e -> { + if (ExceptionsHelper.unwrapCause(e) instanceof ResourceNotFoundException) { + // If the task is not found then the node it was running on likely died, so try again. + resetIfJobIsStillBlockedOnReset(task, request, listener); + } else { + listener.onFailure(e); + } + }) ); } else { ParentTaskAssigningClient taskClient = new ParentTaskAssigningClient(client, taskId); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java index 07b556cf9a989..4f4eee6e5c597 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.Predicates; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.persistent.PersistentTasksClusterService; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; @@ -300,7 +301,7 @@ private void unassignPersistentTasks( TypedChainTaskExecutor> chainTaskExecutor = new TypedChainTaskExecutor<>( executor, - r -> true, + Predicates.always(), // Another process could modify tasks and thus we cannot find them via the allocation_id and name // If the task was removed from the node, all is well // We handle the case of allocation_id changing later in this transport class by timing out waiting for task completion @@ -330,8 +331,8 @@ private void isolateDatafeeds( logger.info("Isolating datafeeds: " + datafeedsToIsolate.toString()); TypedChainTaskExecutor isolateDatafeedsExecutor = new TypedChainTaskExecutor<>( executor, - r -> true, - ex -> true + Predicates.always(), + Predicates.always() ); datafeedsToIsolate.forEach(datafeedId -> { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsAction.java index 5351023a803e7..05f3d6311404a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsAction.java @@ -211,6 +211,7 @@ public void onFailure(Exception e) { MlTasks.dataFrameAnalyticsTaskId(request.getId()), MlTasks.DATA_FRAME_ANALYTICS_TASK_NAME, taskParams, + null, waitForAnalyticsToStart ); }, listener::onFailure); @@ -602,6 +603,7 @@ private void cancelAnalyticsStart( ) { persistentTasksService.sendRemoveRequest( persistentTask.getId(), + null, new ActionListener>() { @Override public void onResponse(PersistentTasksCustomMetadata.PersistentTask task) { diff --git 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java index e7d5d956bb1b0..2067bae048561 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java @@ -345,6 +345,7 @@ private void createDataExtractor( MlTasks.datafeedTaskId(params.getDatafeedId()), MlTasks.DATAFEED_TASK_NAME, params, + null, listener ), listener::onFailure @@ -407,7 +408,7 @@ private void cancelDatafeedStart( Exception exception, ActionListener listener ) { - persistentTasksService.sendRemoveRequest(persistentTask.getId(), new ActionListener<>() { + persistentTasksService.sendRemoveRequest(persistentTask.getId(), null, new ActionListener<>() { @Override public void onResponse(PersistentTasksCustomMetadata.PersistentTask task) { // We succeeded in cancelling the persistent task, but the diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDataFrameAnalyticsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDataFrameAnalyticsAction.java index 42d36006acbde..9ad0213bf7ee5 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDataFrameAnalyticsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDataFrameAnalyticsAction.java @@ -260,7 +260,7 @@ private void forceStop( for (String analyticsId : nonStoppedAnalytics) { PersistentTasksCustomMetadata.PersistentTask analyticsTask = MlTasks.getDataFrameAnalyticsTask(analyticsId, tasks); if (analyticsTask != null) { - persistentTasksService.sendRemoveRequest(analyticsTask.getId(), ActionListener.wrap(removedTask -> { + persistentTasksService.sendRemoveRequest(analyticsTask.getId(), null, ActionListener.wrap(removedTask -> { auditor.info(analyticsId, Messages.DATA_FRAME_ANALYTICS_AUDIT_FORCE_STOPPED); if (counter.incrementAndGet() == nonStoppedAnalytics.size()) { sendResponseOrFailure(request.getId(), listener, failures); @@ -329,7 +329,7 @@ private String[] findAllocatedNodesAndRemoveUnassignedTasks(List analyti // This means the task has not been assigned to a node yet so // we can stop it by removing its persistent task. // The listener is a no-op as we're already going to wait for the task to be removed. - persistentTasksService.sendRemoveRequest(task.getId(), ActionListener.noop()); + persistentTasksService.sendRemoveRequest(task.getId(), null, ActionListener.noop()); } } return nodes.toArray(new String[0]); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java index 41359f5fcc166..8ba8132ecafa2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java @@ -252,6 +252,7 @@ private void normalStopDatafeed( // already waits for these persistent tasks to disappear. 
persistentTasksService.sendRemoveRequest( datafeedTask.getId(), + null, ActionListener.wrap( r -> auditDatafeedStopped(datafeedTask), e -> logger.error("[" + datafeedId + "] failed to remove task to stop unassigned datafeed", e) @@ -278,6 +279,7 @@ private void normalStopDatafeed( PersistentTasksCustomMetadata.PersistentTask datafeedTask = MlTasks.getDatafeedTask(datafeedId, tasks); persistentTasksService.sendRemoveRequest( datafeedTask.getId(), + null, ActionListener.wrap(r -> auditDatafeedStopped(datafeedTask), e -> { if (ExceptionsHelper.unwrapCause(e) instanceof ResourceNotFoundException) { logger.debug("[{}] relocated datafeed task already removed", datafeedId); @@ -381,7 +383,7 @@ private void forceStopDatafeed( for (String datafeedId : notStoppedDatafeeds) { PersistentTasksCustomMetadata.PersistentTask datafeedTask = MlTasks.getDatafeedTask(datafeedId, tasks); if (datafeedTask != null) { - persistentTasksService.sendRemoveRequest(datafeedTask.getId(), ActionListener.wrap(persistentTask -> { + persistentTasksService.sendRemoveRequest(datafeedTask.getId(), null, ActionListener.wrap(persistentTask -> { // For force stop, only audit here if the datafeed was unassigned at the time of the stop, hence inactive. // If the datafeed was active then it audits itself on being cancelled. if (PersistentTasksClusterService.needsReassignment(datafeedTask.getAssignment(), nodes)) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpgradeJobModelSnapshotAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpgradeJobModelSnapshotAction.java index 15c1d53f7bdf8..dea6c53d39ab4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpgradeJobModelSnapshotAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpgradeJobModelSnapshotAction.java @@ -164,6 +164,7 @@ protected void masterOperation(Task task, Request request, ClusterState state, A MlTasks.snapshotUpgradeTaskId(params.getJobId(), params.getSnapshotId()), MlTasks.JOB_SNAPSHOT_UPGRADE_TASK_NAME, params, + null, waitForJobToStart ); }, listener::onFailure); @@ -290,18 +291,22 @@ private void cancelJobStart( Exception exception, ActionListener listener ) { - persistentTasksService.sendRemoveRequest(persistentTask.getId(), ActionListener.wrap(t -> listener.onFailure(exception), e -> { - logger.error( - () -> format( - "[%s] [%s] Failed to cancel persistent task that could not be assigned due to %s", - persistentTask.getParams().getJobId(), - persistentTask.getParams().getSnapshotId(), - exception.getMessage() - ), - e - ); - listener.onFailure(exception); - })); + persistentTasksService.sendRemoveRequest( + persistentTask.getId(), + null, + ActionListener.wrap(t -> listener.onFailure(exception), e -> { + logger.error( + () -> format( + "[%s] [%s] Failed to cancel persistent task that could not be assigned due to %s", + persistentTask.getParams().getJobId(), + persistentTask.getParams().getSnapshotId(), + exception.getMessage() + ), + e + ); + listener.onFailure(exception); + }) + ); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizeTextAggregator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizeTextAggregator.java index 520d554379cfc..cedaced0f57ee 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizeTextAggregator.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizeTextAggregator.java @@ -113,7 +113,8 @@ protected void doClose() { public InternalAggregation[] buildAggregations(long[] ordsToCollect) throws IOException { Bucket[][] topBucketsPerOrd = new Bucket[ordsToCollect.length][]; for (int ordIdx = 0; ordIdx < ordsToCollect.length; ordIdx++) { - final TokenListCategorizer categorizer = categorizers.get(ordsToCollect[ordIdx]); + final long ord = ordsToCollect[ordIdx]; + final TokenListCategorizer categorizer = (ord < categorizers.size()) ? categorizers.get(ord) : null; if (categorizer == null) { topBucketsPerOrd[ordIdx] = new Bucket[0]; continue; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDecider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDecider.java index 4ff7e66d296d0..dfe0e557f749d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDecider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDecider.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.XContentElasticsearchExtension; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Predicates; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.logging.LogManager; @@ -913,7 +914,7 @@ private static Collection true) + return tasksCustomMetadata.findTasks(MlTasks.DATAFEED_TASK_NAME, Predicates.always()) .stream() .map(p -> (PersistentTasksCustomMetadata.PersistentTask) p) .toList(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java index 3e196e1a12723..2a76b925247ff 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java @@ -75,7 +75,7 @@ class DatafeedJob { private volatile long lastDataCheckTimeMs; private volatile Tuple lastDataCheckAnnotationWithId; private volatile Long lastEndTimeMs; - private AtomicBoolean running = new AtomicBoolean(true); + private final AtomicBoolean running = new AtomicBoolean(true); private volatile boolean isIsolated; private volatile boolean haveEverSeenData; private volatile long consecutiveDelayedDataBuckets; @@ -351,7 +351,7 @@ public boolean isRunning() { return running.get(); } - private void run(long start, long end, FlushJobAction.Request flushRequest) throws IOException { + private void run(long start, long end, FlushJobAction.Request flushRequest) { if (end <= start) { return; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java index ede57764a0813..d44d2181f0ce8 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java @@ -121,9 +121,8 @@ public void putDatafeed( final RoleDescriptor.IndicesPrivileges.Builder indicesPrivilegesBuilder = RoleDescriptor.IndicesPrivileges.builder() .indices(indices); - ActionListener privResponseListener = ActionListener.wrap( - r -> handlePrivsResponse(username, 
request, r, state, threadPool, listener), - listener::onFailure + ActionListener privResponseListener = listener.delegateFailureAndWrap( + (l, r) -> handlePrivsResponse(username, request, r, state, threadPool, l) ); ActionListener getRollupIndexCapsActionHandler = ActionListener.wrap(response -> { @@ -173,15 +172,14 @@ public void getDatafeeds( request.getDatafeedId(), request.allowNoMatch(), parentTaskId, - ActionListener.wrap( - datafeedBuilders -> listener.onResponse( + listener.delegateFailureAndWrap( + (l, datafeedBuilders) -> l.onResponse( new QueryPage<>( datafeedBuilders.stream().map(DatafeedConfig.Builder::build).collect(Collectors.toList()), datafeedBuilders.size(), DatafeedConfig.RESULTS_FIELD ) - ), - listener::onFailure + ) ) ); } @@ -222,10 +220,7 @@ public void updateDatafeed( request.getUpdate(), headers, jobConfigProvider::validateDatafeedJob, - ActionListener.wrap( - updatedConfig -> listener.onResponse(new PutDatafeedAction.Response(updatedConfig)), - listener::onFailure - ) + listener.delegateFailureAndWrap((l, updatedConfig) -> l.onResponse(new PutDatafeedAction.Response(updatedConfig))) ); }); @@ -254,19 +249,18 @@ public void deleteDatafeed(DeleteDatafeedAction.Request request, ClusterState st String datafeedId = request.getDatafeedId(); - datafeedConfigProvider.getDatafeedConfig(datafeedId, null, ActionListener.wrap(datafeedConfigBuilder -> { + datafeedConfigProvider.getDatafeedConfig(datafeedId, null, listener.delegateFailureAndWrap((delegate, datafeedConfigBuilder) -> { String jobId = datafeedConfigBuilder.build().getJobId(); JobDataDeleter jobDataDeleter = new JobDataDeleter(client, jobId); jobDataDeleter.deleteDatafeedTimingStats( - ActionListener.wrap( - unused1 -> datafeedConfigProvider.deleteDatafeedConfig( + delegate.delegateFailureAndWrap( + (l, unused1) -> datafeedConfigProvider.deleteDatafeedConfig( datafeedId, - ActionListener.wrap(unused2 -> listener.onResponse(AcknowledgedResponse.TRUE), listener::onFailure) - ), - listener::onFailure + l.delegateFailureAndWrap((ll, unused2) -> ll.onResponse(AcknowledgedResponse.TRUE)) + ) ) ); - }, listener::onFailure)); + })); } @@ -316,7 +310,7 @@ private void putDatafeed( CheckedConsumer mappingsUpdated = ok -> datafeedConfigProvider.putDatafeedConfig( request.getDatafeed(), headers, - ActionListener.wrap(response -> listener.onResponse(new PutDatafeedAction.Response(response.v1())), listener::onFailure) + listener.delegateFailureAndWrap((l, response) -> l.onResponse(new PutDatafeedAction.Response(response.v1()))) ); CheckedConsumer validationOk = ok -> { @@ -345,16 +339,19 @@ private void putDatafeed( } private void checkJobDoesNotHaveADatafeed(String jobId, ActionListener listener) { - datafeedConfigProvider.findDatafeedIdsForJobIds(Collections.singletonList(jobId), ActionListener.wrap(datafeedIds -> { - if (datafeedIds.isEmpty()) { - listener.onResponse(Boolean.TRUE); - } else { - listener.onFailure( - ExceptionsHelper.conflictStatusException( - "A datafeed [" + datafeedIds.iterator().next() + "] already exists for job [" + jobId + "]" - ) - ); - } - }, listener::onFailure)); + datafeedConfigProvider.findDatafeedIdsForJobIds( + Collections.singletonList(jobId), + listener.delegateFailureAndWrap((delegate, datafeedIds) -> { + if (datafeedIds.isEmpty()) { + delegate.onResponse(Boolean.TRUE); + } else { + delegate.onFailure( + ExceptionsHelper.conflictStatusException( + "A datafeed [" + datafeedIds.iterator().next() + "] already exists for job [" + jobId + "]" + ) + ); + } + }) + ); } } diff --git 
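Most of the listener changes in this file, and throughout the rest of the diff, replace ActionListener.wrap(onSuccess, listener::onFailure) with listener.delegateFailureAndWrap(...), which forwards failures to the outer listener implicitly. The sketch below is a simplified model of that contract, not the real ActionListener class, intended only to show why the explicit onFailure argument becomes unnecessary.

import java.util.function.BiConsumer;

interface Listener<T> {
    void onResponse(T value);

    void onFailure(Exception e);

    // Simplified model: the returned listener handles an intermediate result and
    // forwards any failure to this (outer) listener, so callers no longer repeat
    // the listener::onFailure argument at every step.
    default <R> Listener<R> delegateFailureAndWrap(BiConsumer<Listener<T>, R> onOk) {
        Listener<T> outer = this;
        return new Listener<R>() {
            @Override
            public void onResponse(R value) {
                try {
                    onOk.accept(outer, value); // success path decides what to forward
                } catch (Exception e) {
                    outer.onFailure(e);        // exceptions thrown while wrapping still propagate
                }
            }

            @Override
            public void onFailure(Exception e) {
                outer.onFailure(e);            // failures always reach the outer listener unchanged
            }
        };
    }

    static void main(String[] args) {
        Listener<String> outer = new Listener<>() {
            public void onResponse(String v) { System.out.println("final: " + v); }
            public void onFailure(Exception e) { System.out.println("failed: " + e.getMessage()); }
        };
        Listener<Integer> step = outer.delegateFailureAndWrap((l, i) -> l.onResponse("count=" + i));
        step.onResponse(7);                    // prints "final: count=7"
        step.onFailure(new Exception("boom")); // prints "failed: boom"
    }
}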
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactory.java index be2c8dd871a9b..bcdf5e83cc5ca 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactory.java @@ -59,13 +59,12 @@ static void create( ) { final boolean hasAggs = datafeed.hasAggregations(); final boolean isComposite = hasAggs && datafeed.hasCompositeAgg(xContentRegistry); - ActionListener factoryHandler = ActionListener.wrap( - factory -> listener.onResponse( + ActionListener factoryHandler = listener.delegateFailureAndWrap( + (l, factory) -> l.onResponse( datafeed.getChunkingConfig().isEnabled() ? new ChunkedDataExtractorFactory(datafeed, job, xContentRegistry, factory) : factory - ), - listener::onFailure + ) ); ActionListener getRollupIndexCapsActionHandler = ActionListener.wrap(response -> { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AbstractAggregationDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AbstractAggregationDataExtractor.java index 26c43e1d098c1..f561c2a0aa5ca 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AbstractAggregationDataExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AbstractAggregationDataExtractor.java @@ -22,6 +22,7 @@ import org.elasticsearch.xpack.core.ml.datafeed.SearchInterval; import org.elasticsearch.xpack.ml.datafeed.DatafeedTimingStatsReporter; import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractor; +import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorQueryContext; import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorUtils; import java.io.ByteArrayInputStream; @@ -121,7 +122,7 @@ private InternalAggregations search() { LOGGER.debug("[{}] Executing aggregated search", context.jobId); ActionRequestBuilder searchRequest = buildSearchRequest(buildBaseSearchSource()); assert searchRequest.request().allowPartialSearchResults() == false; - SearchResponse searchResponse = executeSearchRequest(searchRequest); + SearchResponse searchResponse = executeSearchRequest(client, context.queryContext, searchRequest); try { LOGGER.debug("[{}] Search response was obtained", context.jobId); timingStatsReporter.reportSearchDuration(searchResponse.getTook()); @@ -142,9 +143,13 @@ private void initAggregationProcessor(InternalAggregations aggs) throws IOExcept aggregationToJsonProcessor.process(aggs); } - private SearchResponse executeSearchRequest(ActionRequestBuilder searchRequestBuilder) { + static SearchResponse executeSearchRequest( + Client client, + DataExtractorQueryContext context, + ActionRequestBuilder searchRequestBuilder + ) { SearchResponse searchResponse = ClientHelper.executeWithHeaders( - context.queryContext.headers, + context.headers, ClientHelper.ML_ORIGIN, client, searchRequestBuilder::get @@ -216,7 +221,7 @@ public DataSummary getSummary() { ActionRequestBuilder searchRequestBuilder = buildSearchRequest( DataExtractorUtils.getSearchSourceBuilderForSummary(context.queryContext) ); - SearchResponse searchResponse = executeSearchRequest(searchRequestBuilder); + SearchResponse searchResponse = 
executeSearchRequest(client, context.queryContext, searchRequestBuilder); try { LOGGER.debug("[{}] Aggregating Data summary response was obtained", context.jobId); timingStatsReporter.reportSearchDuration(searchResponse.getTook()); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/CompositeAggregationDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/CompositeAggregationDataExtractor.java index e4712d051ef1e..874c68c0afd73 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/CompositeAggregationDataExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/CompositeAggregationDataExtractor.java @@ -16,7 +16,6 @@ import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation; import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfigUtils; import org.elasticsearch.xpack.core.ml.datafeed.SearchInterval; import org.elasticsearch.xpack.core.ml.utils.Intervals; @@ -48,9 +47,6 @@ class CompositeAggregationDataExtractor implements DataExtractor { private static final Logger LOGGER = LogManager.getLogger(CompositeAggregationDataExtractor.class); - private static final String EARLIEST_TIME = "earliest_time"; - private static final String LATEST_TIME = "latest_time"; - private volatile Map afterKey = null; private final CompositeAggregationBuilder compositeAggregationBuilder; private final Client client; @@ -90,7 +86,7 @@ public boolean isCancelled() { @Override public void cancel() { - LOGGER.debug(() -> "[" + context.jobId + "] Data extractor received cancel request"); + LOGGER.debug("[{}] Data extractor received cancel request", context.jobId); isCancelled = true; } @@ -113,7 +109,7 @@ public Result next() throws IOException { SearchInterval searchInterval = new SearchInterval(context.queryContext.start, context.queryContext.end); InternalAggregations aggs = search(); if (aggs == null) { - LOGGER.trace(() -> "[" + context.jobId + "] extraction finished"); + LOGGER.trace("[{}] extraction finished", context.jobId); hasNext = false; afterKey = null; return new Result(searchInterval, Optional.empty()); @@ -153,9 +149,9 @@ private InternalAggregations search() { } searchSourceBuilder.aggregation(compositeAggregationBuilder); ActionRequestBuilder searchRequest = requestBuilder.build(searchSourceBuilder); - SearchResponse searchResponse = executeSearchRequest(searchRequest); + SearchResponse searchResponse = AbstractAggregationDataExtractor.executeSearchRequest(client, context.queryContext, searchRequest); try { - LOGGER.trace(() -> "[" + context.jobId + "] Search composite response was obtained"); + LOGGER.trace("[{}] Search composite response was obtained", context.jobId); timingStatsReporter.reportSearchDuration(searchResponse.getTook()); InternalAggregations aggregations = searchResponse.getAggregations(); if (aggregations == null) { @@ -171,25 +167,6 @@ private InternalAggregations search() { } } - private SearchResponse executeSearchRequest(ActionRequestBuilder searchRequestBuilder) { - SearchResponse searchResponse = ClientHelper.executeWithHeaders( - context.queryContext.headers, - ClientHelper.ML_ORIGIN, - client, - searchRequestBuilder::get - ); - boolean success 
= false; - try { - DataExtractorUtils.checkForSkippedClusters(searchResponse); - success = true; - } finally { - if (success == false) { - searchResponse.decRef(); - } - } - return searchResponse; - } - private InputStream processAggs(InternalAggregations aggs) throws IOException { AggregationToJsonProcessor aggregationToJsonProcessor = new AggregationToJsonProcessor( context.queryContext.timeField, @@ -262,7 +239,11 @@ public DataSummary getSummary() { client, context.queryContext ); - SearchResponse searchResponse = executeSearchRequest(searchRequestBuilder); + SearchResponse searchResponse = AbstractAggregationDataExtractor.executeSearchRequest( + client, + context.queryContext, + searchRequestBuilder + ); try { LOGGER.debug("[{}] Aggregating Data summary response was obtained", context.jobId); timingStatsReporter.reportSearchDuration(searchResponse.getTook()); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java index 5da89da6b3450..52ffe3893f33c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java @@ -9,6 +9,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequestBuilder; @@ -133,13 +134,13 @@ protected InputStream initScroll(long startTimestamp) throws IOException { } } - protected SearchResponse executeSearchRequest(SearchRequestBuilder searchRequestBuilder) { - SearchResponse searchResponse = ClientHelper.executeWithHeaders( - context.queryContext.headers, - ClientHelper.ML_ORIGIN, - client, - searchRequestBuilder::get + protected SearchResponse executeSearchRequest(ActionRequestBuilder searchRequestBuilder) { + return checkForSkippedClusters( + ClientHelper.executeWithHeaders(context.queryContext.headers, ClientHelper.ML_ORIGIN, client, searchRequestBuilder::get) ); + } + + private SearchResponse checkForSkippedClusters(SearchResponse searchResponse) { boolean success = false; try { DataExtractorUtils.checkForSkippedClusters(searchResponse); @@ -262,25 +263,7 @@ void markScrollAsErrored() { @SuppressWarnings("HiddenField") protected SearchResponse executeSearchScrollRequest(String scrollId) { - SearchResponse searchResponse = ClientHelper.executeWithHeaders( - context.queryContext.headers, - ClientHelper.ML_ORIGIN, - client, - () -> new SearchScrollRequestBuilder(client).setScroll(SCROLL_TIMEOUT).setScrollId(scrollId).get() - ); - boolean success = false; - try { - DataExtractorUtils.checkForSkippedClusters(searchResponse); - success = true; - } catch (ResourceNotFoundException e) { - clearScrollLoggingExceptions(searchResponse.getScrollId()); - throw e; - } finally { - if (success == false) { - searchResponse.decRef(); - } - } - return searchResponse; + return executeSearchRequest(new SearchScrollRequestBuilder(client).setScroll(SCROLL_TIMEOUT).setScrollId(scrollId)); } private void clearScroll() { diff --git 
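Both extractors now funnel their searches through a shared helper whose error handling follows the same reference-counting idiom shown above: the response's reference is released only if the post-search validation throws. Below is a generic sketch of that "release unless handed off" pattern, with RefCounted reduced to a single decRef method for illustration.

final class RefCountedSketch {
    interface RefCounted {
        void decRef();
    }

    // Release the resource only when validation throws; on success the caller
    // keeps exactly one reference and remains responsible for releasing it later.
    static <T extends RefCounted> T validateOrRelease(T resource, Runnable validation) {
        boolean success = false;
        try {
            validation.run(); // e.g. a skipped-clusters check that may throw
            success = true;
            return resource;
        } finally {
            if (success == false) {
                resource.decRef(); // never leak a reference on the error path
            }
        }
    }

    public static void main(String[] args) {
        RefCounted response = () -> System.out.println("released");
        try {
            validateOrRelease(response, () -> { throw new IllegalStateException("skipped clusters"); });
        } catch (IllegalStateException e) {
            System.out.println("caught: " + e.getMessage()); // "released" is printed first
        }
    }
}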
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java index e226056217351..fbabc9903c4cc 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java @@ -223,7 +223,7 @@ public void findDatafeedIdsForJobIds(Collection jobIds, ActionListenerwrap(response -> { + listener.delegateFailureAndWrap((delegate, response) -> { Set datafeedIds = new HashSet<>(); // There cannot be more than one datafeed per job assert response.getHits().getTotalHits().value <= jobIds.size(); @@ -233,8 +233,8 @@ public void findDatafeedIdsForJobIds(Collection jobIds, ActionListenerwrap(response -> { + listener.delegateFailureAndWrap((delegate, response) -> { Map datafeedsByJobId = new HashMap<>(); // There cannot be more than one datafeed per job assert response.getHits().getTotalHits().value <= jobIds.size(); @@ -265,8 +265,8 @@ public void findDatafeedsByJobIds( DatafeedConfig.Builder builder = parseLenientlyFromSource(hit.getSourceRef()); datafeedsByJobId.put(builder.getJobId(), builder); } - listener.onResponse(datafeedsByJobId); - }, listener::onFailure), + delegate.onResponse(datafeedsByJobId); + }), client::search ); } @@ -440,7 +440,7 @@ public void expandDatafeedIds( client.threadPool().getThreadContext(), ML_ORIGIN, searchRequest, - ActionListener.wrap(response -> { + listener.delegateFailureAndWrap((delegate, response) -> { SortedSet datafeedIds = new TreeSet<>(); SearchHit[] hits = response.getHits().getHits(); for (SearchHit hit : hits) { @@ -453,12 +453,12 @@ public void expandDatafeedIds( requiredMatches.filterMatchedIds(datafeedIds); if (requiredMatches.hasUnmatchedIds()) { // some required datafeeds were not found - listener.onFailure(ExceptionsHelper.missingDatafeedException(requiredMatches.unmatchedIdsString())); + delegate.onFailure(ExceptionsHelper.missingDatafeedException(requiredMatches.unmatchedIdsString())); return; } - listener.onResponse(datafeedIds); - }, listener::onFailure), + delegate.onResponse(datafeedIds); + }), client::search ); @@ -502,7 +502,7 @@ public void expandDatafeedConfigs( client.threadPool().getThreadContext(), ML_ORIGIN, searchRequest, - ActionListener.wrap(response -> { + listener.delegateFailureAndWrap((delegate, response) -> { List datafeeds = new ArrayList<>(); Set datafeedIds = new HashSet<>(); SearchHit[] hits = response.getHits().getHits(); @@ -521,12 +521,12 @@ public void expandDatafeedConfigs( requiredMatches.filterMatchedIds(datafeedIds); if (requiredMatches.hasUnmatchedIds()) { // some required datafeeds were not found - listener.onFailure(ExceptionsHelper.missingDatafeedException(requiredMatches.unmatchedIdsString())); + delegate.onFailure(ExceptionsHelper.missingDatafeedException(requiredMatches.unmatchedIdsString())); return; } - listener.onResponse(datafeeds); - }, listener::onFailure), + delegate.onResponse(datafeeds); + }), client::search ); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java index 223154737df3f..d370e8af52549 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java @@ -33,7 +33,6 @@ import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; -import org.elasticsearch.xpack.ml.dataframe.extractor.ExtractedFieldsDetector; import org.elasticsearch.xpack.ml.dataframe.extractor.ExtractedFieldsDetectorFactory; import org.elasticsearch.xpack.ml.dataframe.inference.InferenceRunner; import org.elasticsearch.xpack.ml.dataframe.persistence.DataFrameAnalyticsConfigProvider; @@ -171,9 +170,8 @@ public void execute(DataFrameAnalyticsTask task, ClusterState clusterState, Time }, task::setFailed); // Retrieve configuration - ActionListener statsIndexListener = ActionListener.wrap( - aBoolean -> configProvider.get(task.getParams().getId(), configListener), - configListener::onFailure + ActionListener statsIndexListener = configListener.delegateFailureAndWrap( + (l, aBoolean) -> configProvider.get(task.getParams().getId(), l) ); // Make sure the stats index and alias exist @@ -203,25 +201,22 @@ private void createStatsIndexAndUpdateMappingsIfNecessary( TimeValue masterNodeTimeout, ActionListener listener ) { - ActionListener createIndexListener = ActionListener.wrap( - aBoolean -> ElasticsearchMappings.addDocMappingIfMissing( - MlStatsIndex.writeAlias(), - MlStatsIndex::wrappedMapping, - clientToUse, - clusterState, - masterNodeTimeout, - listener, - MlStatsIndex.STATS_INDEX_MAPPINGS_VERSION - ), - listener::onFailure - ); - MlStatsIndex.createStatsIndexAndAliasIfNecessary( clientToUse, clusterState, expressionResolver, masterNodeTimeout, - createIndexListener + listener.delegateFailureAndWrap( + (l, aBoolean) -> ElasticsearchMappings.addDocMappingIfMissing( + MlStatsIndex.writeAlias(), + MlStatsIndex::wrappedMapping, + clientToUse, + clusterState, + masterNodeTimeout, + l, + MlStatsIndex.STATS_INDEX_MAPPINGS_VERSION + ) + ) ); } @@ -306,25 +301,25 @@ private void executeJobInMiddleOfReindexing(DataFrameAnalyticsTask task, DataFra private void buildInferenceStep(DataFrameAnalyticsTask task, DataFrameAnalyticsConfig config, ActionListener listener) { ParentTaskAssigningClient parentTaskClient = new ParentTaskAssigningClient(client, task.getParentTaskId()); - - ActionListener extractedFieldsDetectorListener = ActionListener.wrap(extractedFieldsDetector -> { - ExtractedFields extractedFields = extractedFieldsDetector.detect().v1(); - InferenceRunner inferenceRunner = new InferenceRunner( - settings, - parentTaskClient, - modelLoadingService, - resultsPersisterService, - task.getParentTaskId(), - config, - extractedFields, - task.getStatsHolder().getProgressTracker(), - task.getStatsHolder().getDataCountsTracker() - ); - InferenceStep inferenceStep = new InferenceStep(client, task, auditor, config, threadPool, inferenceRunner); - listener.onResponse(inferenceStep); - }, listener::onFailure); - - new ExtractedFieldsDetectorFactory(parentTaskClient).createFromDest(config, extractedFieldsDetectorListener); + new ExtractedFieldsDetectorFactory(parentTaskClient).createFromDest( + config, + listener.delegateFailureAndWrap((delegate, extractedFieldsDetector) -> { + ExtractedFields extractedFields = extractedFieldsDetector.detect().v1(); + InferenceRunner inferenceRunner = new InferenceRunner( + settings, + parentTaskClient, + modelLoadingService, + resultsPersisterService, + task.getParentTaskId(), + config, + 
extractedFields, + task.getStatsHolder().getProgressTracker(), + task.getStatsHolder().getDataCountsTracker() + ); + InferenceStep inferenceStep = new InferenceStep(client, task, auditor, config, threadPool, inferenceRunner); + delegate.onResponse(inferenceStep); + }) + ); } public boolean isNodeShuttingDown() { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DestinationIndex.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DestinationIndex.java index 81de8add4ae2e..8623f456b2035 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DestinationIndex.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DestinationIndex.java @@ -134,9 +134,11 @@ private static void prepareCreateIndexRequest( AtomicReference settingsHolder = new AtomicReference<>(); AtomicReference mappingsHolder = new AtomicReference<>(); - ActionListener fieldCapabilitiesListener = ActionListener.wrap(fieldCapabilitiesResponse -> { - listener.onResponse(createIndexRequest(clock, config, settingsHolder.get(), mappingsHolder.get(), fieldCapabilitiesResponse)); - }, listener::onFailure); + ActionListener fieldCapabilitiesListener = listener.delegateFailureAndWrap( + (l, fieldCapabilitiesResponse) -> l.onResponse( + createIndexRequest(clock, config, settingsHolder.get(), mappingsHolder.get(), fieldCapabilitiesResponse) + ) + ); ActionListener mappingsListener = ActionListener.wrap(mappings -> { mappingsHolder.set(mappings); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorFactory.java index b9d7e31a2cf73..09c3ae15c90a3 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorFactory.java @@ -147,22 +147,22 @@ public static void createForDestinationIndex( ActionListener listener ) { ExtractedFieldsDetectorFactory extractedFieldsDetectorFactory = new ExtractedFieldsDetectorFactory(client); - extractedFieldsDetectorFactory.createFromDest(config, ActionListener.wrap(extractedFieldsDetector -> { + extractedFieldsDetectorFactory.createFromDest(config, listener.delegateFailureAndWrap((delegate, extractedFieldsDetector) -> { ExtractedFields extractedFields = extractedFieldsDetector.detect().v1(); - - DataFrameDataExtractorFactory extractorFactory = new DataFrameDataExtractorFactory( - client, - config.getId(), - Collections.singletonList(config.getDest().getIndex()), - config.getSource().getParsedQuery(), - extractedFields, - config.getAnalysis().getRequiredFields(), - config.getHeaders(), - config.getAnalysis().supportsMissingValues(), - createTrainTestSplitterFactory(client, config, extractedFields), - Collections.emptyMap() + delegate.onResponse( + new DataFrameDataExtractorFactory( + client, + config.getId(), + Collections.singletonList(config.getDest().getIndex()), + config.getSource().getParsedQuery(), + extractedFields, + config.getAnalysis().getRequiredFields(), + config.getHeaders(), + config.getAnalysis().supportsMissingValues(), + createTrainTestSplitterFactory(client, config, extractedFields), + Collections.emptyMap() + ) ); - listener.onResponse(extractorFactory); - }, listener::onFailure)); + })); } } diff --git 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetectorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetectorFactory.java index 49e25c95713ef..73f8e7bd520d4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetectorFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetectorFactory.java @@ -112,11 +112,6 @@ private void getCardinalitiesForFieldsWithConstraints( return; } - ActionListener searchListener = ActionListener.wrap( - searchResponse -> buildFieldCardinalitiesMap(config, searchResponse, listener), - listener::onFailure - ); - SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().size(0) .query(config.getSource().getParsedQuery()) .runtimeMappings(config.getSource().getRuntimeMappings()); @@ -147,7 +142,7 @@ private void getCardinalitiesForFieldsWithConstraints( client, TransportSearchAction.TYPE, searchRequest, - searchListener + listener.delegateFailureAndWrap((l, searchResponse) -> buildFieldCardinalitiesMap(config, searchResponse, l)) ); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/inference/InferenceRunner.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/inference/InferenceRunner.java index c9ce6e0d4e3c7..637b37853363f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/inference/InferenceRunner.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/inference/InferenceRunner.java @@ -153,7 +153,7 @@ private InferenceState restoreInferenceState() { config.getHeaders(), ClientHelper.ML_ORIGIN, client, - () -> client.search(searchRequest).actionGet() + client.search(searchRequest)::actionGet ); try { Max maxIncrementalIdAgg = searchResponse.getAggregations().get(DestinationIndex.INCREMENTAL_ID); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsConfigProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsConfigProvider.java index 5469c6a7a7d87..8c7d490f37787 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsConfigProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsConfigProvider.java @@ -103,19 +103,17 @@ public void put( TimeValue timeout, ActionListener listener ) { - - ActionListener deleteLeftOverDocsListener = ActionListener.wrap( - r -> index(prepareConfigForIndex(config, headers), null, listener), - listener::onFailure - ); - - ActionListener existsListener = ActionListener.wrap(exists -> { + ActionListener existsListener = listener.delegateFailureAndWrap((l, exists) -> { if (exists) { - listener.onFailure(ExceptionsHelper.dataFrameAnalyticsAlreadyExists(config.getId())); + l.onFailure(ExceptionsHelper.dataFrameAnalyticsAlreadyExists(config.getId())); } else { - deleteLeftOverDocs(config, timeout, deleteLeftOverDocsListener); + deleteLeftOverDocs( + config, + timeout, + l.delegateFailureAndWrap((ll, r) -> index(prepareConfigForIndex(config, headers), null, ll)) + ); } - }, listener::onFailure); + }); exists(config.getId(), existsListener); } @@ -194,10 +192,10 @@ public void update( DataFrameAnalyticsConfig updatedConfig = updatedConfigBuilder.build(); // Index the update config - 
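In InferenceRunner the supplier lambda becomes a bound method reference, client.search(searchRequest)::actionGet. As a point of Java semantics, the receiver expression of a bound method reference is evaluated once, when the reference is created, whereas a lambda defers the whole expression until it is invoked; whether that distinction matters here depends on what executeWithHeaders does with the supplier, which the excerpt does not show. The following self-contained demonstration of the timing difference uses startSearch as a made-up stand-in.

import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Supplier;

final class MethodRefTimingSketch {
    static final AtomicInteger CALLS = new AtomicInteger();

    static AtomicInteger startSearch() {
        CALLS.incrementAndGet(); // marks the moment the "search" is actually issued
        return CALLS;
    }

    public static void main(String[] args) {
        Supplier<Integer> boundRef = startSearch()::get; // receiver evaluated right here
        System.out.println(CALLS.get());                 // 1: the call already happened

        Supplier<Integer> lambda = () -> startSearch().get(); // nothing evaluated yet
        System.out.println(CALLS.get());                 // still 1
        lambda.get();
        System.out.println(CALLS.get());                 // 2
    }
}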
index(updatedConfig, getResponse, ActionListener.wrap(indexedConfig -> { + index(updatedConfig, getResponse, listener.delegateFailureAndWrap((l, indexedConfig) -> { auditor.info(id, Messages.getMessage(Messages.DATA_FRAME_ANALYTICS_AUDIT_UPDATED, update.getUpdatedFields())); - listener.onResponse(indexedConfig); - }, listener::onFailure)); + l.onResponse(indexedConfig); + })); }, listener::onFailure)); } @@ -269,20 +267,26 @@ private void index( public void get(String id, ActionListener listener) { GetDataFrameAnalyticsAction.Request request = new GetDataFrameAnalyticsAction.Request(); request.setResourceId(id); - executeAsyncWithOrigin(client, ML_ORIGIN, GetDataFrameAnalyticsAction.INSTANCE, request, ActionListener.wrap(response -> { - List analytics = response.getResources().results(); - if (analytics.size() != 1) { - listener.onFailure( - ExceptionsHelper.badRequestException( - "Expected a single match for data frame analytics [{}] " + "but got [{}]", - id, - analytics.size() - ) - ); - } else { - listener.onResponse(analytics.get(0)); - } - }, listener::onFailure)); + executeAsyncWithOrigin( + client, + ML_ORIGIN, + GetDataFrameAnalyticsAction.INSTANCE, + request, + listener.delegateFailureAndWrap((delegate, response) -> { + List analytics = response.getResources().results(); + if (analytics.size() != 1) { + delegate.onFailure( + ExceptionsHelper.badRequestException( + "Expected a single match for data frame analytics [{}] " + "but got [{}]", + id, + analytics.size() + ) + ); + } else { + delegate.onResponse(analytics.get(0)); + } + }) + ); } /** @@ -298,7 +302,7 @@ public void getMultiple(String ids, boolean allowNoMatch, ActionListener listener.onResponse(response.getResources().results()), listener::onFailure) + listener.delegateFailureAndWrap((l, response) -> l.onResponse(response.getResources().results())) ); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsDeleter.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsDeleter.java index 843d9d74a1c7d..2a8b23728fbdb 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsDeleter.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsDeleter.java @@ -126,14 +126,13 @@ private void deleteConfig(String id, ActionListener listen } private void deleteState(DataFrameAnalyticsConfig config, TimeValue timeout, ActionListener listener) { - ActionListener deleteModelStateListener = ActionListener.wrap( - r -> executeDeleteByQuery( + ActionListener deleteModelStateListener = listener.delegateFailureAndWrap( + (l, r) -> executeDeleteByQuery( AnomalyDetectorsIndex.jobStateIndexPattern(), QueryBuilders.idsQuery().addIds(StoredProgress.documentId(config.getId())), timeout, - listener - ), - listener::onFailure + l + ) ); deleteModelState(config, timeout, 1, deleteModelStateListener); @@ -146,13 +145,18 @@ private void deleteModelState(DataFrameAnalyticsConfig config, TimeValue timeout } IdsQueryBuilder query = QueryBuilders.idsQuery().addIds(config.getAnalysis().getStateDocIdPrefix(config.getId()) + docNum); - executeDeleteByQuery(AnomalyDetectorsIndex.jobStateIndexPattern(), query, timeout, ActionListener.wrap(response -> { - if (response.getDeleted() > 0) { - deleteModelState(config, timeout, docNum + 1, listener); - return; - } - listener.onResponse(true); - }, listener::onFailure)); + executeDeleteByQuery( + 
AnomalyDetectorsIndex.jobStateIndexPattern(), + query, + timeout, + listener.delegateFailureAndWrap((l, response) -> { + if (response.getDeleted() > 0) { + deleteModelState(config, timeout, docNum + 1, l); + return; + } + l.onResponse(true); + }) + ); } private void deleteStats(String jobId, TimeValue timeout, ActionListener listener) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/AbstractDataFrameAnalyticsStep.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/AbstractDataFrameAnalyticsStep.java index 0c693ff2d34f4..112d164601546 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/AbstractDataFrameAnalyticsStep.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/AbstractDataFrameAnalyticsStep.java @@ -67,11 +67,11 @@ public final void execute(ActionListener listener) { listener.onResponse(new StepResponse(true)); return; } - doExecute(ActionListener.wrap(stepResponse -> { + doExecute(listener.delegateFailureAndWrap((l, stepResponse) -> { // We persist progress at the end of each step to ensure we do not have // to repeat the step in case the node goes down without getting a chance to persist progress. - task.persistProgress(() -> listener.onResponse(stepResponse)); - }, listener::onFailure)); + task.persistProgress(() -> l.onResponse(stepResponse)); + })); } protected abstract void doExecute(ActionListener listener); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/AnalysisStep.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/AnalysisStep.java index 9e56387ed773e..ec914546c7de5 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/AnalysisStep.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/AnalysisStep.java @@ -58,17 +58,16 @@ protected void doExecute(ActionListener listener) { final ParentTaskAssigningClient parentTaskClient = parentTaskClient(); // Update state to ANALYZING and start process - ActionListener dataExtractorFactoryListener = ActionListener.wrap( - dataExtractorFactory -> processManager.runJob(task, config, dataExtractorFactory, listener), - listener::onFailure + ActionListener dataExtractorFactoryListener = listener.delegateFailureAndWrap( + (l, dataExtractorFactory) -> processManager.runJob(task, config, dataExtractorFactory, l) ); - ActionListener refreshListener = ActionListener.wrap(refreshResponse -> { + ActionListener refreshListener = dataExtractorFactoryListener.delegateFailureAndWrap((l, refreshResponse) -> { // TODO This could fail with errors. In that case we get stuck with the copied index. // We could delete the index in case of failure or we could try building the factory before reindexing // to catch the error early on. - DataFrameDataExtractorFactory.createForDestinationIndex(parentTaskClient, config, dataExtractorFactoryListener); - }, dataExtractorFactoryListener::onFailure); + DataFrameDataExtractorFactory.createForDestinationIndex(parentTaskClient, config, l); + }); // First we need to refresh the dest index to ensure data is searchable in case the job // was stopped after reindexing was complete but before the index was refreshed. 
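deleteModelState walks the state documents with an asynchronous loop: each completed delete-by-query either schedules the next document number or signals completion once a query deletes nothing. Below is a stand-alone sketch of that callback-driven loop, with DeleteByQuery as a made-up single-method interface in place of the real client call.

import java.util.List;
import java.util.function.Consumer;

final class AsyncDeleteLoopSketch {
    // Made-up single-method stand-in for an asynchronous delete-by-query call.
    interface DeleteByQuery {
        void delete(int docNum, Consumer<Long> onDeletedCount);
    }

    // Each completed delete either schedules the next document number or reports
    // completion, so the loop ends at the first query that deletes nothing.
    static void deleteFrom(DeleteByQuery client, int docNum, Runnable onDone) {
        client.delete(docNum, deleted -> {
            if (deleted > 0) {
                deleteFrom(client, docNum + 1, onDone);
            } else {
                onDone.run();
            }
        });
    }

    public static void main(String[] args) {
        List<Integer> existingDocs = List.of(1, 2, 3);
        DeleteByQuery fake = (docNum, cb) -> cb.accept(existingDocs.contains(docNum) ? 1L : 0L);
        deleteFrom(fake, 1, () -> System.out.println("all state documents removed"));
    }
}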
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/FinalStep.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/FinalStep.java index dbf1f3e7be3d9..258c66ad5cb0f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/FinalStep.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/FinalStep.java @@ -59,18 +59,13 @@ public Name name() { @Override protected void doExecute(ActionListener listener) { - - ActionListener refreshListener = ActionListener.wrap( - refreshResponse -> listener.onResponse(new StepResponse(false)), - listener::onFailure - ); - - ActionListener dataCountsIndexedListener = ActionListener.wrap( - indexResponse -> refreshIndices(refreshListener), - listener::onFailure + indexDataCounts( + listener.delegateFailureAndWrap( + (l, indexResponse) -> refreshIndices( + l.delegateFailureAndWrap((ll, refreshResponse) -> ll.onResponse(new StepResponse(false))) + ) + ) ); - - indexDataCounts(dataCountsIndexedListener); } private void indexDataCounts(ActionListener listener) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/InferenceStep.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/InferenceStep.java index ad005e6d9ae6c..37ad1a5cb8f56 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/InferenceStep.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/InferenceStep.java @@ -13,7 +13,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.TransportSearchAction; -import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.QueryBuilders; @@ -70,27 +69,21 @@ protected void doExecute(ActionListener listener) { return; } - ActionListener modelIdListener = ActionListener.wrap(modelId -> runInference(modelId, listener), listener::onFailure); - - ActionListener testDocsExistListener = ActionListener.wrap(testDocsExist -> { - if (testDocsExist) { - getModelId(modelIdListener); - } else { - // no need to run inference at all so let us skip - // loading the model in memory. - LOGGER.debug(() -> "[" + config.getId() + "] Inference step completed immediately as there are no test docs"); - task.getStatsHolder().getProgressTracker().updateInferenceProgress(100); - listener.onResponse(new StepResponse(isTaskStopping())); - return; - } - }, listener::onFailure); - - ActionListener refreshDestListener = ActionListener.wrap( - refreshResponse -> searchIfTestDocsExist(testDocsExistListener), - listener::onFailure + refreshDestAsync( + listener.delegateFailureAndWrap( + (delegate, refreshResponse) -> searchIfTestDocsExist(delegate.delegateFailureAndWrap((delegate2, testDocsExist) -> { + if (testDocsExist) { + getModelId(delegate2.delegateFailureAndWrap((l, modelId) -> runInference(modelId, l))); + } else { + // no need to run inference at all so let us skip + // loading the model in memory. 
+ LOGGER.debug(() -> "[" + config.getId() + "] Inference step completed immediately as there are no test docs"); + task.getStatsHolder().getProgressTracker().updateInferenceProgress(100); + delegate2.onResponse(new StepResponse(isTaskStopping())); + } + })) + ) ); - - refreshDestAsync(refreshDestListener); } private void runInference(String modelId, ActionListener listener) { @@ -124,10 +117,7 @@ private void searchIfTestDocsExist(ActionListener listener) { ML_ORIGIN, TransportSearchAction.TYPE, searchRequest, - ActionListener.wrap( - searchResponse -> listener.onResponse(searchResponse.getHits().getTotalHits().value > 0), - listener::onFailure - ) + listener.delegateFailureAndWrap((l, searchResponse) -> l.onResponse(searchResponse.getHits().getTotalHits().value > 0)) ); } @@ -142,14 +132,20 @@ private void getModelId(ActionListener listener) { SearchRequest searchRequest = new SearchRequest(InferenceIndexConstants.INDEX_PATTERN); searchRequest.source(searchSourceBuilder); - executeAsyncWithOrigin(client, ML_ORIGIN, TransportSearchAction.TYPE, searchRequest, ActionListener.wrap(searchResponse -> { - SearchHit[] hits = searchResponse.getHits().getHits(); - if (hits.length == 0) { - listener.onFailure(new ResourceNotFoundException("No model could be found to perform inference")); - } else { - listener.onResponse(hits[0].getId()); - } - }, listener::onFailure)); + executeAsyncWithOrigin( + client, + ML_ORIGIN, + TransportSearchAction.TYPE, + searchRequest, + listener.delegateFailureAndWrap((l, searchResponse) -> { + SearchHit[] hits = searchResponse.getHits().getHits(); + if (hits.length == 0) { + l.onFailure(new ResourceNotFoundException("No model could be found to perform inference")); + } else { + l.onResponse(hits[0].getId()); + } + }) + ); } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ingest/InferenceProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ingest/InferenceProcessor.java index 6b28a9aef9f48..32c85eb4e335e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ingest/InferenceProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ingest/InferenceProcessor.java @@ -368,7 +368,7 @@ public static final class Factory implements Processor.Factory, Consumer config ) { + final var currentInferenceProcessors = InferenceProcessorInfoExtractor.countInferenceProcessors(clusterState); if (this.maxIngestProcessors <= currentInferenceProcessors) { throw new ElasticsearchStatusException( "Max number of inference processors reached, total inference processors [{}]. 
" diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java index 5994c61f46297..5869f353c80c9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java @@ -647,7 +647,7 @@ private void handleLoadSuccess( // Also, if the consumer is a search consumer, we should always cache it if (referencedModels.contains(modelId) || Sets.haveNonEmptyIntersection(modelIdToModelAliases.getOrDefault(modelId, new HashSet<>()), referencedModels) - || consumer.equals(Consumer.SEARCH_AGGS)) { + || consumer.isAnyOf(Consumer.SEARCH_AGGS, Consumer.SEARCH_RESCORER)) { try { // The local model may already be in cache. If it is, we don't bother adding it to cache. // If it isn't, we flip an `isLoaded` flag, and increment the model counter to make sure if it is evicted @@ -810,7 +810,8 @@ public void clusterChanged(ClusterChangedEvent event) { ); if (oldModelAliasesNotReferenced && newModelAliasesNotReferenced && modelIsNotReferenced) { ModelAndConsumer modelAndConsumer = localModelCache.get(modelId); - if (modelAndConsumer != null && modelAndConsumer.consumers.contains(Consumer.SEARCH_AGGS) == false) { + if (modelAndConsumer != null + && modelAndConsumer.consumers.stream().noneMatch(c -> c.isAnyOf(Consumer.SEARCH_AGGS, Consumer.SEARCH_RESCORER))) { logger.trace("[{} ({})] invalidated from cache", modelId, modelAliasOrId); localModelCache.invalidate(modelId); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java index 7532ae4317830..9887152c6f311 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java @@ -142,14 +142,8 @@ public void jobExists(String jobId, @Nullable TaskId parentTaskId, ActionListene * a ResourceNotFoundException is returned */ public void getJob(String jobId, ActionListener jobListener) { - jobConfigProvider.getJob( - jobId, - null, - ActionListener.wrap( - r -> jobListener.onResponse(r.build()), // TODO JIndex we shouldn't be building the job here - jobListener::onFailure - ) - ); + // TODO JIndex we shouldn't be building the job here + jobConfigProvider.getJob(jobId, null, jobListener.delegateFailureAndWrap((l, r) -> l.onResponse(r.build()))); } /** @@ -183,15 +177,14 @@ public void expandJobs(String expression, boolean allowNoMatch, ActionListener jobsListener.onResponse( + jobsListener.delegateFailureAndWrap( + (l, jobBuilders) -> l.onResponse( new QueryPage<>( jobBuilders.stream().map(Job.Builder::build).collect(Collectors.toList()), jobBuilders.size(), Job.RESULTS_FIELD ) - ), - jobsListener::onFailure + ) ) ); } @@ -253,10 +246,10 @@ public void putJob( @Override public void onResponse(Boolean mappingsUpdated) { - jobConfigProvider.putJob(job, ActionListener.wrap(response -> { + jobConfigProvider.putJob(job, actionListener.delegateFailureAndWrap((l, response) -> { auditor.info(job.getId(), Messages.getMessage(Messages.JOB_AUDIT_CREATED)); - actionListener.onResponse(new PutJobAction.Response(job)); - }, actionListener::onFailure)); + l.onResponse(new PutJobAction.Response(job)); + })); } @Override @@ -275,17 +268,16 
@@ public void onFailure(Exception e) { } }; - ActionListener addDocMappingsListener = ActionListener.wrap( - indicesCreated -> ElasticsearchMappings.addDocMappingIfMissing( + ActionListener addDocMappingsListener = putJobListener.delegateFailureAndWrap( + (l, indicesCreated) -> ElasticsearchMappings.addDocMappingIfMissing( MlConfigIndex.indexName(), MlConfigIndex::mapping, client, state, request.masterNodeTimeout(), - putJobListener, + l, MlConfigIndex.CONFIG_INDEX_MAPPINGS_VERSION - ), - putJobListener::onFailure + ) ); ActionListener> checkForLeftOverDocs = ActionListener.wrap(matchedIds -> { @@ -634,14 +626,15 @@ public void updateProcessOnCalendarChanged(List calendarJobIds, ActionLi // calendarJobIds may be a group or job jobConfigProvider.expandGroupIds( calendarJobIds, - ActionListener.wrap(expandedIds -> threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME).execute(() -> { - // Merge the expanded group members with the request Ids. - // Ids that aren't jobs will be filtered by isJobOpen() - expandedIds.addAll(calendarJobIds); - - openJobIds.retainAll(expandedIds); - submitJobEventUpdate(openJobIds, updateListener); - }), updateListener::onFailure) + updateListener.delegateFailureAndWrap( + (delegate, expandedIds) -> threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME).execute(() -> { + // Merge the expanded group members with the request Ids. + // Ids that aren't jobs will be filtered by isJobOpen() + expandedIds.addAll(calendarJobIds); + openJobIds.retainAll(expandedIds); + submitJobEventUpdate(openJobIds, delegate); + }) + ) ); } @@ -678,12 +671,13 @@ public void revertSnapshot( jobResultsPersister.persistQuantiles( modelSnapshot.getQuantiles(), WriteRequest.RefreshPolicy.IMMEDIATE, - ActionListener.wrap(quantilesResponse -> { - // The quantiles can be large, and totally dominate the output - - // it's clearer to remove them as they are not necessary for the revert op - ModelSnapshot snapshotWithoutQuantiles = new ModelSnapshot.Builder(modelSnapshot).setQuantiles(null).build(); - actionListener.onResponse(new RevertModelSnapshotAction.Response(snapshotWithoutQuantiles)); - }, actionListener::onFailure) + // The quantiles can be large, and totally dominate the output - + // it's clearer to remove them as they are not necessary for the revert op + actionListener.delegateFailureAndWrap( + (l, quantilesResponse) -> l.onResponse( + new RevertModelSnapshotAction.Response(new ModelSnapshot.Builder(modelSnapshot).setQuantiles(null).build()) + ) + ) ); }; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java index aa82c7a261b96..bd1e47e3cb160 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java @@ -70,19 +70,19 @@ private void removeData( return; } - calcCutoffEpochMs(job.getId(), retentionDays, ActionListener.wrap(response -> { + calcCutoffEpochMs(job.getId(), retentionDays, listener.delegateFailureAndWrap((delegate, response) -> { if (response == null) { - removeData(jobIterator, requestsPerSecond, listener, isTimedOutSupplier); + removeData(jobIterator, requestsPerSecond, delegate, isTimedOutSupplier); } else { removeDataBefore( job, requestsPerSecond, response.latestTimeMs, response.cutoffEpochMs, - 
ActionListener.wrap(r -> removeData(jobIterator, requestsPerSecond, listener, isTimedOutSupplier), listener::onFailure) + delegate.delegateFailureAndWrap((l, r) -> removeData(jobIterator, requestsPerSecond, l, isTimedOutSupplier)) ); } - }, listener::onFailure)); + })); } abstract void calcCutoffEpochMs(String jobId, long retentionDays, ActionListener listener); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/EmptyStateIndexRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/EmptyStateIndexRemover.java index 0a5612f8e0ccc..1c8c100939dc7 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/EmptyStateIndexRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/EmptyStateIndexRemover.java @@ -42,20 +42,20 @@ public void remove(float requestsPerSec, ActionListener listener, Boole listener.onResponse(false); return; } - getEmptyStateIndices(ActionListener.wrap(emptyStateIndices -> { + getEmptyStateIndices(listener.delegateFailureAndWrap((delegate, emptyStateIndices) -> { if (emptyStateIndices.isEmpty()) { - listener.onResponse(true); + delegate.onResponse(true); return; } - getCurrentStateIndices(ActionListener.wrap(currentStateIndices -> { + getCurrentStateIndices(delegate.delegateFailureAndWrap((l, currentStateIndices) -> { Set stateIndicesToRemove = Sets.difference(emptyStateIndices, currentStateIndices); if (stateIndicesToRemove.isEmpty()) { - listener.onResponse(true); + l.onResponse(true); return; } - executeDeleteEmptyStateIndices(stateIndicesToRemove, listener); - }, listener::onFailure)); - }, listener::onFailure)); + executeDeleteEmptyStateIndices(stateIndicesToRemove, l); + })); + })); } catch (Exception e) { listener.onFailure(e); } @@ -64,15 +64,21 @@ public void remove(float requestsPerSec, ActionListener listener, Boole private void getEmptyStateIndices(ActionListener> listener) { IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest().indices(AnomalyDetectorsIndex.jobStateIndexPattern()); indicesStatsRequest.setParentTask(parentTaskId); - client.admin().indices().stats(indicesStatsRequest, ActionListener.wrap(indicesStatsResponse -> { - Set emptyStateIndices = indicesStatsResponse.getIndices() - .values() - .stream() - .filter(stats -> stats.getTotal().getDocs().getCount() == 0) - .map(IndexStats::getIndex) - .collect(toSet()); - listener.onResponse(emptyStateIndices); - }, listener::onFailure)); + client.admin() + .indices() + .stats( + indicesStatsRequest, + listener.delegateFailureAndWrap( + (l, indicesStatsResponse) -> l.onResponse( + indicesStatsResponse.getIndices() + .values() + .stream() + .filter(stats -> stats.getTotal().getDocs().getCount() == 0) + .map(IndexStats::getIndex) + .collect(toSet()) + ) + ) + ); } private void getCurrentStateIndices(ActionListener> listener) { @@ -82,7 +88,7 @@ private void getCurrentStateIndices(ActionListener> listener) { .indices() .getIndex( getIndexRequest, - ActionListener.wrap(getIndexResponse -> listener.onResponse(Set.of(getIndexResponse.getIndices())), listener::onFailure) + listener.delegateFailureAndWrap((l, getIndexResponse) -> l.onResponse(Set.of(getIndexResponse.getIndices()))) ); } @@ -93,7 +99,7 @@ private void executeDeleteEmptyStateIndices(Set emptyStateIndices, Actio .indices() .delete( deleteIndexRequest, - ActionListener.wrap(deleteResponse -> listener.onResponse(deleteResponse.isAcknowledged()), listener::onFailure) + listener.delegateFailureAndWrap((l, deleteResponse) 
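EmptyStateIndexRemover selects candidate indices in two steps: filter the stats response down to indices with zero documents, then subtract the indices that are still current so they survive even when empty. Below is a plain-Java sketch of the same selection, using removeAll in place of Sets.difference; the index names are illustrative.

import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

final class EmptyIndexSelectionSketch {
    // Keep indices whose document count is zero, then drop any that are still
    // current so they are never deleted even when empty.
    static Set<String> indicesToRemove(Map<String, Long> docCountsByIndex, Set<String> currentIndices) {
        Set<String> empty = docCountsByIndex.entrySet()
            .stream()
            .filter(e -> e.getValue() == 0L)
            .map(Map.Entry::getKey)
            .collect(Collectors.toCollection(HashSet::new));
        empty.removeAll(currentIndices); // plain-Java equivalent of Sets.difference(empty, current)
        return empty;
    }

    public static void main(String[] args) {
        Map<String, Long> stats = Map.of(".ml-state-000001", 0L, ".ml-state-000002", 42L, ".ml-state-000003", 0L);
        System.out.println(indicesToRemove(stats, Set.of(".ml-state-000003"))); // [.ml-state-000001]
    }
}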
-> l.onResponse(deleteResponse.isAcknowledged())) ); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemover.java index 507e9dac6282d..27bd3c926d944 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemover.java @@ -249,7 +249,7 @@ private void deleteModelSnapshots(List modelSnapshots, String job return; } JobDataDeleter deleter = new JobDataDeleter(client, jobId); - deleter.deleteModelSnapshots(modelSnapshots, ActionListener.wrap(bulkResponse -> { + deleter.deleteModelSnapshots(modelSnapshots, listener.delegateFailureAndWrap((l, bulkResponse) -> { auditor.info(jobId, Messages.getMessage(Messages.JOB_AUDIT_SNAPSHOTS_DELETED, modelSnapshots.size())); LOGGER.debug( () -> format( @@ -259,8 +259,8 @@ private void deleteModelSnapshots(List modelSnapshots, String job modelSnapshots.stream().map(ModelSnapshot::getDescription).collect(toList()) ) ); - listener.onResponse(true); - }, listener::onFailure)); + l.onResponse(true); + })); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java index 654ce87fc5e30..35e16b9fa8b88 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java @@ -195,11 +195,11 @@ static void latestBucketTime(OriginSettingClient client, TaskId parentTaskId, St searchRequest.indicesOptions(MlIndicesUtils.addIgnoreUnavailable(SearchRequest.DEFAULT_INDICES_OPTIONS)); searchRequest.setParentTask(parentTaskId); - client.search(searchRequest, ActionListener.wrap(response -> { + client.search(searchRequest, listener.delegateFailureAndWrap((delegate, response) -> { SearchHit[] hits = response.getHits().getHits(); if (hits.length == 0) { // no buckets found - listener.onResponse(null); + delegate.onResponse(null); } else { try ( @@ -210,12 +210,12 @@ static void latestBucketTime(OriginSettingClient client, TaskId parentTaskId, St ) ) { Bucket bucket = Bucket.LENIENT_PARSER.apply(parser, null); - listener.onResponse(bucket.getTimestamp().getTime()); + delegate.onResponse(bucket.getTimestamp().getTime()); } catch (IOException e) { - listener.onFailure(new ElasticsearchParseException("failed to parse bucket", e)); + delegate.onFailure(new ElasticsearchParseException("failed to parse bucket", e)); } } - }, listener::onFailure)); + })); } private void auditResultsWereDeleted(String jobId, long cutoffEpochMs) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java index 09cd6225cf0ca..c50e744bde96b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java @@ -398,18 +398,18 @@ private void stopAssociatedDatafeedForFailedJob(String jobId) { } private void getRunningDatafeed(String jobId, ActionListener listener) { - 
ActionListener> datafeedListener = ActionListener.wrap(datafeeds -> { + ActionListener> datafeedListener = listener.delegateFailureAndWrap((delegate, datafeeds) -> { assert datafeeds.size() <= 1; if (datafeeds.isEmpty()) { - listener.onResponse(null); + delegate.onResponse(null); return; } String datafeedId = datafeeds.iterator().next(); PersistentTasksCustomMetadata tasks = clusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); PersistentTasksCustomMetadata.PersistentTask datafeedTask = MlTasks.getDatafeedTask(datafeedId, tasks); - listener.onResponse(datafeedTask != null ? datafeedId : null); - }, listener::onFailure); + delegate.onResponse(datafeedTask != null ? datafeedId : null); + }); datafeedConfigProvider.findDatafeedIdsForJobIds(Collections.singleton(jobId), datafeedListener); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/queries/TextExpansionQueryBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/queries/TextExpansionQueryBuilder.java index 7d5197b9e9ba0..f6fa7ca9005c5 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/queries/TextExpansionQueryBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/queries/TextExpansionQueryBuilder.java @@ -32,8 +32,10 @@ import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextExpansionConfigUpdate; import java.io.IOException; +import java.util.Arrays; import java.util.List; import java.util.Objects; +import java.util.stream.Collectors; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; @@ -51,6 +53,29 @@ public class TextExpansionQueryBuilder extends AbstractQueryBuilder weightedTokensSupplier; private final TokenPruningConfig tokenPruningConfig; + public enum AllowedFieldType { + RANK_FEATURES("rank_features"), + SPARSE_VECTOR("sparse_vector"); + + private final String typeName; + + AllowedFieldType(String typeName) { + this.typeName = typeName; + } + + public String getTypeName() { + return typeName; + } + + public static boolean isFieldTypeAllowed(String typeName) { + return Arrays.stream(values()).anyMatch(value -> value.typeName.equals(typeName)); + } + + public static String getAllowedFieldTypesAsString() { + return Arrays.stream(values()).map(value -> value.typeName).collect(Collectors.joining(", ")); + } + } + public TextExpansionQueryBuilder(String fieldName, String modelText, String modelId) { this(fieldName, modelText, modelId, null); } @@ -139,8 +164,7 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep } @Override - protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws IOException { - + protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) { if (weightedTokensSupplier != null) { if (weightedTokensSupplier.get() == null) { return this; @@ -159,8 +183,8 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws inferRequest.setPrefixType(TrainedModelPrefixStrings.PrefixType.SEARCH); SetOnce textExpansionResultsSupplier = new SetOnce<>(); - queryRewriteContext.registerAsyncAction((client, listener) -> { - executeAsyncWithOrigin( + queryRewriteContext.registerAsyncAction( + (client, listener) -> executeAsyncWithOrigin( client, ML_ORIGIN, CoordinatedInferenceAction.INSTANCE, @@ -191,8 +215,8 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws ); } }, listener::onFailure) - ); - }); + ) + ); 
return new TextExpansionQueryBuilder(this, textExpansionResultsSupplier); } @@ -208,13 +232,16 @@ private QueryBuilder weightedTokensToQuery(String fieldName, TextExpansionResult weightedTokensQueryBuilder.boost(boost); return weightedTokensQueryBuilder; } + // Note: Weighted tokens queries were introduced in 8.13.0. To support mixed version clusters prior to 8.13.0, + // if no token pruning configuration is specified we fall back to a boolean query. + // TODO this should be updated to always use a WeightedTokensQueryBuilder once it's in all supported versions. var boolQuery = QueryBuilders.boolQuery(); for (var weightedToken : textExpansionResults.getWeightedTokens()) { boolQuery.should(QueryBuilders.termQuery(fieldName, weightedToken.token()).boost(weightedToken.weight())); } boolQuery.minimumShouldMatch(1); - boolQuery.boost(this.boost); - boolQuery.queryName(this.queryName); + boolQuery.boost(boost); + boolQuery.queryName(queryName); return boolQuery; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/queries/WeightedTokensQueryBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/queries/WeightedTokensQueryBuilder.java index a09bcadaacfc0..51139881fc2e4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/queries/WeightedTokensQueryBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/queries/WeightedTokensQueryBuilder.java @@ -34,6 +34,7 @@ import java.util.List; import java.util.Objects; +import static org.elasticsearch.xpack.ml.queries.TextExpansionQueryBuilder.AllowedFieldType; import static org.elasticsearch.xpack.ml.queries.TextExpansionQueryBuilder.PRUNING_CONFIG; public class WeightedTokensQueryBuilder extends AbstractQueryBuilder { @@ -152,27 +153,53 @@ protected Query doToQuery(SearchExecutionContext context) throws IOException { if (ft == null) { return new MatchNoDocsQuery("The \"" + getName() + "\" query is against a field that does not exist"); } + + final String fieldTypeName = ft.typeName(); + if (AllowedFieldType.isFieldTypeAllowed(fieldTypeName) == false) { + throw new ElasticsearchParseException( + "[" + + fieldTypeName + + "]" + + " is not an appropriate field type for this query. " + + "Allowed field types are [" + + AllowedFieldType.getAllowedFieldTypesAsString() + + "]." + ); + } + + return (this.tokenPruningConfig == null) + ? 
queryBuilderWithAllTokens(tokens, ft, context) + : queryBuilderWithPrunedTokens(tokens, ft, context); + } + + private Query queryBuilderWithAllTokens(List tokens, MappedFieldType ft, SearchExecutionContext context) { var qb = new BooleanQuery.Builder(); - int fieldDocCount = context.getIndexReader().getDocCount(fieldName); - float bestWeight = 0f; - for (var t : tokens) { - bestWeight = Math.max(t.weight(), bestWeight); + + for (var token : tokens) { + qb.add(new BoostQuery(ft.termQuery(token.token(), context), token.weight()), BooleanClause.Occur.SHOULD); } + return qb.setMinimumNumberShouldMatch(1).build(); + } + + private Query queryBuilderWithPrunedTokens(List tokens, MappedFieldType ft, SearchExecutionContext context) + throws IOException { + var qb = new BooleanQuery.Builder(); + int fieldDocCount = context.getIndexReader().getDocCount(fieldName); + float bestWeight = tokens.stream().map(WeightedToken::weight).reduce(0f, Math::max); float averageTokenFreqRatio = getAverageTokenFreqRatio(context.getIndexReader(), fieldDocCount); if (averageTokenFreqRatio == 0) { return new MatchNoDocsQuery("The \"" + getName() + "\" query is against an empty field"); } + for (var token : tokens) { boolean keep = shouldKeepToken(context.getIndexReader(), token, fieldDocCount, averageTokenFreqRatio, bestWeight); - if (this.tokenPruningConfig != null) { - keep ^= this.tokenPruningConfig.isOnlyScorePrunedTokens(); - } + keep ^= this.tokenPruningConfig.isOnlyScorePrunedTokens(); if (keep) { qb.add(new BoostQuery(ft.termQuery(token.token(), context), token.weight()), BooleanClause.Occur.SHOULD); } } - qb.setMinimumNumberShouldMatch(1); - return qb.build(); + + return qb.setMinimumNumberShouldMatch(1).build(); } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/VoidChainTaskExecutor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/VoidChainTaskExecutor.java index d5d7767a7e7a1..f7c46222d4471 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/VoidChainTaskExecutor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/VoidChainTaskExecutor.java @@ -6,6 +6,8 @@ */ package org.elasticsearch.xpack.ml.utils; +import org.elasticsearch.core.Predicates; + import java.util.concurrent.ExecutorService; import java.util.function.Predicate; @@ -16,7 +18,7 @@ public class VoidChainTaskExecutor extends TypedChainTaskExecutor { public VoidChainTaskExecutor(ExecutorService executorService, boolean shortCircuit) { - this(executorService, (a) -> true, (e) -> shortCircuit); + this(executorService, Predicates.always(), shortCircuit ? 
Predicates.always() : Predicates.never()); } VoidChainTaskExecutor( diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceServiceTests.java index 618733ccccb06..f79dd645bfea5 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceServiceTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.xpack.core.ml.action.DeleteExpiredDataAction; import org.elasticsearch.xpack.core.ml.action.DeleteJobAction; import org.elasticsearch.xpack.core.ml.action.GetJobsAction; +import org.elasticsearch.xpack.core.ml.action.ResetJobAction; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.junit.After; import org.junit.Before; @@ -38,8 +39,10 @@ import org.mockito.stubbing.Answer; import java.util.Collections; +import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.same; @@ -79,29 +82,21 @@ public void testScheduledTriggering() throws InterruptedException { doAnswer(withResponse(new GetJobsAction.Response(new QueryPage<>(Collections.emptyList(), 0, new ParseField(""))))).when(client) .execute(same(GetJobsAction.INSTANCE), any(), any()); - int triggerCount = randomIntBetween(2, 4); - CountDownLatch latch = new CountDownLatch(triggerCount); - try (MlDailyMaintenanceService service = createService(latch, client)) { - service.start(); - latch.await(5, TimeUnit.SECONDS); - } + int triggerCount = randomIntBetween(1, 3); + executeMaintenanceTriggers(triggerCount); - verify(client, times(triggerCount - 1)).execute(same(DeleteExpiredDataAction.INSTANCE), any(), any()); - verify(client, times(triggerCount - 1)).execute(same(GetJobsAction.INSTANCE), any(), any()); - verify(mlAssignmentNotifier, times(triggerCount - 1)).auditUnassignedMlTasks(any(), any()); + verify(client, times(triggerCount)).execute(same(DeleteExpiredDataAction.INSTANCE), any(), any()); + verify(client, times(2 * triggerCount)).execute(same(GetJobsAction.INSTANCE), any(), any()); + verify(mlAssignmentNotifier, times(triggerCount)).auditUnassignedMlTasks(any(), any()); } public void testScheduledTriggeringWhileUpgradeModeIsEnabled() throws InterruptedException { when(clusterService.state()).thenReturn(createClusterState(true)); - int triggerCount = randomIntBetween(2, 4); - CountDownLatch latch = new CountDownLatch(triggerCount); - try (MlDailyMaintenanceService service = createService(latch, client)) { - service.start(); - latch.await(5, TimeUnit.SECONDS); - } + int triggerCount = randomIntBetween(1, 3); + executeMaintenanceTriggers(triggerCount); - verify(clusterService, times(triggerCount - 1)).state(); + verify(clusterService, times(triggerCount)).state(); verifyNoMoreInteractions(client, clusterService, mlAssignmentNotifier); } @@ -143,11 +138,7 @@ public void testBothTasksAreTriggered_BothTasksFail() throws InterruptedExceptio public void testNoAnomalyDetectionTasksWhenDisabled() throws InterruptedException { when(clusterService.state()).thenReturn(createClusterState(false)); - CountDownLatch latch = new CountDownLatch(2); - try (MlDailyMaintenanceService service = createService(latch, client, false, randomBoolean(), randomBoolean())) { - service.start(); - latch.await(5, TimeUnit.SECONDS); - } + 
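On the VoidChainTaskExecutor change just above: the rewritten constructor arguments are behaviourally equivalent to the old lambdas, they simply reuse the shared constants from org.elasticsearch.core.Predicates and resolve shortCircuit once up front. A small illustration of those constants, assuming the obvious accept-everything / accept-nothing semantics:

Predicate<Exception> always = Predicates.always();   // matches every input, replaces (a) -> true
Predicate<Exception> never = Predicates.never();     // matches no input
assert always.test(new RuntimeException());
assert never.test(new RuntimeException()) == false;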
executeMaintenanceTriggers(1, false, randomBoolean(), randomBoolean()); verify(client, never()).threadPool(); verify(client, never()).execute(same(DeleteExpiredDataAction.INSTANCE), any(), any()); @@ -160,15 +151,11 @@ private void assertThatBothTasksAreTriggered(Answer deleteExpiredDataAnswer, doAnswer(deleteExpiredDataAnswer).when(client).execute(same(DeleteExpiredDataAction.INSTANCE), any(), any()); doAnswer(getJobsAnswer).when(client).execute(same(GetJobsAction.INSTANCE), any(), any()); - CountDownLatch latch = new CountDownLatch(2); - try (MlDailyMaintenanceService service = createService(latch, client)) { - service.start(); - latch.await(5, TimeUnit.SECONDS); - } + executeMaintenanceTriggers(1); - verify(client, Mockito.atLeast(2)).threadPool(); - verify(client, Mockito.atLeast(1)).execute(same(DeleteExpiredDataAction.INSTANCE), any(), any()); - verify(client, Mockito.atLeast(1)).execute(same(GetJobsAction.INSTANCE), any(), any()); + verify(client, times(3)).threadPool(); + verify(client, times(1)).execute(same(DeleteExpiredDataAction.INSTANCE), any(), any()); + verify(client, times(2)).execute(same(GetJobsAction.INSTANCE), any(), any()); verify(mlAssignmentNotifier, Mockito.atLeast(1)).auditUnassignedMlTasks(any(), any()); } @@ -202,14 +189,10 @@ public void testJobInDeletingStateAlreadyHasDeletionTask() throws InterruptedExc .when(client) .execute(same(TransportListTasksAction.TYPE), any(), any()); - CountDownLatch latch = new CountDownLatch(2); - try (MlDailyMaintenanceService service = createService(latch, client)) { - service.start(); - latch.await(5, TimeUnit.SECONDS); - } + executeMaintenanceTriggers(1); - verify(client, times(3)).threadPool(); - verify(client).execute(same(GetJobsAction.INSTANCE), any(), any()); + verify(client, times(4)).threadPool(); + verify(client, times(2)).execute(same(GetJobsAction.INSTANCE), any(), any()); verify(client).execute(same(TransportListTasksAction.TYPE), any(), any()); verify(client).execute(same(DeleteExpiredDataAction.INSTANCE), any(), any()); verify(mlAssignmentNotifier).auditUnassignedMlTasks(any(), any()); @@ -240,14 +223,10 @@ private void testJobInDeletingStateDoesNotHaveDeletionTask(boolean deleted) thro ).execute(same(TransportListTasksAction.TYPE), any(), any()); doAnswer(withResponse(AcknowledgedResponse.of(deleted))).when(client).execute(same(DeleteJobAction.INSTANCE), any(), any()); - CountDownLatch latch = new CountDownLatch(2); - try (MlDailyMaintenanceService service = createService(latch, client)) { - service.start(); - latch.await(5, TimeUnit.SECONDS); - } + executeMaintenanceTriggers(1); - verify(client, times(4)).threadPool(); - verify(client).execute(same(GetJobsAction.INSTANCE), any(), any()); + verify(client, times(5)).threadPool(); + verify(client, times(2)).execute(same(GetJobsAction.INSTANCE), any(), any()); verify(client).execute(same(TransportListTasksAction.TYPE), any(), any()); verify(client).execute(same(DeleteJobAction.INSTANCE), any(), any()); verify(client).execute(same(DeleteExpiredDataAction.INSTANCE), any(), any()); @@ -255,29 +234,98 @@ private void testJobInDeletingStateDoesNotHaveDeletionTask(boolean deleted) thro verifyNoMoreInteractions(client, mlAssignmentNotifier); } - private MlDailyMaintenanceService createService(CountDownLatch latch, Client client) { - return createService(latch, client, true, true, true); + public void testJobInResettingState_doesNotHaveResetTask() throws InterruptedException { + testJobInResettingState(false); + } + + public void testJobInResettingState_hasResetTask() throws 
InterruptedException { + testJobInResettingState(true); + } + + private void testJobInResettingState(boolean hasResetTask) throws InterruptedException { + String jobId = "job-in-state-resetting"; + when(clusterService.state()).thenReturn(createClusterState(false)); + doAnswer(withResponse(new DeleteExpiredDataAction.Response(true))).when(client) + .execute(same(DeleteExpiredDataAction.INSTANCE), any(), any()); + Job job = mock(Job.class); + when(job.getId()).thenReturn(jobId); + when(job.isDeleting()).thenReturn(false); + when(job.isResetting()).thenReturn(true); + doAnswer(withResponse(new GetJobsAction.Response(new QueryPage<>(List.of(job), 1, new ParseField(""))))).when(client) + .execute(same(GetJobsAction.INSTANCE), any(), any()); + List tasks = hasResetTask + ? List.of( + new TaskInfo( + new TaskId("test", 123), + "test", + "test", + ResetJobAction.NAME, + "job-" + jobId, + null, + 0, + 0, + true, + false, + new TaskId("test", 456), + Collections.emptyMap() + ) + ) + : List.of(); + doAnswer(withResponse(new ListTasksResponse(tasks, List.of(), List.of()))).when(client) + .execute(same(TransportListTasksAction.TYPE), any(), any()); + doAnswer(withResponse(AcknowledgedResponse.of(true))).when(client).execute(same(ResetJobAction.INSTANCE), any(), any()); + + executeMaintenanceTriggers(1); + + verify(client, times(hasResetTask ? 4 : 5)).threadPool(); + verify(client, times(2)).execute(same(GetJobsAction.INSTANCE), any(), any()); + verify(client).execute(same(TransportListTasksAction.TYPE), any(), any()); + if (hasResetTask == false) { + verify(client).execute(same(ResetJobAction.INSTANCE), any(), any()); + } + verify(client).execute(same(DeleteExpiredDataAction.INSTANCE), any(), any()); + verify(mlAssignmentNotifier).auditUnassignedMlTasks(any(), any()); + verifyNoMoreInteractions(client, mlAssignmentNotifier); + } + + private void executeMaintenanceTriggers(int triggerCount) throws InterruptedException { + executeMaintenanceTriggers(triggerCount, true, true, true); } - private MlDailyMaintenanceService createService( - CountDownLatch latch, - Client client, + private void executeMaintenanceTriggers( + int triggerCount, boolean isAnomalyDetectionEnabled, boolean isDataFrameAnalyticsEnabled, boolean isNlpEnabled - ) { - return new MlDailyMaintenanceService(Settings.EMPTY, threadPool, client, clusterService, mlAssignmentNotifier, () -> { - // We need to be careful that an unexpected iteration doesn't get squeezed in by the maintenance threadpool in - // between the latch getting counted down to zero and the main test thread stopping the maintenance service. - // This could happen if the main test thread happens to be waiting for a CPU for the whole 100ms after the - // latch counts down to zero. - if (latch.getCount() > 0) { - latch.countDown(); - return TimeValue.timeValueMillis(100); - } else { - return TimeValue.timeValueHours(1); - } - }, isAnomalyDetectionEnabled, isDataFrameAnalyticsEnabled, isNlpEnabled); + ) throws InterruptedException { + // The scheduleProvider is called upon scheduling. The latch waits for (triggerCount + 1) + // schedules to happen, which means that the maintenance task is executed triggerCount + // times. The first triggerCount invocations of the scheduleProvider return 100ms, which + // is the time between the executed maintenance tasks. + // After that, maintenance task (triggerCount + 1) is scheduled after 100sec, the latch is + // released, the service is closed, and the method returns. Task (triggerCount + 1) is + // therefore never executed. 
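Concretely, with triggerCount == 2 the helper described by this comment behaves as follows; an illustrative trace of the code below:

CountDownLatch latch = new CountDownLatch(3);                 // triggerCount + 1
Supplier<TimeValue> scheduleProvider = () -> {
    latch.countDown();
    return TimeValue.timeValueMillis(latch.getCount() > 0 ? 100 : 100_000);
};
// schedule #1: latch 3 -> 2, returns 100ms, maintenance run #1 executes
// schedule #2: latch 2 -> 1, returns 100ms, maintenance run #2 executes
// schedule #3: latch 1 -> 0, returns 100s; latch.await(...) returns, the service is
//              closed, and the third run never happens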
+ CountDownLatch latch = new CountDownLatch(triggerCount + 1); + Supplier scheduleProvider = () -> { + latch.countDown(); + return TimeValue.timeValueMillis(latch.getCount() > 0 ? 100 : 100_000); + }; + try ( + MlDailyMaintenanceService service = new MlDailyMaintenanceService( + Settings.EMPTY, + threadPool, + client, + clusterService, + mlAssignmentNotifier, + scheduleProvider, + isAnomalyDetectionEnabled, + isDataFrameAnalyticsEnabled, + isNlpEnabled + ) + ) { + service.start(); + latch.await(5, TimeUnit.SECONDS); + } } private static ClusterState createClusterState(boolean isUpgradeMode) { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java index 377b7fd45d78c..5c2c3abf232f5 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java @@ -274,6 +274,7 @@ private static NodeStats buildNodeStats( null, null, null, + null, null ); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizeTextAggregatorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizeTextAggregatorTests.java index cb5b98af29d57..29f298894477a 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizeTextAggregatorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizeTextAggregatorTests.java @@ -322,6 +322,90 @@ public void testCategorizationWithSubAggsManyDocs() throws Exception { ); } + public void testCategorizationAsSubAggWithExtendedBounds() throws Exception { + // Test with more buckets than we have data for (via extended bounds in the histogram config). + // This will confirm that we don't try to read beyond the end of arrays of categorizers. 
+ int numHistoBuckets = 50; + HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("histo").field(NUMERIC_FIELD_NAME) + .interval(1) + .extendedBounds(0, numHistoBuckets - 1) + .subAggregation(new CategorizeTextAggregationBuilder("my_agg", TEXT_FIELD_NAME)); + testCase(CategorizeTextAggregatorTests::writeTestDocs, (InternalHistogram histo) -> { + assertThat(histo.getBuckets(), hasSize(numHistoBuckets)); + assertThat(histo.getBuckets().get(0).getDocCount(), equalTo(2L)); + assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("0.0")); + InternalCategorizationAggregation categorizationAggregation = histo.getBuckets().get(0).getAggregations().get("my_agg"); + assertThat(categorizationAggregation.getBuckets().get(0).getDocCount(), equalTo(1L)); + assertThat( + categorizationAggregation.getBuckets().get(0).getKeyAsString(), + equalTo("Failed to shutdown error org.aaaa.bbbb.Cccc line caused by foo exception") + ); + assertThat(categorizationAggregation.getBuckets().get(0).getSerializableCategory().maxMatchingStringLen(), equalTo(84)); + assertThat( + categorizationAggregation.getBuckets().get(0).getSerializableCategory().getRegex(), + equalTo(".*?Failed.+?to.+?shutdown.+?error.+?org\\.aaaa\\.bbbb\\.Cccc.+?line.+?caused.+?by.+?foo.+?exception.*?") + ); + assertThat(categorizationAggregation.getBuckets().get(1).getDocCount(), equalTo(1L)); + assertThat(categorizationAggregation.getBuckets().get(1).getKeyAsString(), equalTo("Node started")); + assertThat(categorizationAggregation.getBuckets().get(1).getSerializableCategory().maxMatchingStringLen(), equalTo(15)); + assertThat(categorizationAggregation.getBuckets().get(1).getSerializableCategory().getRegex(), equalTo(".*?Node.+?started.*?")); + assertThat(histo.getBuckets().get(1).getDocCount(), equalTo(1L)); + assertThat(histo.getBuckets().get(1).getKeyAsString(), equalTo("1.0")); + categorizationAggregation = histo.getBuckets().get(1).getAggregations().get("my_agg"); + assertThat(categorizationAggregation.getBuckets().get(0).getDocCount(), equalTo(1L)); + assertThat(categorizationAggregation.getBuckets().get(0).getKeyAsString(), equalTo("Node started")); + assertThat(categorizationAggregation.getBuckets().get(0).getSerializableCategory().maxMatchingStringLen(), equalTo(15)); + assertThat(categorizationAggregation.getBuckets().get(0).getSerializableCategory().getRegex(), equalTo(".*?Node.+?started.*?")); + assertThat(histo.getBuckets().get(2).getDocCount(), equalTo(1L)); + assertThat(histo.getBuckets().get(2).getKeyAsString(), equalTo("2.0")); + categorizationAggregation = histo.getBuckets().get(2).getAggregations().get("my_agg"); + assertThat(categorizationAggregation.getBuckets().get(0).getDocCount(), equalTo(1L)); + assertThat(categorizationAggregation.getBuckets().get(0).getKeyAsString(), equalTo("Node started")); + assertThat(categorizationAggregation.getBuckets().get(0).getSerializableCategory().maxMatchingStringLen(), equalTo(15)); + assertThat(categorizationAggregation.getBuckets().get(0).getSerializableCategory().getRegex(), equalTo(".*?Node.+?started.*?")); + assertThat(histo.getBuckets().get(3).getDocCount(), equalTo(1L)); + assertThat(histo.getBuckets().get(3).getKeyAsString(), equalTo("3.0")); + categorizationAggregation = histo.getBuckets().get(3).getAggregations().get("my_agg"); + assertThat(categorizationAggregation.getBuckets().get(0).getDocCount(), equalTo(1L)); + assertThat(categorizationAggregation.getBuckets().get(0).getKeyAsString(), equalTo("Node started")); + 
assertThat(categorizationAggregation.getBuckets().get(0).getSerializableCategory().maxMatchingStringLen(), equalTo(15)); + assertThat(categorizationAggregation.getBuckets().get(0).getSerializableCategory().getRegex(), equalTo(".*?Node.+?started.*?")); + assertThat(histo.getBuckets().get(4).getDocCount(), equalTo(2L)); + assertThat(histo.getBuckets().get(4).getKeyAsString(), equalTo("4.0")); + categorizationAggregation = histo.getBuckets().get(4).getAggregations().get("my_agg"); + assertThat(categorizationAggregation.getBuckets().get(0).getDocCount(), equalTo(1L)); + assertThat( + categorizationAggregation.getBuckets().get(0).getKeyAsString(), + equalTo("Failed to shutdown error org.aaaa.bbbb.Cccc line caused by foo exception") + ); + assertThat(categorizationAggregation.getBuckets().get(0).getSerializableCategory().maxMatchingStringLen(), equalTo(84)); + assertThat( + categorizationAggregation.getBuckets().get(0).getSerializableCategory().getRegex(), + equalTo(".*?Failed.+?to.+?shutdown.+?error.+?org\\.aaaa\\.bbbb\\.Cccc.+?line.+?caused.+?by.+?foo.+?exception.*?") + ); + assertThat(categorizationAggregation.getBuckets().get(1).getDocCount(), equalTo(1L)); + assertThat(categorizationAggregation.getBuckets().get(1).getKeyAsString(), equalTo("Node started")); + assertThat(categorizationAggregation.getBuckets().get(1).getSerializableCategory().maxMatchingStringLen(), equalTo(15)); + assertThat(categorizationAggregation.getBuckets().get(1).getSerializableCategory().getRegex(), equalTo(".*?Node.+?started.*?")); + assertThat(histo.getBuckets().get(5).getDocCount(), equalTo(1L)); + assertThat(histo.getBuckets().get(5).getKeyAsString(), equalTo("5.0")); + categorizationAggregation = histo.getBuckets().get(5).getAggregations().get("my_agg"); + assertThat(categorizationAggregation.getBuckets().get(0).getDocCount(), equalTo(1L)); + assertThat(categorizationAggregation.getBuckets().get(0).getKeyAsString(), equalTo("Node started")); + assertThat(categorizationAggregation.getBuckets().get(0).getSerializableCategory().maxMatchingStringLen(), equalTo(15)); + assertThat(categorizationAggregation.getBuckets().get(0).getSerializableCategory().getRegex(), equalTo(".*?Node.+?started.*?")); + for (int bucket = 6; bucket < numHistoBuckets; ++bucket) { + assertThat(histo.getBuckets().get(bucket).getDocCount(), equalTo(0L)); + } + }, + new AggTestConfig( + aggBuilder, + new TextFieldMapper.TextFieldType(TEXT_FIELD_NAME, randomBoolean()), + longField(NUMERIC_FIELD_NAME) + ) + ); + } + private static void writeTestDocs(RandomIndexWriter w) throws IOException { w.addDocument( Arrays.asList( diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/changepoint/KDETests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/changepoint/KDETests.java index e4d30912050e3..46b563a15c89c 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/changepoint/KDETests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/changepoint/KDETests.java @@ -23,7 +23,6 @@ public void testEmpty() { assertThat(kde.data(), equalTo(new double[0])); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/102876") public void testCdfAndSf() { double[] data = DoubleStream.generate(() -> randomDoubleBetween(0.0, 100.0, true)).limit(101).toArray(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java 
index 69d8663478b36..4bb612921876e 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java @@ -56,6 +56,7 @@ import static org.elasticsearch.xpack.ml.job.task.OpenJobPersistentTasksExecutorTests.addJobTask; import static org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase.createDatafeed; import static org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase.createScheduledJob; +import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -334,12 +335,22 @@ public void testIndexDoesntExist() { assertNull(result.getExecutorNode()); assertThat( result.getExplanation(), - equalTo( - "cannot start datafeed [datafeed_id] because it failed resolving indices given [not_foo] and " - + "indices_options [IndicesOptions[ignore_unavailable=false, allow_no_indices=true, expand_wildcards_open=true, " - + "expand_wildcards_closed=false, expand_wildcards_hidden=false, allow_aliases_to_multiple_indices=true, " - + "forbid_closed_indices=true, ignore_aliases=false, ignore_throttled=true]] " - + "with exception [no such index [not_foo]]" + anyOf( + // TODO remove this first option and only allow the second once the failure store functionality is permanently switched on + equalTo( + "cannot start datafeed [datafeed_id] because it failed resolving indices given [not_foo] and " + + "indices_options [IndicesOptions[ignore_unavailable=false, allow_no_indices=true, expand_wildcards_open=true, " + + "expand_wildcards_closed=false, expand_wildcards_hidden=false, allow_aliases_to_multiple_indices=true, " + + "forbid_closed_indices=true, ignore_aliases=false, ignore_throttled=true]] " + + "with exception [no such index [not_foo]]" + ), + equalTo( + "cannot start datafeed [datafeed_id] because it failed resolving indices given [not_foo] and " + + "indices_options [IndicesOptions[ignore_unavailable=false, allow_no_indices=true, expand_wildcards_open=true, " + + "expand_wildcards_closed=false, expand_wildcards_hidden=false, allow_aliases_to_multiple_indices=true, " + + "forbid_closed_indices=true, ignore_aliases=false, ignore_throttled=true, include_regular_indices=true, " + + "include_failure_indices=false, allow_failure_indices=true]] with exception [no such index [not_foo]]" + ) ) ); @@ -356,13 +367,25 @@ public void testIndexDoesntExist() { ); assertThat( e.getMessage(), - containsString( - "No node found to start datafeed [datafeed_id], allocation explanation " - + "[cannot start datafeed [datafeed_id] because it failed resolving " - + "indices given [not_foo] and indices_options [IndicesOptions[ignore_unavailable=false, allow_no_indices=true, " - + "expand_wildcards_open=true, expand_wildcards_closed=false, expand_wildcards_hidden=false, " - + "allow_aliases_to_multiple_indices=true, forbid_closed_indices=true, ignore_aliases=false, ignore_throttled=true" - + "]] with exception [no such index [not_foo]]]" + anyOf( + // TODO remove this first option and only allow the second once the failure store functionality is permanently switched on + containsString( + "No node found to start datafeed [datafeed_id], allocation explanation " + + "[cannot start datafeed [datafeed_id] because it failed resolving " + + "indices given [not_foo] and indices_options [IndicesOptions[ignore_unavailable=false, allow_no_indices=true, " + + "expand_wildcards_open=true, expand_wildcards_closed=false, 
expand_wildcards_hidden=false, " + + "allow_aliases_to_multiple_indices=true, forbid_closed_indices=true, ignore_aliases=false, ignore_throttled=true" + + "]] with exception [no such index [not_foo]]]" + ), + containsString( + "No node found to start datafeed [datafeed_id], allocation explanation " + + "[cannot start datafeed [datafeed_id] because it failed resolving " + + "indices given [not_foo] and indices_options [IndicesOptions[ignore_unavailable=false, allow_no_indices=true, " + + "expand_wildcards_open=true, expand_wildcards_closed=false, expand_wildcards_hidden=false, " + + "allow_aliases_to_multiple_indices=true, forbid_closed_indices=true, ignore_aliases=false, " + + "ignore_throttled=true, include_regular_indices=true, include_failure_indices=false, " + + "allow_failure_indices=true]] with exception [no such index [not_foo]]]" + ) ) ); } @@ -522,13 +545,24 @@ public void testSelectNode_GivenJobOpeningAndIndexDoesNotExist() { ); assertThat( e.getMessage(), - containsString( - "No node found to start datafeed [datafeed_id], allocation explanation " - + "[cannot start datafeed [datafeed_id] because it failed resolving indices given [not_foo] and " - + "indices_options [IndicesOptions[ignore_unavailable=false, allow_no_indices=true, expand_wildcards_open=true, " - + "expand_wildcards_closed=false, expand_wildcards_hidden=false, allow_aliases_to_multiple_indices=true, " - + "forbid_closed_indices=true, ignore_aliases=false, ignore_throttled=true]] " - + "with exception [no such index [not_foo]]]" + anyOf( + // TODO remove this first option and only allow the second once the failure store functionality is permanently switched on + containsString( + "No node found to start datafeed [datafeed_id], allocation explanation " + + "[cannot start datafeed [datafeed_id] because it failed resolving indices given [not_foo] and " + + "indices_options [IndicesOptions[ignore_unavailable=false, allow_no_indices=true, expand_wildcards_open=true, " + + "expand_wildcards_closed=false, expand_wildcards_hidden=false, allow_aliases_to_multiple_indices=true, " + + "forbid_closed_indices=true, ignore_aliases=false, ignore_throttled=true]] " + + "with exception [no such index [not_foo]]]" + ), + containsString( + "No node found to start datafeed [datafeed_id], allocation explanation " + + "[cannot start datafeed [datafeed_id] because it failed resolving indices given [not_foo] and " + + "indices_options [IndicesOptions[ignore_unavailable=false, allow_no_indices=true, expand_wildcards_open=true, " + + "expand_wildcards_closed=false, expand_wildcards_hidden=false, allow_aliases_to_multiple_indices=true, " + + "forbid_closed_indices=true, ignore_aliases=false, ignore_throttled=true, include_regular_indices=true, " + + "include_failure_indices=false, allow_failure_indices=true]] with exception [no such index [not_foo]]]" + ) ) ); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java index d994b14265a26..b04e9ab6d5332 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java @@ -9,11 +9,10 @@ import org.apache.lucene.search.TotalHits; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionFuture; +import 
org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.search.ClearScrollRequest; -import org.elasticsearch.action.search.ClearScrollResponse; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.search.TransportClearScrollAction; @@ -77,7 +76,7 @@ public class ScrollDataExtractorTests extends ESTestCase { private Client client; - private List capturedSearchRequests; + private List> capturedSearchRequests; private List capturedContinueScrollIds; private ArgumentCaptor capturedClearScrollRequests; private String jobId; @@ -87,12 +86,11 @@ public class ScrollDataExtractorTests extends ESTestCase { private List scriptFields; private int scrollSize; private long initScrollStartTime; - private ActionFuture clearScrollFuture; private DatafeedTimingStatsReporter timingStatsReporter; private class TestDataExtractor extends ScrollDataExtractor { - private Queue> responses = new LinkedList<>(); + private final Queue> responses = new LinkedList<>(); private int numScrollReset; TestDataExtractor(long start, long end) { @@ -110,7 +108,7 @@ protected InputStream initScroll(long startTimestamp) throws IOException { } @Override - protected SearchResponse executeSearchRequest(SearchRequestBuilder searchRequestBuilder) { + protected SearchResponse executeSearchRequest(ActionRequestBuilder searchRequestBuilder) { capturedSearchRequests.add(searchRequestBuilder); Tuple responseOrException = responses.remove(); if (responseOrException.v2() != null) { @@ -176,9 +174,10 @@ public void setUpTests() { scriptFields = Collections.emptyList(); scrollSize = 1000; - clearScrollFuture = mock(ActionFuture.class); capturedClearScrollRequests = ArgumentCaptor.forClass(ClearScrollRequest.class); - when(client.execute(same(TransportClearScrollAction.TYPE), capturedClearScrollRequests.capture())).thenReturn(clearScrollFuture); + when(client.execute(same(TransportClearScrollAction.TYPE), capturedClearScrollRequests.capture())).thenReturn( + mock(ActionFuture.class) + ); timingStatsReporter = new DatafeedTimingStatsReporter(new DatafeedTimingStats(jobId), mock(DatafeedTimingStatsPersister.class)); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingServiceTests.java index 40b0dd519f7d8..bab292671c0bc 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingServiceTests.java @@ -43,7 +43,9 @@ import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; import org.elasticsearch.xpack.core.ml.inference.TrainedModelInput; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ClassificationConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceStats; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearningToRankConfig; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.inference.InferenceDefinition; import 
org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.ml.MachineLearning; @@ -424,6 +426,34 @@ public void testGetModelForSearch() throws Exception { verify(trainedModelStatsService, never()).queueStats(any(InferenceStats.class), anyBoolean()); } + public void testGetModelForLearningToRank() throws Exception { + String modelId = "test-get-model-for-ltr"; + withTrainedModel(modelId, 1L, LearningToRankConfig.EMPTY_PARAMS); + + ModelLoadingService modelLoadingService = new ModelLoadingService( + trainedModelProvider, + auditor, + threadPool, + clusterService, + trainedModelStatsService, + Settings.EMPTY, + "test-node", + circuitBreaker, + mock(XPackLicenseState.class) + ); + + for (int i = 0; i < 3; i++) { + PlainActionFuture future = new PlainActionFuture<>(); + modelLoadingService.getModelForLearningToRank(modelId, future); + assertThat(future.get(), is(not(nullValue()))); + } + + assertTrue(modelLoadingService.isModelCached(modelId)); + + verify(trainedModelProvider, times(1)).getTrainedModelForInference(eq(modelId), eq(false), any()); + verify(trainedModelStatsService, never()).queueStats(any(InferenceStats.class), anyBoolean()); + } + public void testCircuitBreakerBreak() throws Exception { String model1 = "test-circuit-break-model-1"; String model2 = "test-circuit-break-model-2"; @@ -656,13 +686,17 @@ public void testAliasesGetUpdatedEvenWhenNotIngestNode() throws IOException { assertThat(modelLoadingService.getModelId("loaded_model_again"), equalTo(model1)); } - @SuppressWarnings("unchecked") private void withTrainedModel(String modelId, long size) { + withTrainedModel(modelId, size, ClassificationConfig.EMPTY_PARAMS); + } + + @SuppressWarnings("unchecked") + private void withTrainedModel(String modelId, long size, InferenceConfig inferenceConfig) { InferenceDefinition definition = mock(InferenceDefinition.class); when(definition.ramBytesUsed()).thenReturn(size); TrainedModelConfig trainedModelConfig = mock(TrainedModelConfig.class); when(trainedModelConfig.getModelId()).thenReturn(modelId); - when(trainedModelConfig.getInferenceConfig()).thenReturn(ClassificationConfig.EMPTY_PARAMS); + when(trainedModelConfig.getInferenceConfig()).thenReturn(inferenceConfig); when(trainedModelConfig.getInput()).thenReturn(new TrainedModelInput(Arrays.asList("foo", "bar", "baz"))); when(trainedModelConfig.getModelSize()).thenReturn(size); doAnswer(invocationOnMock -> { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/EmptyStateIndexRemoverTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/EmptyStateIndexRemoverTests.java index b560a758b8e83..a452c156e77f1 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/EmptyStateIndexRemoverTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/EmptyStateIndexRemoverTests.java @@ -27,6 +27,7 @@ import org.junit.Before; import org.mockito.ArgumentCaptor; import org.mockito.InOrder; +import org.mockito.Mockito; import org.mockito.stubbing.Answer; import java.util.Map; @@ -57,6 +58,7 @@ public void setUpTests() { client = mock(Client.class); OriginSettingClient originSettingClient = MockOriginSettingClient.mockOriginSettingClient(client, ClientHelper.ML_ORIGIN); listener = mock(ActionListener.class); + when(listener.delegateFailureAndWrap(any())).thenCallRealMethod(); deleteIndexRequestCaptor = ArgumentCaptor.forClass(DeleteIndexRequest.class); remover = new EmptyStateIndexRemover(originSettingClient, 
new TaskId("test", 0L)); @@ -66,6 +68,7 @@ public void setUpTests() { public void verifyNoOtherInteractionsWithMocks() { verify(client).settings(); verify(client, atLeastOnce()).threadPool(); + verify(listener, Mockito.atLeast(0)).delegateFailureAndWrap(any()); verifyNoMoreInteractions(client, listener); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredAnnotationsRemoverTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredAnnotationsRemoverTests.java index ad0719011c92e..39f1ead7e24e0 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredAnnotationsRemoverTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredAnnotationsRemoverTests.java @@ -60,6 +60,7 @@ public void setUpTests() { client = mock(Client.class); originSettingClient = MockOriginSettingClient.mockOriginSettingClient(client, ClientHelper.ML_ORIGIN); listener = mock(ActionListener.class); + when(listener.delegateFailureAndWrap(any())).thenCallRealMethod(); } public void testRemove_GivenNoJobs() { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemoverTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemoverTests.java index 5aa5b847b26be..4dbb4eda07b0a 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemoverTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemoverTests.java @@ -60,6 +60,7 @@ public void setUpTests() { client = mock(Client.class); originSettingClient = MockOriginSettingClient.mockOriginSettingClient(client, ClientHelper.ML_ORIGIN); listener = mock(ActionListener.class); + when(listener.delegateFailureAndWrap(any())).thenCallRealMethod(); } public void testRemove_GivenNoJobs() { diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsMonitoringDocTests.java index 2acdc8ae72232..54f3ce634a25a 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsMonitoringDocTests.java @@ -460,6 +460,7 @@ private static NodeStats mockNodeStats() { null, null, null, + null, null ); } diff --git a/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/codecs/BWCCodec.java b/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/codecs/BWCCodec.java index df6fded49e6bb..25b4b685ac50f 100644 --- a/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/codecs/BWCCodec.java +++ b/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/codecs/BWCCodec.java @@ -109,7 +109,8 @@ private static FieldInfos filterFields(FieldInfos fieldInfos) { 0, fieldInfo.getVectorEncoding(), fieldInfo.getVectorSimilarityFunction(), - fieldInfo.isSoftDeletesField() + fieldInfo.isSoftDeletesField(), + fieldInfo.isParentField() ) ); } diff --git a/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/codecs/lucene50/Lucene50FieldInfosFormat.java 
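The one-line additions to the remover test set-ups above are needed because the production code now routes through listener.delegateFailureAndWrap(...): on a plain Mockito mock that default method would return null and the code under test would fail with an NPE. Letting the default implementation run for real keeps the delegation intact while the mock still records the terminal onResponse/onFailure calls (Boolean is just an example response type here):

@SuppressWarnings("unchecked")
ActionListener<Boolean> listener = mock(ActionListener.class);
// Execute the interface's real default method so it returns a usable delegating listener.
when(listener.delegateFailureAndWrap(any())).thenCallRealMethod();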
b/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/codecs/lucene50/Lucene50FieldInfosFormat.java index 9cef274aa753e..83fcb17449100 100644 --- a/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/codecs/lucene50/Lucene50FieldInfosFormat.java +++ b/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/codecs/lucene50/Lucene50FieldInfosFormat.java @@ -111,6 +111,7 @@ public FieldInfos read(Directory directory, SegmentInfo segmentInfo, String segm 0, VectorEncoding.FLOAT32, VectorSimilarityFunction.EUCLIDEAN, + false, false ); infos[i].checkConsistency(); diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java index 02506db3e9cc8..62b8242e7df86 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java @@ -7,11 +7,9 @@ package org.elasticsearch.xpack.profiling; -import org.elasticsearch.index.Index; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.TermQueryBuilder; -import org.elasticsearch.index.shard.ShardId; import java.util.List; @@ -92,7 +90,6 @@ public void testGetStackTracesFromAPMWithMatchNoDownsampling() throws Exception public void testGetStackTracesFromAPMWithMatchAndDownsampling() throws Exception { TermQueryBuilder query = QueryBuilders.termQuery("transaction.name", "encodeSha1"); - Index apmTest = resolveIndex("apm-test-001"); GetStackTracesRequest request = new GetStackTracesRequest( 1, @@ -107,28 +104,9 @@ public void testGetStackTracesFromAPMWithMatchAndDownsampling() throws Exception null, null, null - ) { - @Override - public boolean equals(Object o) { - return super.equals(o); - } - - @Override - public int hashCode() { - // The random sampler aggregation takes a user-provided seed as well as the index UUID into account for randomness. This is - // fine for a production use case but here we need full control over the internal seed so test results are stable. As - // the index UUID changes between test runs, and we have no control over it, we will instead modify the user provided seed - // so that the random number generator is always initialized the same, regardless of the index UUID. - // - // See org.elasticsearch.search.aggregations.bucket.sampler.random.RandomSamplingQuery#createWeight(), specifically the - // initialization of SplittableRandom(), which uses both the "seed" (user-provided) and a "hash", which is built from - // ShardId#hashCode(). By using the same hash code, the XOR will always evaluate to 0, thus producing a consistent seed for - // SplittableRandom(). - int baseSeed = new ShardId(apmTest, 0).hashCode(); - // a seed of zero won't return results for our test scenario, so we toggle one bit to generate a consistent non-zero seed. 
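The anonymous GetStackTracesRequest subclass being removed here existed only to stabilise the random sampler's seed by overriding hashCode(); it is replaced by an explicit, test-only shard seed, wired up in two places that both appear later in this diff:

// In the integration test: fix the per-shard seed so the sampled result set is stable.
request.setShardSeed(42);

// In TransportGetStackTracesAction: only override the shard seed when it has been set,
// so production requests keep the default index-dependent randomness.
if (request.getShardSeed() != null) {
    randomSampler.setShardSeed(request.getShardSeed());
}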
- return baseSeed ^ 2; - } - }; + ); + // ensures consistent results in the random sampler aggregation that is used internally + request.setShardSeed(42); GetStackTracesResponse response = client().execute(GetStackTracesAction.INSTANCE, request).get(); assertEquals(49, response.getTotalFrames()); diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphResponse.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphResponse.java index 468b74ed16000..65b342abddd9d 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphResponse.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphResponse.java @@ -191,27 +191,64 @@ public Iterator toXContentChunked(ToXContent.Params params ChunkedToXContentHelper.array("ExeFilename", Iterators.map(fileNames.iterator(), e -> (b, p) -> b.value(e))), ChunkedToXContentHelper.array("AddressOrLine", Iterators.map(addressOrLines.iterator(), e -> (b, p) -> b.value(e))), ChunkedToXContentHelper.array("FunctionName", Iterators.map(functionNames.iterator(), e -> (b, p) -> b.value(e))), - ChunkedToXContentHelper.array("FunctionOffset", Iterators.map(functionOffsets.iterator(), e -> (b, p) -> b.value(e))), + ChunkedToXContentHelper.singleChunk((b, p) -> { + b.startArray("FunctionOffset"); + for (int functionOffset : functionOffsets) { + b.value(functionOffset); + } + return b.endArray(); + }), ChunkedToXContentHelper.array("SourceFilename", Iterators.map(sourceFileNames.iterator(), e -> (b, p) -> b.value(e))), - ChunkedToXContentHelper.array("SourceLine", Iterators.map(sourceLines.iterator(), e -> (b, p) -> b.value(e))), - ChunkedToXContentHelper.array("CountInclusive", Iterators.map(countInclusive.iterator(), e -> (b, p) -> b.value(e))), - ChunkedToXContentHelper.array("CountExclusive", Iterators.map(countExclusive.iterator(), e -> (b, p) -> b.value(e))), - ChunkedToXContentHelper.array( - "AnnualCO2TonsInclusive", - Iterators.map(annualCO2TonsInclusive.iterator(), e -> (b, p) -> b.value(e)) - ), - ChunkedToXContentHelper.array( - "AnnualCO2TonsExclusive", - Iterators.map(annualCO2TonsExclusive.iterator(), e -> (b, p) -> b.value(e)) - ), - ChunkedToXContentHelper.array( - "AnnualCostsUSDInclusive", - Iterators.map(annualCostsUSDInclusive.iterator(), e -> (b, p) -> b.value(e)) - ), - ChunkedToXContentHelper.array( - "AnnualCostsUSDExclusive", - Iterators.map(annualCostsUSDExclusive.iterator(), e -> (b, p) -> b.value(e)) - ), + ChunkedToXContentHelper.singleChunk((b, p) -> { + b.startArray("SourceLine"); + for (int sourceLine : sourceLines) { + b.value(sourceLine); + } + return b.endArray(); + }), + ChunkedToXContentHelper.singleChunk((b, p) -> { + b.startArray("CountInclusive"); + for (long countInclusive : countInclusive) { + b.value(countInclusive); + } + return b.endArray(); + }), + ChunkedToXContentHelper.singleChunk((b, p) -> { + b.startArray("CountExclusive"); + for (long c : countExclusive) { + b.value(c); + } + return b.endArray(); + }), + ChunkedToXContentHelper.singleChunk((b, p) -> { + b.startArray("AnnualCO2TonsInclusive"); + for (double co2Tons : annualCO2TonsInclusive) { + // write as raw value - we need direct control over the output representation (here: limit to 4 decimal places) + b.rawValue(NumberUtils.doubleToString(co2Tons)); + } + return b.endArray(); + }), + ChunkedToXContentHelper.singleChunk((b, p) -> { + b.startArray("AnnualCO2TonsExclusive"); + for (double co2Tons : 
annualCO2TonsExclusive) { + b.rawValue(NumberUtils.doubleToString(co2Tons)); + } + return b.endArray(); + }), + ChunkedToXContentHelper.singleChunk((b, p) -> { + b.startArray("AnnualCostsUSDInclusive"); + for (double costs : annualCostsUSDInclusive) { + b.rawValue(NumberUtils.doubleToString(costs)); + } + return b.endArray(); + }), + ChunkedToXContentHelper.singleChunk((b, p) -> { + b.startArray("AnnualCostsUSDExclusive"); + for (double costs : annualCostsUSDExclusive) { + b.rawValue(NumberUtils.doubleToString(costs)); + } + return b.endArray(); + }), Iterators.single((b, p) -> b.field("Size", size)), Iterators.single((b, p) -> b.field("SamplingRate", samplingRate)), Iterators.single((b, p) -> b.field("SelfCPU", selfCPU)), diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesRequest.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesRequest.java index 3dfe48744cb97..86ed038467191 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesRequest.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesRequest.java @@ -69,6 +69,9 @@ public class GetStackTracesRequest extends ActionRequest implements IndicesReque // sample counts by default and remove this flag. private Boolean adjustSampleCount; + // This is only meant for testing and is intentionally not exposed in the REST API. + private Integer shardSeed; + public GetStackTracesRequest() { this(null, null, null, null, null, null, null, null, null, null, null, null); } @@ -167,6 +170,14 @@ public void setAdjustSampleCount(Boolean adjustSampleCount) { this.adjustSampleCount = adjustSampleCount; } + public Integer getShardSeed() { + return shardSeed; + } + + public void setShardSeed(Integer shardSeed) { + this.shardSeed = shardSeed; + } + public void parseXContent(XContentParser parser) throws IOException { XContentParser.Token token = parser.currentToken(); String currentFieldName = null; diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/NumberUtils.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/NumberUtils.java new file mode 100644 index 0000000000000..d346dd279f250 --- /dev/null +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/NumberUtils.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.profiling; + +final class NumberUtils { + private NumberUtils() { + // no instances intended + } + + /** + * Converts a positive double number to a string. + * + * @param value The double value. + * @return The corresponding string representation rounded to four fractional digits. 
+ */ + public static String doubleToString(double value) { + if (value < 0.0001d) { + return "0"; + } + StringBuilder sb = new StringBuilder(); + int i = (int) value; + int f = (int) ((value - i) * 10000.0d + 0.5d); + sb.append(i); + sb.append("."); + if (f < 10) { + sb.append("000"); + } else if (f < 100) { + sb.append("00"); + } else if (f < 1000) { + sb.append("0"); + } + sb.append(f); + return sb.toString(); + } +} diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexTemplateRegistry.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexTemplateRegistry.java index 1762b2537c455..c90e0e52c4d58 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexTemplateRegistry.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexTemplateRegistry.java @@ -45,7 +45,8 @@ public class ProfilingIndexTemplateRegistry extends IndexTemplateRegistry { // version 2: Added 'profiling.host.machine' keyword mapping to profiling-hosts // version 3: Add optional component template 'profiling-ilm@custom' to all ILM-managed index templates // version 4: Added 'service.name' keyword mapping to profiling-events - public static final int INDEX_TEMPLATE_VERSION = 4; + // version 5: Add optional component template '@custom' to all index templates that reference component templates + public static final int INDEX_TEMPLATE_VERSION = 5; // history for individual indices / index templates. Only bump these for breaking changes that require to create a new index public static final int PROFILING_EVENTS_VERSION = 2; diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java index 417f4fccfa8d9..aa5f3efb179a2 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java @@ -257,6 +257,16 @@ private void searchGenericEventGroupedByStackTrace( ActionListener submitListener, GetStackTracesResponseBuilder responseBuilder ) { + + RandomSamplerAggregationBuilder randomSampler = new RandomSamplerAggregationBuilder("sample").setSeed(request.hashCode()) + .setProbability(responseBuilder.getSamplingRate()) + .subAggregation( + new CountedTermsAggregationBuilder("group_by").size(MAX_TRACE_EVENTS_RESULT_SIZE).field(request.getStackTraceIdsField()) + ); + // shard seed is only set in tests and ensures consistent results + if (request.getShardSeed() != null) { + randomSampler.setShardSeed(request.getShardSeed()); + } client.prepareSearch(request.getIndices()) .setTrackTotalHits(false) .setSize(0) @@ -266,14 +276,7 @@ private void searchGenericEventGroupedByStackTrace( .setQuery(request.getQuery()) .addAggregation(new MinAggregationBuilder("min_time").field("@timestamp")) .addAggregation(new MaxAggregationBuilder("max_time").field("@timestamp")) - .addAggregation( - new RandomSamplerAggregationBuilder("sample").setSeed(request.hashCode()) - .setProbability(responseBuilder.getSamplingRate()) - .subAggregation( - new CountedTermsAggregationBuilder("group_by").size(MAX_TRACE_EVENTS_RESULT_SIZE) - .field(request.getStackTraceIdsField()) - ) - ) + .addAggregation(randomSampler) .execute(handleEventsGroupedByStackTrace(submitTask, client, 
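Returning to the NumberUtils.doubleToString helper introduced above: it splits the value into integer and fractional parts, rounds the fraction half-up to four digits, and zero-pads the result. Two worked examples, with expected strings matching the new NumberUtilsTests further below:

// value = 10.022
//   i = (int) 10.022                          = 10
//   f = (int) ((10.022 - 10) * 10000 + 0.5)   = (int) ~220.5 = 220   (three digits, so pad one zero)
//   result: "10.0220"
//
// value = 0.00009   (below the 0.0001 cut-off)
//   result: "0"
assert "10.0220".equals(NumberUtils.doubleToString(10.022d));
assert "0".equals(NumberUtils.doubleToString(0.00009d));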
responseBuilder, submitListener, searchResponse -> { long totalSamples = 0; SingleBucketAggregation sample = searchResponse.getAggregations().get("sample"); diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/NumberUtilsTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/NumberUtilsTests.java new file mode 100644 index 0000000000000..0b8a410f9bb66 --- /dev/null +++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/NumberUtilsTests.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.profiling; + +import org.elasticsearch.test.ESTestCase; + +public class NumberUtilsTests extends ESTestCase { + public void testConvertNumberToString() { + assertEquals("872.6182", NumberUtils.doubleToString(872.6181989583333d)); + assertEquals("1222.1833", NumberUtils.doubleToString(1222.18325d)); + assertEquals("1222.1832", NumberUtils.doubleToString(1222.18324d)); + assertEquals("1.0013", NumberUtils.doubleToString(1.0013d)); + assertEquals("10.0220", NumberUtils.doubleToString(10.022d)); + assertEquals("222.0000", NumberUtils.doubleToString(222.0d)); + assertEquals("0.0001", NumberUtils.doubleToString(0.0001d)); + } + + public void testConvertZeroToString() { + assertEquals("0", NumberUtils.doubleToString(0.0d)); + assertEquals("0", NumberUtils.doubleToString(0.00009d)); + } +} diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java index 9009baf188cac..318f3888ac9b3 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java @@ -124,8 +124,8 @@ public String toString() { .allowEmptyExpressions(true) .resolveAliases(false) ) - .generalOptions( - IndicesOptions.GeneralOptions.builder().ignoreThrottled(true).allowClosedIndices(true).allowAliasToMultipleIndices(true) + .gatekeeperOptions( + IndicesOptions.GatekeeperOptions.builder().ignoreThrottled(true).allowClosedIndices(true).allowAliasToMultipleIndices(true) ) .build(); private static final IndicesOptions FROZEN_INDICES_OPTIONS = IndicesOptions.builder() @@ -138,8 +138,8 @@ public String toString() { .allowEmptyExpressions(true) .resolveAliases(false) ) - .generalOptions( - IndicesOptions.GeneralOptions.builder().ignoreThrottled(false).allowClosedIndices(true).allowAliasToMultipleIndices(true) + .gatekeeperOptions( + IndicesOptions.GatekeeperOptions.builder().ignoreThrottled(false).allowClosedIndices(true).allowAliasToMultipleIndices(true) ) .build(); @@ -153,8 +153,8 @@ public String toString() { .allowEmptyExpressions(true) .resolveAliases(true) ) - .generalOptions( - IndicesOptions.GeneralOptions.builder().ignoreThrottled(true).allowClosedIndices(true).allowAliasToMultipleIndices(true) + .gatekeeperOptions( + IndicesOptions.GatekeeperOptions.builder().ignoreThrottled(true).allowClosedIndices(true).allowAliasToMultipleIndices(true) ) .build(); public static final IndicesOptions FIELD_CAPS_FROZEN_INDICES_OPTIONS = IndicesOptions.builder() @@ -167,8 +167,8 @@ public String toString() { .allowEmptyExpressions(true) .resolveAliases(true) ) - .generalOptions( - 
IndicesOptions.GeneralOptions.builder().ignoreThrottled(false).allowClosedIndices(true).allowAliasToMultipleIndices(true) + .gatekeeperOptions( + IndicesOptions.GatekeeperOptions.builder().ignoreThrottled(false).allowClosedIndices(true).allowAliasToMultipleIndices(true) ) .build(); diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/optimizer/OptimizerRules.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/optimizer/OptimizerRules.java index f084b5cda4abe..7625cbf3a56e5 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/optimizer/OptimizerRules.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/optimizer/OptimizerRules.java @@ -1678,14 +1678,9 @@ public FoldNull() { @Override protected Expression rule(Expression e) { - if (e instanceof IsNotNull isnn) { - if (isnn.field().nullable() == Nullability.FALSE) { - return new Literal(e.source(), Boolean.TRUE, DataTypes.BOOLEAN); - } - } else if (e instanceof IsNull isn) { - if (isn.field().nullable() == Nullability.FALSE) { - return new Literal(e.source(), Boolean.FALSE, DataTypes.BOOLEAN); - } + Expression result = tryReplaceIsNullIsNotNull(e); + if (result != e) { + return result; } else if (e instanceof In in) { if (Expressions.isNull(in.value())) { return Literal.of(in, null); @@ -1697,6 +1692,19 @@ protected Expression rule(Expression e) { } return e; } + + protected Expression tryReplaceIsNullIsNotNull(Expression e) { + if (e instanceof IsNotNull isnn) { + if (isnn.field().nullable() == Nullability.FALSE) { + return new Literal(e.source(), Boolean.TRUE, DataTypes.BOOLEAN); + } + } else if (e instanceof IsNull isn) { + if (isn.field().nullable() == Nullability.FALSE) { + return new Literal(e.source(), Boolean.FALSE, DataTypes.BOOLEAN); + } + } + return e; + } } // a IS NULL AND a IS NOT NULL -> FALSE @@ -1851,7 +1859,7 @@ protected Set resolveExpressionAsRootAttributes(Expression exp, Attr private boolean doResolve(Expression exp, AttributeMap aliases, Set resolvedExpressions) { boolean changed = false; // check if the expression can be skipped or is not nullabe - if (skipExpression(exp) || exp.nullable() == Nullability.FALSE) { + if (skipExpression(exp)) { resolvedExpressions.add(exp); } else { for (Expression e : exp.references()) { @@ -1871,7 +1879,7 @@ private boolean doResolve(Expression exp, AttributeMap aliases, Set< } protected boolean skipExpression(Expression e) { - return false; + return e.nullable() == Nullability.FALSE; } } diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/util/SpatialCoordinateTypes.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/util/SpatialCoordinateTypes.java index 6508a67f7e785..32bd76cf84e19 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/util/SpatialCoordinateTypes.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/util/SpatialCoordinateTypes.java @@ -56,6 +56,15 @@ public long pointAsLong(double x, double y) { final long yi = XYEncodingUtils.encode((float) y); return (yi & 0xFFFFFFFFL) | xi << 32; } + }, + UNSPECIFIED { + public Point longAsPoint(long encoded) { + throw new UnsupportedOperationException("Cannot convert long to point without specifying coordinate type"); + } + + public long pointAsLong(double x, double y) { + throw new UnsupportedOperationException("Cannot convert point to long without specifying coordinate type"); + } }; public abstract Point longAsPoint(long encoded); @@ -63,9 +72,14 @@ public long pointAsLong(double x, double y) { public 
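// Illustrative only: a stripped-down, standalone analogue (plain Java, not the actual ql Expression
// classes) of what the extracted FoldNull#tryReplaceIsNullIsNotNull above does — a null check on a
// field that can never be null folds to a constant, everything else is left untouched.
final class FoldNullSketch {
    // Returns TRUE/FALSE when the check can be folded, or null when the expression must be kept.
    static Boolean foldNullCheck(boolean fieldIsNullable, boolean isNotNullCheck) {
        if (fieldIsNullable) {
            return null; // field may be null, keep the original IS [NOT] NULL expression
        }
        return isNotNullCheck ? Boolean.TRUE : Boolean.FALSE; // IS NOT NULL -> TRUE, IS NULL -> FALSE
    }
}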
abstract long pointAsLong(double x, double y); public long wkbAsLong(BytesRef wkb) { + Point point = wkbAsPoint(wkb); + return pointAsLong(point.getX(), point.getY()); + } + + public Point wkbAsPoint(BytesRef wkb) { Geometry geometry = WellKnownBinary.fromWKB(GeometryValidator.NOOP, false, wkb.bytes, wkb.offset, wkb.length); if (geometry instanceof Point point) { - return pointAsLong(point.getX(), point.getY()); + return point; } else { throw new IllegalArgumentException("Unsupported geometry: " + geometry.type()); } diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java index aef7a266fff37..1748c1be86b78 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java @@ -77,8 +77,6 @@ public class Rollup extends Plugin implements ActionPlugin, PersistentTaskPlugin public static final String TASK_THREAD_POOL_NAME = RollupField.NAME + "_indexing"; - public static final String ROLLUP_TEMPLATE_VERSION_FIELD = "rollup-version"; - private final SetOnce schedulerEngine = new SetOnce<>(); private final Settings settings; diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/RollupIndexCaps.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/RollupIndexCaps.java index 87f7a3de956fc..ebdcc1ed13e1f 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/RollupIndexCaps.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/RollupIndexCaps.java @@ -60,7 +60,7 @@ public class RollupIndexCaps implements Writeable, ToXContentFragment { ... job config, parsable by RollupJobConfig.PARSER ... 
} }, - "rollup-version": "7.0.0" + "rollup-version": "" } } */ diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java index 41c2f855ff8c9..a276971762c81 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java @@ -11,7 +11,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ResourceAlreadyExistsException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.admin.indices.create.CreateIndexAction; @@ -53,7 +52,6 @@ import org.elasticsearch.xpack.core.rollup.action.PutRollupJobAction; import org.elasticsearch.xpack.core.rollup.job.RollupJob; import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; -import org.elasticsearch.xpack.rollup.Rollup; import java.io.IOException; import java.util.Map; @@ -188,7 +186,7 @@ private static XContentBuilder createMappings(RollupJobConfig config) throws IOE .startObject("mappings") .startObject("_doc") .startObject("_meta") - .field(Rollup.ROLLUP_TEMPLATE_VERSION_FIELD, Version.CURRENT.toString()) + .field("rollup-version", "") // empty string to remain backwards compatible .startObject("_rollup") .field(config.getId(), config) .endObject() @@ -255,14 +253,6 @@ static void updateMapping( Map rollupMeta = (Map) ((Map) m).get(RollupField.ROLLUP_META); - String stringVersion = (String) ((Map) m).get(Rollup.ROLLUP_TEMPLATE_VERSION_FIELD); - if (stringVersion == null) { - listener.onFailure( - new IllegalStateException("Could not determine version of existing rollup metadata for index [" + indexName + "]") - ); - return; - } - if (rollupMeta.get(job.getConfig().getId()) != null) { String msg = "Cannot create rollup job [" + job.getConfig().getId() @@ -303,6 +293,7 @@ static void startPersistentTask( job.getConfig().getId(), RollupField.TASK_NAME, job, + null, ActionListener.wrap(rollupConfigPersistentTask -> waitForRollupStarted(job, listener, persistentTasksService), e -> { if (e instanceof ResourceAlreadyExistsException) { e = new ElasticsearchStatusException( diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java index dd6f5173cb6ba..b1455c4738623 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java @@ -8,7 +8,6 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ResourceAlreadyExistsException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.create.CreateIndexAction; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; @@ -25,7 +24,6 @@ import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.VersionUtils; 
import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.action.PutRollupJobAction; @@ -33,7 +31,6 @@ import org.elasticsearch.xpack.core.rollup.job.GroupConfig; import org.elasticsearch.xpack.core.rollup.job.RollupJob; import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; -import org.elasticsearch.xpack.rollup.Rollup; import org.mockito.ArgumentCaptor; import java.util.Collections; @@ -127,7 +124,7 @@ public void testIndexMetadata() throws InterruptedException { String mapping = requestCaptor.getValue().mappings(); // Make sure the version is present, and we have our date template (the most important aspects) - assertThat(mapping, containsString("\"rollup-version\":\"" + Version.CURRENT.toString() + "\"")); + assertThat(mapping, containsString("\"rollup-version\":\"\"")); assertThat(mapping, containsString("\"path_match\":\"*.date_histogram.timestamp\"")); listenerCaptor.getValue().onFailure(new ResourceAlreadyExistsException(job.getConfig().getRollupIndex())); @@ -245,38 +242,6 @@ public void testMetadataButNotRollup() { verify(client).execute(eq(GetMappingsAction.INSTANCE), any(GetMappingsRequest.class), any()); } - @SuppressWarnings({ "unchecked", "rawtypes" }) - public void testNoMappingVersion() { - RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap()); - - ActionListener testListener = ActionListener.wrap(response -> { - fail("Listener success should not have been triggered."); - }, e -> { - assertThat( - e.getMessage(), - equalTo("Could not determine version of existing rollup metadata for index [" + job.getConfig().getRollupIndex() + "]") - ); - }); - - Logger logger = mock(Logger.class); - Client client = mock(Client.class); - - ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(ActionListener.class); - doAnswer(invocation -> { - GetMappingsResponse response = mock(GetMappingsResponse.class); - Map m = Maps.newMapWithExpectedSize(2); - m.put(RollupField.ROLLUP_META, Collections.singletonMap(job.getConfig().getId(), job.getConfig())); - MappingMetadata meta = new MappingMetadata(RollupField.TYPE_NAME, Collections.singletonMap("_meta", m)); - - when(response.getMappings()).thenReturn(Map.of(job.getConfig().getRollupIndex(), meta)); - requestCaptor.getValue().onResponse(response); - return null; - }).when(client).execute(eq(GetMappingsAction.INSTANCE), any(GetMappingsRequest.class), requestCaptor.capture()); - - TransportPutRollupJobAction.updateMapping(job, testListener, mock(PersistentTasksService.class), client, logger); - verify(client).execute(eq(GetMappingsAction.INSTANCE), any(GetMappingsRequest.class), any()); - } - @SuppressWarnings({ "unchecked", "rawtypes" }) public void testJobAlreadyInMapping() { RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random(), "foo"), Collections.emptyMap()); @@ -299,7 +264,6 @@ public void testJobAlreadyInMapping() { doAnswer(invocation -> { GetMappingsResponse response = mock(GetMappingsResponse.class); Map m = Maps.newMapWithExpectedSize(2); - m.put(Rollup.ROLLUP_TEMPLATE_VERSION_FIELD, VersionUtils.randomCompatibleVersion(random(), Version.CURRENT)); m.put(RollupField.ROLLUP_META, Collections.singletonMap(job.getConfig().getId(), job.getConfig())); MappingMetadata meta = new MappingMetadata(RollupField.TYPE_NAME, Collections.singletonMap("_meta", m)); @@ -339,7 +303,6 @@ public void testAddJobToMapping() { doAnswer(invocation -> { GetMappingsResponse 
response = mock(GetMappingsResponse.class); Map m = Maps.newMapWithExpectedSize(2); - m.put(Rollup.ROLLUP_TEMPLATE_VERSION_FIELD, VersionUtils.randomCompatibleVersion(random(), Version.CURRENT)); m.put(RollupField.ROLLUP_META, Collections.singletonMap(unrelatedJob.getId(), unrelatedJob)); MappingMetadata meta = new MappingMetadata(RollupField.TYPE_NAME, Collections.singletonMap("_meta", m)); @@ -374,10 +337,11 @@ public void testTaskAlreadyExists() { doAnswer(invocation -> { requestCaptor.getValue().onFailure(new ResourceAlreadyExistsException(job.getConfig().getRollupIndex())); return null; - }).when(tasksService).sendStartRequest(eq(job.getConfig().getId()), eq(RollupField.TASK_NAME), eq(job), requestCaptor.capture()); + }).when(tasksService) + .sendStartRequest(eq(job.getConfig().getId()), eq(RollupField.TASK_NAME), eq(job), eq(null), requestCaptor.capture()); TransportPutRollupJobAction.startPersistentTask(job, testListener, tasksService); - verify(tasksService).sendStartRequest(eq(job.getConfig().getId()), eq(RollupField.TASK_NAME), eq(job), any()); + verify(tasksService).sendStartRequest(eq(job.getConfig().getId()), eq(RollupField.TASK_NAME), eq(job), eq(null), any()); } @SuppressWarnings({ "unchecked", "rawtypes" }) @@ -401,7 +365,8 @@ public void testStartTask() { ); requestCaptor.getValue().onResponse(response); return null; - }).when(tasksService).sendStartRequest(eq(job.getConfig().getId()), eq(RollupField.TASK_NAME), eq(job), requestCaptor.capture()); + }).when(tasksService) + .sendStartRequest(eq(job.getConfig().getId()), eq(RollupField.TASK_NAME), eq(job), eq(null), requestCaptor.capture()); ArgumentCaptor requestCaptor2 = ArgumentCaptor.forClass( PersistentTasksService.WaitForPersistentTaskListener.class @@ -413,7 +378,7 @@ public void testStartTask() { }).when(tasksService).waitForPersistentTaskCondition(eq(job.getConfig().getId()), any(), any(), requestCaptor2.capture()); TransportPutRollupJobAction.startPersistentTask(job, testListener, tasksService); - verify(tasksService).sendStartRequest(eq(job.getConfig().getId()), eq(RollupField.TASK_NAME), eq(job), any()); + verify(tasksService).sendStartRequest(eq(job.getConfig().getId()), eq(RollupField.TASK_NAME), eq(job), eq(null), any()); verify(tasksService).waitForPersistentTaskCondition(eq(job.getConfig().getId()), any(), any(), any()); } diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java index 18b4e6ed7cb31..4b9e1b0d9211e 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java @@ -102,7 +102,10 @@ public void testCreateAndRestorePartialSearchableSnapshot() throws Exception { // we can bypass this by forcing soft deletes to be used. TODO this restriction can be lifted when #55142 is resolved. 
final Settings.Builder originalIndexSettings = Settings.builder().put(INDEX_SOFT_DELETES_SETTING.getKey(), true); if (randomBoolean()) { - originalIndexSettings.put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), randomFrom("false", "true", "checksum")); + // INDEX_CHECK_ON_STARTUP requires expensive processing due to verification the integrity of many important files during + // a shard recovery or relocation. Therefore, it takes lots of time for the files to clean up and the assertShardFolder + // check may not complete in 30s. + originalIndexSettings.put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), "false"); } assertAcked(prepareCreate(indexName, originalIndexSettings)); assertAcked(indicesAdmin().prepareAliases().addAlias(indexName, aliasName)); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java index 38222f64b282b..ddd9f40b5404c 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java @@ -111,7 +111,7 @@ public void testCreateAndRestoreSearchableSnapshot() throws Exception { // we can bypass this by forcing soft deletes to be used. TODO this restriction can be lifted when #55142 is resolved. final Settings.Builder originalIndexSettings = Settings.builder().put(INDEX_SOFT_DELETES_SETTING.getKey(), true); if (randomBoolean()) { - originalIndexSettings.put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), randomFrom("false", "true", "checksum")); + originalIndexSettings.put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), "false"); } assertAcked(prepareCreate(indexName, originalIndexSettings)); assertAcked(indicesAdmin().prepareAliases().addAlias(indexName, aliasName)); diff --git a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java index 24ece3ff99bc4..a9c0653716851 100644 --- a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java +++ b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java @@ -590,6 +590,29 @@ static void verifyIssuer(Certificate certificate, CAInfo caInfo, Terminal termin throw new UserException(ExitCodes.CONFIG, "Certificate verification failed"); } } + + protected void writePemPrivateKey( + Terminal terminal, + OptionSet options, + ZipOutputStream outputStream, + JcaPEMWriter pemWriter, + String keyFileName, + PrivateKey privateKey + ) throws IOException { + final boolean usePassword = useOutputPassword(options); + final char[] outputPassword = getOutputPassword(options); + outputStream.putNextEntry(new ZipEntry(keyFileName)); + if (usePassword) { + withPassword(keyFileName, outputPassword, terminal, true, password -> { + pemWriter.writeObject(privateKey, getEncrypter(password)); + return null; + }); + } else { + pemWriter.writeObject(privateKey); + } + pemWriter.flush(); + outputStream.closeEntry(); + } } static class SigningRequestCommand extends CertificateCommand { @@ -621,9 +644,7 @@ public void execute(Terminal terminal, OptionSet options, 
Environment env, Proce terminal.println(""); final Path output = resolveOutputPath(terminal, options, DEFAULT_CSR_ZIP); - final int keySize = getKeySize(options); - Collection certificateInformations = getCertificateInformationList(terminal, options); - generateAndWriteCsrs(output, keySize, certificateInformations); + generateAndWriteCsrs(terminal, options, output); terminal.println(""); terminal.println("Certificate signing requests have been written to " + output); @@ -639,12 +660,25 @@ public void execute(Terminal terminal, OptionSet options, Environment env, Proce terminal.println("follow the SSL configuration instructions in the product guide."); } + // For testing + void generateAndWriteCsrs(Terminal terminal, OptionSet options, Path output) throws Exception { + final int keySize = getKeySize(options); + Collection certificateInformations = getCertificateInformationList(terminal, options); + generateAndWriteCsrs(terminal, options, output, keySize, certificateInformations); + } + /** * Generates certificate signing requests and writes them out to the specified file in zip format * * @param certInfo the details to use in the certificate signing requests */ - void generateAndWriteCsrs(Path output, int keySize, Collection certInfo) throws Exception { + void generateAndWriteCsrs( + Terminal terminal, + OptionSet options, + Path output, + int keySize, + Collection certInfo + ) throws Exception { fullyWriteZipFile(output, (outputStream, pemWriter) -> { for (CertificateInformation certificateInformation : certInfo) { KeyPair keyPair = CertGenUtils.generateKeyPair(keySize); @@ -667,10 +701,14 @@ void generateAndWriteCsrs(Path output, int keySize, Collection { for (CertificateInformation certificateInformation : certs) { CertificateAndKey pair = generateCertificateAndKey(certificateInformation, caInfo, keySize, days, terminal); @@ -825,20 +861,10 @@ void generateAndWriteSignedCertificates( outputStream.closeEntry(); // write private key - final String keyFileName = entryBase + ".key"; - outputStream.putNextEntry(new ZipEntry(keyFileName)); - if (usePassword) { - withPassword(keyFileName, outputPassword, terminal, true, password -> { - pemWriter.writeObject(pair.key, getEncrypter(password)); - return null; - }); - } else { - pemWriter.writeObject(pair.key); - } - pemWriter.flush(); - outputStream.closeEntry(); + writePemPrivateKey(terminal, options, outputStream, pemWriter, entryBase + ".key", pair.key); } else { final String fileName = entryBase + ".p12"; + final char[] outputPassword = super.getOutputPassword(options); outputStream.putNextEntry(new ZipEntry(fileName)); writePkcs12( fileName, @@ -855,6 +881,7 @@ void generateAndWriteSignedCertificates( }); } else { assert certs.size() == 1; + final char[] outputPassword = super.getOutputPassword(options); CertificateInformation certificateInformation = certs.iterator().next(); CertificateAndKey pair = generateCertificateAndKey(certificateInformation, caInfo, keySize, days, terminal); fullyWriteFile( diff --git a/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/CertificateToolTests.java b/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/CertificateToolTests.java index 702bfac2a3ea5..1a11234c98e6e 100644 --- a/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/CertificateToolTests.java +++ b/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/CertificateToolTests.java @@ -25,7 +25,10 @@ import org.bouncycastle.asn1.x509.GeneralName; 
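// Illustrative only: the new CSR tests added further down in this file drive SigningRequestCommand
// with command-line options along these lines (hypothetical names/addresses; the option set mirrors
// testGeneratingCsrFromCommandLineParameters below):
String[] exampleCsrArgs = { "--keysize", "2048", "--name", "node01", "--dns=node01.example.com", "--ip=10.0.0.1", "--pass" };
// When --pass is given, the .key entries in the generated zip are written PEM-encrypted and can be
// read back as a PEMEncryptedKeyPair, as the new readPrivateKey test helper below demonstrates.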
import org.bouncycastle.asn1.x509.GeneralNames; import org.bouncycastle.cert.X509CertificateHolder; +import org.bouncycastle.openssl.PEMEncryptedKeyPair; +import org.bouncycastle.openssl.PEMKeyPair; import org.bouncycastle.openssl.PEMParser; +import org.bouncycastle.openssl.bc.BcPEMDecryptorProvider; import org.bouncycastle.pkcs.PKCS10CertificationRequest; import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.MockTerminal; @@ -77,6 +80,7 @@ import java.security.cert.X509Certificate; import java.security.interfaces.RSAKey; import java.time.temporal.ChronoUnit; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -88,6 +92,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Function; import java.util.stream.Collectors; +import java.util.stream.Stream; import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.TrustManagerFactory; @@ -266,9 +271,12 @@ public void testParsingFileWithInvalidDetails() throws Exception { assertThat(terminal.getErrorOutput(), containsString("could not be converted to a valid DN")); } - public void testGeneratingCsr() throws Exception { + public void testGeneratingCsrFromInstancesFile() throws Exception { Path tempDir = initTempDir(); Path outputFile = tempDir.resolve("out.zip"); + MockTerminal terminal = MockTerminal.create(); + final List args = new ArrayList<>(); + Path instanceFile = writeInstancesTo(tempDir.resolve("instances.yml")); Collection certInfos = CertificateTool.parseFile(instanceFile); assertEquals(4, certInfos.size()); @@ -276,7 +284,22 @@ public void testGeneratingCsr() throws Exception { assertFalse(Files.exists(outputFile)); int keySize = randomFrom(1024, 2048); - new CertificateTool.SigningRequestCommand().generateAndWriteCsrs(outputFile, keySize, certInfos); + final boolean encrypt = randomBoolean(); + final String password = encrypt ? 
randomAlphaOfLengthBetween(8, 12) : null; + if (encrypt) { + args.add("--pass"); + if (randomBoolean()) { + args.add(password); + } else { + for (var ignore : certInfos) { + terminal.addSecretInput(password); + } + } + } + + final CertificateTool.SigningRequestCommand command = new CertificateTool.SigningRequestCommand(); + final OptionSet options = command.getParser().parse(Strings.toStringArray(args)); + command.generateAndWriteCsrs(terminal, options, outputFile, keySize, certInfos); assertTrue(Files.exists(outputFile)); Set perms = Files.getPosixFilePermissions(outputFile); @@ -292,7 +315,6 @@ public void testGeneratingCsr() throws Exception { assertTrue(Files.exists(zipRoot.resolve(filename))); final Path csr = zipRoot.resolve(filename + "/" + filename + ".csr"); assertTrue(Files.exists(csr)); - assertTrue(Files.exists(zipRoot.resolve(filename + "/" + filename + ".key"))); PKCS10CertificationRequest request = readCertificateRequest(csr); assertEquals(certInfo.name.x500Principal.getName(), request.getSubject().toString()); Attribute[] extensionsReq = request.getAttributes(PKCSObjectIdentifiers.pkcs_9_at_extensionRequest); @@ -304,9 +326,84 @@ public void testGeneratingCsr() throws Exception { } else { assertEquals(0, extensionsReq.length); } + + final Path keyPath = zipRoot.resolve(filename + "/" + filename + ".key"); + assertTrue(Files.exists(keyPath)); + PEMKeyPair key = readPrivateKey(keyPath, password); + assertNotNull(key); } } + public void testGeneratingCsrFromCommandLineParameters() throws Exception { + Path tempDir = initTempDir(); + Path outputFile = tempDir.resolve("out.zip"); + MockTerminal terminal = MockTerminal.create(); + final List args = new ArrayList<>(); + + final int keySize = randomFrom(1024, 2048); + args.add("--keysize"); + args.add(String.valueOf(keySize)); + + final String name = randomAlphaOfLengthBetween(4, 16); + args.add("--name"); + args.add(name); + + final List dns = randomList(0, 4, () -> randomAlphaOfLengthBetween(4, 8) + "." + randomAlphaOfLengthBetween(2, 5)); + dns.stream().map(s -> "--dns=" + s).forEach(args::add); + final List ip = randomList( + 0, + 2, + () -> Stream.generate(() -> randomIntBetween(10, 250)).limit(4).map(String::valueOf).collect(Collectors.joining(".")) + ); + ip.stream().map(s -> "--ip=" + s).forEach(args::add); + + final boolean encrypt = randomBoolean(); + final String password = encrypt ? 
randomAlphaOfLengthBetween(8, 12) : null; + if (encrypt) { + args.add("--pass"); + if (randomBoolean()) { + args.add(password); + } else { + terminal.addSecretInput(password); + } + } + + final CertificateTool.SigningRequestCommand command = new CertificateTool.SigningRequestCommand(); + final OptionSet options = command.getParser().parse(Strings.toStringArray(args)); + command.generateAndWriteCsrs(terminal, options, outputFile); + assertTrue(Files.exists(outputFile)); + + Set perms = Files.getPosixFilePermissions(outputFile); + assertTrue(perms.toString(), perms.contains(PosixFilePermission.OWNER_READ)); + assertTrue(perms.toString(), perms.contains(PosixFilePermission.OWNER_WRITE)); + assertEquals(perms.toString(), 2, perms.size()); + + final Path zipRoot = getRootPathOfZip(outputFile); + + assertFalse(Files.exists(zipRoot.resolve("ca"))); + assertTrue(Files.exists(zipRoot.resolve(name))); + final Path csr = zipRoot.resolve(name + "/" + name + ".csr"); + assertTrue(Files.exists(csr)); + + PKCS10CertificationRequest request = readCertificateRequest(csr); + assertEquals("CN=" + name, request.getSubject().toString()); + + Attribute[] extensionsReq = request.getAttributes(PKCSObjectIdentifiers.pkcs_9_at_extensionRequest); + if (dns.size() > 0 || ip.size() > 0) { + assertEquals(1, extensionsReq.length); + Extensions extensions = Extensions.getInstance(extensionsReq[0].getAttributeValues()[0]); + GeneralNames subjAltNames = GeneralNames.fromExtensions(extensions, Extension.subjectAlternativeName); + assertSubjAltNames(subjAltNames, ip, dns); + } else { + assertEquals(0, extensionsReq.length); + } + + final Path keyPath = zipRoot.resolve(name + "/" + name + ".key"); + assertTrue(Files.exists(keyPath)); + PEMKeyPair key = readPrivateKey(keyPath, password); + assertNotNull(key); + } + public void testGeneratingSignedPemCertificates() throws Exception { Path tempDir = initTempDir(); Path outputFile = tempDir.resolve("out.zip"); @@ -939,19 +1036,6 @@ private int getDurationInDays(X509Certificate cert) { return (int) ChronoUnit.DAYS.between(cert.getNotBefore().toInstant(), cert.getNotAfter().toInstant()); } - private void assertSubjAltNames(Certificate certificate, String ip, String dns) throws Exception { - final X509CertificateHolder holder = new X509CertificateHolder(certificate.getEncoded()); - final GeneralNames names = GeneralNames.fromExtensions(holder.getExtensions(), Extension.subjectAlternativeName); - final CertificateInformation certInfo = new CertificateInformation( - "n", - "n", - Collections.singletonList(ip), - Collections.singletonList(dns), - Collections.emptyList() - ); - assertSubjAltNames(names, certInfo); - } - /** * Checks whether there are keys in {@code keyStore} that are trusted by {@code trustStore}. 
*/ @@ -981,6 +1065,21 @@ private PKCS10CertificationRequest readCertificateRequest(Path path) throws Exce } } + private PEMKeyPair readPrivateKey(Path path, String password) throws Exception { + try (Reader reader = Files.newBufferedReader(path); PEMParser pemParser = new PEMParser(reader)) { + Object object = pemParser.readObject(); + if (password == null) { + assertThat(object, instanceOf(PEMKeyPair.class)); + return (PEMKeyPair) object; + } else { + assertThat(object, instanceOf(PEMEncryptedKeyPair.class)); + final PEMEncryptedKeyPair encryptedKeyPair = (PEMEncryptedKeyPair) object; + assertThat(encryptedKeyPair.getDekAlgName(), is("AES-128-CBC")); + return encryptedKeyPair.decryptKeyPair(new BcPEMDecryptorProvider(password.toCharArray())); + } + } + } + private X509Certificate readX509Certificate(InputStream input) throws Exception { List list = CertParsingUtils.readCertificates(input); assertEquals(1, list.size()); @@ -988,6 +1087,17 @@ private X509Certificate readX509Certificate(InputStream input) throws Exception return (X509Certificate) list.get(0); } + private void assertSubjAltNames(Certificate certificate, String ip, String dns) throws Exception { + final X509CertificateHolder holder = new X509CertificateHolder(certificate.getEncoded()); + final GeneralNames names = GeneralNames.fromExtensions(holder.getExtensions(), Extension.subjectAlternativeName); + assertSubjAltNames(names, Collections.singletonList(ip), Collections.singletonList(dns)); + } + + private void assertSubjAltNames(GeneralNames generalNames, List ip, List dns) throws Exception { + final CertificateInformation certInfo = new CertificateInformation("n", "n", ip, dns, Collections.emptyList()); + assertSubjAltNames(generalNames, certInfo); + } + private void assertSubjAltNames(GeneralNames subjAltNames, CertificateInformation certInfo) throws Exception { final int expectedCount = certInfo.ipAddresses.size() + certInfo.dnsNames.size() + certInfo.commonNames.size(); assertEquals(expectedCount, subjAltNames.getNames().length); diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityBwcRestIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityBwcRestIT.java index 7c26b8e386cc5..fee5129f8c9b8 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityBwcRestIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityBwcRestIT.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.remotecluster; -import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; @@ -41,7 +40,6 @@ /** * BWC test which ensures that users and API keys with defined {@code remote_indices} privileges can be used to query legacy remote clusters */ -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/104858") public class RemoteClusterSecurityBwcRestIT extends AbstractRemoteClusterSecurityTestCase { private static final Version OLD_CLUSTER_VERSION = Version.fromString(System.getProperty("tests.old_cluster_version")); diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityRCS2PainlessExecuteIT.java 
b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityRCS2PainlessExecuteIT.java index b24122c1302fc..2fca49191d51b 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityRCS2PainlessExecuteIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityRCS2PainlessExecuteIT.java @@ -179,6 +179,15 @@ public void testPainlessExecute() throws Exception { String responseBody = EntityUtils.toString(response.getEntity()); assertThat(responseBody, equalTo("{\"result\":[\"test\"]}")); } + { + // TEST CASE 2: Query remote cluster for index1 - should fail since no permissions granted for remote clusters yet + Request painlessExecuteRemote = createPainlessExecuteRequest("my_remote_cluster:index1"); + ResponseException exc = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(painlessExecuteRemote)); + assertThat(exc.getResponse().getStatusLine().getStatusCode(), is(403)); + String errorResponseBody = EntityUtils.toString(exc.getResponse().getEntity()); + assertThat(errorResponseBody, containsString("unauthorized for user [remote_search_user]")); + assertThat(errorResponseBody, containsString("\"type\":\"security_exception\"")); + } { // update role to have permissions to remote index* pattern var updateRoleRequest = new Request("PUT", "/_security/role/" + REMOTE_SEARCH_ROLE); @@ -202,7 +211,7 @@ public void testPainlessExecute() throws Exception { assertOK(adminClient().performRequest(updateRoleRequest)); } { - // TEST CASE 2: Query remote cluster for secretindex - should fail since no perms granted for it + // TEST CASE 3: Query remote cluster for secretindex - should fail since no perms granted for it Request painlessExecuteRemote = createPainlessExecuteRequest("my_remote_cluster:secretindex"); ResponseException exc = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(painlessExecuteRemote)); String errorResponseBody = EntityUtils.toString(exc.getResponse().getEntity()); @@ -212,7 +221,7 @@ public void testPainlessExecute() throws Exception { assertThat(errorResponseBody, containsString("\"type\":\"security_exception\"")); } { - // TEST CASE 3: Query remote cluster for index1 - should succeed since read and cross-cluster-read perms granted + // TEST CASE 4: Query remote cluster for index1 - should succeed since read and cross-cluster-read perms granted Request painlessExecuteRemote = createPainlessExecuteRequest("my_remote_cluster:index1"); Response response = performRequestWithRemoteSearchUser(painlessExecuteRemote); String responseBody = EntityUtils.toString(response.getEntity()); @@ -220,7 +229,7 @@ public void testPainlessExecute() throws Exception { assertThat(responseBody, equalTo("{\"result\":[\"test\"]}")); } { - // TEST CASE 4: Query local cluster for not_present index - should fail with 403 since role does not have perms for this index + // TEST CASE 5: Query local cluster for not_present index - should fail with 403 since role does not have perms for this index Request painlessExecuteLocal = createPainlessExecuteRequest("index_not_present"); ResponseException exc = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(painlessExecuteLocal)); assertThat(exc.getResponse().getStatusLine().getStatusCode(), is(403)); @@ -230,7 +239,7 @@ public void testPainlessExecute() throws Exception { 
assertThat(errorResponseBody, containsString("\"type\":\"security_exception\"")); } { - // TEST CASE 5: Query local cluster for my_local_123 index - role has perms for this pattern, but index does not exist, so 404 + // TEST CASE 6: Query local cluster for my_local_123 index - role has perms for this pattern, but index does not exist, so 404 Request painlessExecuteLocal = createPainlessExecuteRequest("my_local_123"); ResponseException exc = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(painlessExecuteLocal)); assertThat(exc.getResponse().getStatusLine().getStatusCode(), is(404)); @@ -238,7 +247,7 @@ public void testPainlessExecute() throws Exception { assertThat(errorResponseBody, containsString("\"type\":\"index_not_found_exception\"")); } { - // TEST CASE 6: Query local cluster for my_local* index - painless/execute does not allow wildcards, so fails with 400 + // TEST CASE 7: Query local cluster for my_local* index - painless/execute does not allow wildcards, so fails with 400 Request painlessExecuteLocal = createPainlessExecuteRequest("my_local*"); ResponseException exc = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(painlessExecuteLocal)); assertThat(exc.getResponse().getStatusLine().getStatusCode(), is(400)); @@ -247,7 +256,7 @@ public void testPainlessExecute() throws Exception { assertThat(errorResponseBody, containsString("\"type\":\"illegal_argument_exception\"")); } { - // TEST CASE 7: Query remote cluster for cluster that does not exist, and user does not have perms for that pattern - 403 ??? + // TEST CASE 8: Query remote cluster for cluster that does not exist, and user does not have perms for that pattern - 403 ??? Request painlessExecuteRemote = createPainlessExecuteRequest("my_remote_cluster:abc123"); ResponseException exc = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(painlessExecuteRemote)); assertThat(exc.getResponse().getStatusLine().getStatusCode(), is(403)); @@ -257,7 +266,7 @@ public void testPainlessExecute() throws Exception { assertThat(errorResponseBody, containsString("\"type\":\"security_exception\"")); } { - // TEST CASE 8: Query remote cluster for cluster that does not exist, but has permissions for the index pattern - 404 + // TEST CASE 9: Query remote cluster for cluster that does not exist, but has permissions for the index pattern - 404 Request painlessExecuteRemote = createPainlessExecuteRequest("my_remote_cluster:index123"); ResponseException exc = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(painlessExecuteRemote)); assertThat(exc.getResponse().getStatusLine().getStatusCode(), is(404)); @@ -265,7 +274,7 @@ public void testPainlessExecute() throws Exception { assertThat(errorResponseBody, containsString("\"type\":\"index_not_found_exception\"")); } { - // TEST CASE 9: Query remote cluster with wildcard in index - painless/execute does not allow wildcards, so fails with 400 + // TEST CASE 10: Query remote cluster with wildcard in index - painless/execute does not allow wildcards, so fails with 400 Request painlessExecuteRemote = createPainlessExecuteRequest("my_remote_cluster:index*"); ResponseException exc = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(painlessExecuteRemote)); assertThat(exc.getResponse().getStatusLine().getStatusCode(), is(400)); diff --git 
a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java index 2d743f562df8e..e65db8632062d 100644 --- a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java +++ b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java @@ -340,6 +340,7 @@ public class Constants { "cluster:monitor/nodes/info", "cluster:monitor/nodes/stats", "cluster:monitor/nodes/usage", + "cluster:monitor/allocation/stats", "cluster:monitor/profiling/status/get", "cluster:monitor/remote/info", "cluster:monitor/settings", @@ -348,6 +349,8 @@ public class Constants { "cluster:monitor/task", "cluster:monitor/task/get", "cluster:monitor/tasks/lists", + "cluster:monitor/text_structure/find_field_structure", + "cluster:monitor/text_structure/find_message_structure", "cluster:monitor/text_structure/findstructure", "cluster:monitor/text_structure/test_grok_pattern", "cluster:monitor/transform/get", diff --git a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecuritySlowLogIT.java b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecuritySlowLogIT.java new file mode 100644 index 0000000000000..704799a45824c --- /dev/null +++ b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecuritySlowLogIT.java @@ -0,0 +1,392 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.security; + +import org.apache.http.HttpHeaders; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Strings; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.LogType; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xcontent.XContentType; +import org.junit.ClassRule; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Base64; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; + +import static org.hamcrest.collection.IsIn.in; +import static org.hamcrest.core.Every.everyItem; +import static org.hamcrest.core.IsNot.not; + +public class SecuritySlowLogIT extends ESRestTestCase { + + private record TestIndexData( + String name, + boolean searchSlowLogEnabled, + boolean indexSlowLogEnabled, + boolean searchSlowLogUserEnabled, + boolean indexSlowLogUserEnabled + ) {} + + private static int currentSearchLogIndex = 0; + private static int currentIndexLogIndex = 0; + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .nodes(1) + .distribution(DistributionType.DEFAULT) + .setting("xpack.security.enabled", "true") + .user("admin_user", "admin-password") + .user("api_user", "api-password", "superuser", false) + .build(); + + @Override + protected Settings restAdminSettings() { + String token = basicAuthHeaderValue("admin_user", new SecureString("admin-password".toCharArray())); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); + } + + @Override + protected Settings restClientSettings() { + String token = basicAuthHeaderValue("api_user", new SecureString("api-password".toCharArray())); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); + } + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + public void testSlowLogWithApiUser() throws Exception { + List testIndices = randomTestIndexData(); + for (TestIndexData testData : testIndices) { + searchSomeData(testData.name); + indexSomeData(testData.name); + } + + Map expectedUser = Map.of("user.name", "api_user", "user.realm", "default_file", "auth.type", "REALM"); + + verifySearchSlowLogMatchesTestData(testIndices, expectedUser); + verifyIndexSlowLogMatchesTestData(testIndices, expectedUser); + } + + public void testSlowLogWithUserWithFullName() throws Exception { + List testIndices = randomTestIndexData(); + createUserWithFullName("full_name", "full-name-password", "Full Name", new String[] { "superuser" }); + for (TestIndexData testData : testIndices) { + final RequestOptions requestOptions = RequestOptions.DEFAULT.toBuilder() + .addHeader("Authorization", basicAuthHeaderValue("full_name", new SecureString("full-name-password".toCharArray()))) + .build(); + searchSomeData(testData.name, requestOptions); + indexSomeData(testData.name, requestOptions); + } + + Map expectedUser = 
Map.of( + "user.name", + "full_name", + "user.full_name", + "Full Name", + "user.realm", + "default_native", + "auth.type", + "REALM" + ); + + verifySearchSlowLogMatchesTestData(testIndices, expectedUser); + verifyIndexSlowLogMatchesTestData(testIndices, expectedUser); + } + + public void testSlowLogWithUserWithFullNameWithRunAs() throws Exception { + List testIndices = randomTestIndexData(); + createUserWithFullName("full_name", "full-name-password", "Full Name", new String[] { "superuser" }); + for (TestIndexData testData : testIndices) { + final RequestOptions requestOptions = RequestOptions.DEFAULT.toBuilder() + .addHeader("es-security-runas-user", "full_name") + .addHeader("Authorization", basicAuthHeaderValue("admin_user", new SecureString("admin-password".toCharArray()))) + .build(); + searchSomeData(testData.name, requestOptions); + indexSomeData(testData.name, requestOptions); + } + + Map expectedUser = Map.of( + "user.name", + "admin_user", + "user.effective.full_name", + "Full Name", + "user.realm", + "default_file", + "auth.type", + "REALM" + ); + + verifySearchSlowLogMatchesTestData(testIndices, expectedUser); + verifyIndexSlowLogMatchesTestData(testIndices, expectedUser); + } + + public void testSlowLogWithApiKey() throws Exception { + List testIndices = randomTestIndexData(); + String apiKeyName = randomAlphaOfLengthBetween(10, 15); + Map createApiKeyResponse = createApiKey( + apiKeyName, + basicAuthHeaderValue("admin_user", new SecureString("admin-password".toCharArray())) + ); + String apiKeyHeader = Base64.getEncoder() + .encodeToString( + (createApiKeyResponse.get("id") + ":" + createApiKeyResponse.get("api_key").toString()).getBytes(StandardCharsets.UTF_8) + ); + + for (TestIndexData testData : testIndices) { + final RequestOptions requestOptions = RequestOptions.DEFAULT.toBuilder() + .addHeader("Authorization", "ApiKey " + apiKeyHeader) + .build(); + searchSomeData(testData.name, requestOptions); + indexSomeData(testData.name, requestOptions); + } + + Map expectedUser = Map.of( + "user.name", + "admin_user", + "user.realm", + "_es_api_key", + "auth.type", + "API_KEY", + "apikey.id", + createApiKeyResponse.get("id"), + "apikey.name", + apiKeyName + ); + + verifySearchSlowLogMatchesTestData(testIndices, expectedUser); + verifyIndexSlowLogMatchesTestData(testIndices, expectedUser); + } + + public void testSlowLogWithRunAs() throws Exception { + List testIndices = randomTestIndexData(); + + for (TestIndexData testData : testIndices) { + final RequestOptions requestOptions = RequestOptions.DEFAULT.toBuilder() + .addHeader("es-security-runas-user", "api_user") + .addHeader("Authorization", basicAuthHeaderValue("admin_user", new SecureString("admin-password".toCharArray()))) + .build(); + searchSomeData(testData.name, requestOptions); + indexSomeData(testData.name, requestOptions); + } + + Map expectedUser = Map.of( + "user.name", + "admin_user", + "user.effective.name", + "api_user", + "user.realm", + "default_file", + "user.effective.realm", + "default_file", + "auth.type", + "REALM" + ); + + verifySearchSlowLogMatchesTestData(testIndices, expectedUser); + verifyIndexSlowLogMatchesTestData(testIndices, expectedUser); + } + + public void testSlowLogWithServiceAccount() throws Exception { + List testIndices = randomTestIndexData(); + Map createServiceAccountResponse = createServiceAccountToken(); + @SuppressWarnings("unchecked") + String tokenValue = ((Map) createServiceAccountResponse.get("token")).get("value").toString(); + + for (TestIndexData testData : testIndices) { + 
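// Illustrative sketch only (assumption: using the Settings API rather than the REST call in
// enableSearchSlowLog below): the two per-index settings this suite toggles so that every search
// produces a slow log entry and that entry includes the authenticated user's details.
Settings exampleSearchSlowLogSettings = Settings.builder()
    .put("index.search.slowlog.threshold.query.warn", "0")
    .put("index.search.slowlog.include.user", true)
    .build();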
final RequestOptions requestOptions = RequestOptions.DEFAULT.toBuilder() + .addHeader("Authorization", "Bearer " + tokenValue) + .build(); + searchSomeData(testData.name, requestOptions); + indexSomeData(testData.name, requestOptions); + } + + Map expectedUser = Map.of( + "user.name", + "elastic/enterprise-search-server", + "user.realm", + "_service_account", + "auth.type", + "TOKEN" + ); + + verifySearchSlowLogMatchesTestData(testIndices, expectedUser); + verifyIndexSlowLogMatchesTestData(testIndices, expectedUser); + } + + private static void enableSearchSlowLog(String index, boolean includeUser) throws IOException { + final Request request = new Request("PUT", "/" + index + "/_settings"); + request.setJsonEntity( + "{\"index.search.slowlog.threshold.query." + + randomFrom("trace", "warn", "debug", "info") + + "\": \"0\", " + + "\"index.search.slowlog.include.user\": " + + includeUser + + "}" + ); + client().performRequest(request); + } + + private static void enableIndexingSlowLog(String index, boolean includeUser) throws IOException { + final Request request = new Request("PUT", "/" + index + "/_settings"); + request.setJsonEntity( + "{\"index.indexing.slowlog.threshold.index." + + randomFrom("trace", "warn", "debug", "info") + + "\": \"0\", " + + "\"index.indexing.slowlog.include.user\": " + + includeUser + + "}" + ); + client().performRequest(request); + } + + private static void indexSomeData(String index) throws IOException { + indexSomeData(index, RequestOptions.DEFAULT.toBuilder().build()); + } + + private static void searchSomeData(String index) throws IOException { + searchSomeData(index, RequestOptions.DEFAULT.toBuilder().build()); + } + + private static void indexSomeData(String index, RequestOptions requestOptions) throws IOException { + final Request request = new Request("PUT", "/" + index + "/_doc/1"); + request.setOptions(requestOptions); + request.setJsonEntity("{ \"foobar\" : true }"); + client().performRequest(request); + } + + private static void searchSomeData(String index, RequestOptions requestOptions) throws IOException { + Request request = new Request("GET", "/" + index + "/_search"); + request.setOptions(requestOptions); + client().performRequest(request); + } + + private static void setupTestIndex(TestIndexData testIndexData) throws IOException { + indexSomeData(testIndexData.name); + if (testIndexData.indexSlowLogEnabled) { + enableIndexingSlowLog(testIndexData.name, testIndexData.indexSlowLogUserEnabled); + } + if (testIndexData.searchSlowLogEnabled) { + enableSearchSlowLog(testIndexData.name, testIndexData.searchSlowLogUserEnabled); + } + } + + private static void createUserWithFullName(String user, String password, String fullName, String[] roles) throws IOException { + Request request = new Request("POST", "/_security/user/" + user); + request.setJsonEntity( + "{ \"full_name\" : \"" + + fullName + + "\", \"roles\": [\"" + + String.join("\",\"", roles) + + "\"], \"password\": \"" + + password + + "\" }" + ); + Response response = client().performRequest(request); + assertOK(response); + } + + private static List randomTestIndexData() throws IOException { + List testData = new ArrayList<>(); + for (int i = 0; i < randomIntBetween(1, 10); i++) { + TestIndexData randomTestData = new TestIndexData( + "search-" + randomAlphaOfLengthBetween(5, 10).toLowerCase() + "-" + i, + randomBoolean(), + randomBoolean(), + randomBoolean(), + randomBoolean() + ); + setupTestIndex(randomTestData); + testData.add(randomTestData); + } + return testData; + } + + private void 
verifySearchSlowLogMatchesTestData(List testIndices, Map expectedUserData) + throws Exception { + verifySlowLog(logLines -> { + for (TestIndexData testIndex : testIndices) { + if (testIndex.searchSlowLogEnabled) { + Map logLine = logLines.get(currentSearchLogIndex); + if (testIndex.searchSlowLogUserEnabled) { + assertThat(expectedUserData.entrySet(), everyItem(in(logLine.entrySet()))); + } else { + assertThat(expectedUserData.entrySet(), everyItem(not(in(logLine.entrySet())))); + } + currentSearchLogIndex++; + } + } + }, LogType.SEARCH_SLOW); + } + + private void verifyIndexSlowLogMatchesTestData(List testIndices, Map expectedUserData) throws Exception { + verifySlowLog(logLines -> { + for (TestIndexData testIndex : testIndices) { + if (testIndex.indexSlowLogEnabled) { + Map logLine = logLines.get(currentIndexLogIndex); + if (testIndex.indexSlowLogUserEnabled) { + assertThat(expectedUserData.entrySet(), everyItem(in(logLine.entrySet()))); + } else { + assertThat(expectedUserData.entrySet(), everyItem(not(in(logLine.entrySet())))); + } + currentIndexLogIndex++; + } + } + }, LogType.INDEXING_SLOW); + } + + private static void verifySlowLog(Consumer>> logVerifier, LogType logType) throws Exception { + assertBusy(() -> { + try (var slowLog = cluster.getNodeLog(0, logType)) { + final List lines = Streams.readAllLines(slowLog); + logVerifier.accept( + lines.stream().map(line -> XContentHelper.convertToMap(XContentType.JSON.xContent(), line, true)).toList() + ); + } + }, 5, TimeUnit.SECONDS); + } + + private static Map createApiKey(String name, String authHeader) throws IOException { + final Request request = new Request("POST", "/_security/api_key"); + + request.setJsonEntity(Strings.format(""" + {"name":"%s"}""", name)); + + request.setOptions(request.getOptions().toBuilder().addHeader(HttpHeaders.AUTHORIZATION, authHeader)); + final Response response = client().performRequest(request); + assertOK(response); + return responseAsMap(response); + } + + private static Map createServiceAccountToken() throws IOException { + final Request createServiceTokenRequest = new Request( + "POST", + "/_security/service/elastic/enterprise-search-server/credential/token" + ); + final Response createServiceTokenResponse = adminClient().performRequest(createServiceTokenRequest); + assertOK(createServiceTokenResponse); + + return responseAsMap(createServiceTokenResponse); + } +} diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/test/NativeRealmIntegTestCase.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/test/NativeRealmIntegTestCase.java index e28fead63d4ea..f4c3b77af3abe 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/test/NativeRealmIntegTestCase.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/test/NativeRealmIntegTestCase.java @@ -11,7 +11,7 @@ import org.elasticsearch.client.RestClient; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.transport.netty4.Netty4Transport; +import org.elasticsearch.transport.netty4.Netty4Plugin; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.core.security.user.APMSystemUser; import org.elasticsearch.xpack.core.security.user.BeatsSystemUser; @@ -63,7 +63,7 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { // we are randomly running a large number of nodes in these tests so we limit 
the number of worker threads // since the default of 2 * CPU count might use up too much direct memory for thread-local direct buffers for each node's // transport threads - builder.put(Netty4Transport.WORKER_COUNT.getKey(), random().nextInt(3) + 1); + builder.put(Netty4Plugin.WORKER_COUNT.getKey(), random().nextInt(3) + 1); return builder.build(); } diff --git a/x-pack/plugin/security/src/main/java/module-info.java b/x-pack/plugin/security/src/main/java/module-info.java index 4b99ab5ed6b2c..9806650f99094 100644 --- a/x-pack/plugin/security/src/main/java/module-info.java +++ b/x-pack/plugin/security/src/main/java/module-info.java @@ -65,8 +65,10 @@ exports org.elasticsearch.xpack.security.action.user to org.elasticsearch.server; exports org.elasticsearch.xpack.security.action.settings to org.elasticsearch.server; exports org.elasticsearch.xpack.security.operator to org.elasticsearch.internal.operator, org.elasticsearch.internal.security; - exports org.elasticsearch.xpack.security.authc to org.elasticsearch.xcontent; + exports org.elasticsearch.xpack.security.slowlog to org.elasticsearch.server; + + provides org.elasticsearch.index.SlowLogFieldProvider with org.elasticsearch.xpack.security.slowlog.SecuritySlowLogFieldProvider; provides org.elasticsearch.cli.CliToolProvider with diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 3beff69849a58..d3898cc510d77 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -119,13 +119,16 @@ import org.elasticsearch.xpack.core.security.action.ClearSecurityCacheAction; import org.elasticsearch.xpack.core.security.action.DelegatePkiAuthenticationAction; import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyRequestTranslator; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequestBuilderFactory; import org.elasticsearch.xpack.core.security.action.apikey.CreateCrossClusterApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.QueryApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.UpdateApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.UpdateApiKeyRequestTranslator; import org.elasticsearch.xpack.core.security.action.apikey.UpdateCrossClusterApiKeyAction; import org.elasticsearch.xpack.core.security.action.enrollment.KibanaEnrollmentAction; import org.elasticsearch.xpack.core.security.action.enrollment.NodeEnrollmentAction; @@ -172,15 +175,19 @@ import org.elasticsearch.xpack.core.security.action.user.GetUserPrivilegesAction; import org.elasticsearch.xpack.core.security.action.user.GetUsersAction; import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesAction; +import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesRequestBuilderFactory; import org.elasticsearch.xpack.core.security.action.user.ProfileHasPrivilegesAction; import 
org.elasticsearch.xpack.core.security.action.user.PutUserAction; +import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.AuthenticationFailureHandler; +import org.elasticsearch.xpack.core.security.authc.AuthenticationField; import org.elasticsearch.xpack.core.security.authc.AuthenticationServiceField; import org.elasticsearch.xpack.core.security.authc.DefaultAuthenticationFailureHandler; import org.elasticsearch.xpack.core.security.authc.InternalRealmsSettings; import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; +import org.elasticsearch.xpack.core.security.authc.Subject; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.core.security.authz.AuthorizationEngine; import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField; @@ -561,7 +568,11 @@ public class Security extends Plugin private final SetOnce scriptServiceReference = new SetOnce<>(); private final SetOnce operatorOnlyRegistry = new SetOnce<>(); private final SetOnce putRoleRequestBuilderFactory = new SetOnce<>(); + private final SetOnce createApiKeyRequestBuilderFactory = new SetOnce<>(); + private final SetOnce updateApiKeyRequestTranslator = new SetOnce<>(); + private final SetOnce bulkUpdateApiKeyRequestTranslator = new SetOnce<>(); private final SetOnce getBuiltinPrivilegesResponseTranslator = new SetOnce<>(); + private final SetOnce hasPrivilegesRequestBuilderFactory = new SetOnce<>(); private final SetOnce fileRolesStore = new SetOnce<>(); private final SetOnce operatorPrivilegesService = new SetOnce<>(); private final SetOnce reservedRoleMappingAction = new SetOnce<>(); @@ -821,10 +832,21 @@ Collection createComponents( if (putRoleRequestBuilderFactory.get() == null) { putRoleRequestBuilderFactory.set(new PutRoleRequestBuilderFactory.Default()); } - + if (createApiKeyRequestBuilderFactory.get() == null) { + createApiKeyRequestBuilderFactory.set(new CreateApiKeyRequestBuilderFactory.Default()); + } if (getBuiltinPrivilegesResponseTranslator.get() == null) { getBuiltinPrivilegesResponseTranslator.set(new GetBuiltinPrivilegesResponseTranslator.Default()); } + if (updateApiKeyRequestTranslator.get() == null) { + updateApiKeyRequestTranslator.set(new UpdateApiKeyRequestTranslator.Default()); + } + if (bulkUpdateApiKeyRequestTranslator.get() == null) { + bulkUpdateApiKeyRequestTranslator.set(new BulkUpdateApiKeyRequestTranslator.Default()); + } + if (hasPrivilegesRequestBuilderFactory.get() == null) { + hasPrivilegesRequestBuilderFactory.trySet(new HasPrivilegesRequestBuilderFactory.Default()); + } final Map, ActionListener>>> customRoleProviders = new LinkedHashMap<>(); for (SecurityExtension extension : securityExtensions) { @@ -1435,7 +1457,7 @@ public List getRestHandlers( new RestDeleteRoleAction(settings, getLicenseState()), new RestChangePasswordAction(settings, securityContext.get(), getLicenseState()), new RestSetEnabledAction(settings, getLicenseState()), - new RestHasPrivilegesAction(settings, securityContext.get(), getLicenseState()), + new RestHasPrivilegesAction(settings, securityContext.get(), getLicenseState(), hasPrivilegesRequestBuilderFactory.get()), new RestGetUserPrivilegesAction(settings, securityContext.get(), getLicenseState()), new RestGetRoleMappingsAction(settings, getLicenseState()), new RestPutRoleMappingAction(settings, 
getLicenseState()), @@ -1456,10 +1478,10 @@ public List<RestHandler> getRestHandlers( new RestGetPrivilegesAction(settings, getLicenseState()), new RestPutPrivilegesAction(settings, getLicenseState()), new RestDeletePrivilegesAction(settings, getLicenseState()), - new RestCreateApiKeyAction(settings, getLicenseState()), + new RestCreateApiKeyAction(settings, getLicenseState(), createApiKeyRequestBuilderFactory.get()), new RestCreateCrossClusterApiKeyAction(settings, getLicenseState()), - new RestUpdateApiKeyAction(settings, getLicenseState()), - new RestBulkUpdateApiKeyAction(settings, getLicenseState()), + new RestUpdateApiKeyAction(settings, getLicenseState(), updateApiKeyRequestTranslator.get()), + new RestBulkUpdateApiKeyAction(settings, getLicenseState(), bulkUpdateApiKeyRequestTranslator.get()), new RestUpdateCrossClusterApiKeyAction(settings, getLicenseState()), new RestGrantApiKeyAction(settings, getLicenseState()), new RestInvalidateApiKeyAction(settings, getLicenseState()), @@ -2003,6 +2025,37 @@ private void reloadRemoteClusterCredentials(Settings settingsWithKeystore) { future.actionGet(); } + public Map<String, String> getAuthContextForSlowLog() { + if (this.securityContext.get() != null && this.securityContext.get().getAuthentication() != null) { + Authentication authentication = this.securityContext.get().getAuthentication(); + Subject authenticatingSubject = authentication.getAuthenticatingSubject(); + Subject effectiveSubject = authentication.getEffectiveSubject(); + Map<String, String> authContext = new HashMap<>(); + if (authenticatingSubject.getUser() != null) { + authContext.put("user.name", authenticatingSubject.getUser().principal()); + authContext.put("user.realm", authenticatingSubject.getRealm().getName()); + if (authenticatingSubject.getUser().fullName() != null) { + authContext.put("user.full_name", authenticatingSubject.getUser().fullName()); + } + } + // Only include effective user if different from authenticating user (run-as) + if (effectiveSubject.getUser() != null && effectiveSubject.equals(authenticatingSubject) == false) { + authContext.put("user.effective.name", effectiveSubject.getUser().principal()); + authContext.put("user.effective.realm", effectiveSubject.getRealm().getName()); + if (effectiveSubject.getUser().fullName() != null) { + authContext.put("user.effective.full_name", effectiveSubject.getUser().fullName()); + } + } + authContext.put("auth.type", authentication.getAuthenticationType().name()); + if (authentication.isApiKey()) { + authContext.put("apikey.id", authenticatingSubject.getMetadata().get(AuthenticationField.API_KEY_ID_KEY).toString()); + authContext.put("apikey.name", authenticatingSubject.getMetadata().get(AuthenticationField.API_KEY_NAME_KEY).toString()); + } + return authContext; + } + return Map.of(); + } + static final class ValidateLicenseForFIPS implements BiConsumer { private final boolean inFipsMode; private final LicenseService licenseService; @@ -2039,6 +2092,10 @@ public void loadExtensions(ExtensionLoader loader) { loadSingletonExtensionAndSetOnce(loader, operatorOnlyRegistry, OperatorOnlyRegistry.class); loadSingletonExtensionAndSetOnce(loader, putRoleRequestBuilderFactory, PutRoleRequestBuilderFactory.class); loadSingletonExtensionAndSetOnce(loader, getBuiltinPrivilegesResponseTranslator, GetBuiltinPrivilegesResponseTranslator.class); + loadSingletonExtensionAndSetOnce(loader, updateApiKeyRequestTranslator, UpdateApiKeyRequestTranslator.class); + loadSingletonExtensionAndSetOnce(loader, bulkUpdateApiKeyRequestTranslator, 
BulkUpdateApiKeyRequestTranslator.class); + loadSingletonExtensionAndSetOnce(loader, createApiKeyRequestBuilderFactory, CreateApiKeyRequestBuilderFactory.class); + loadSingletonExtensionAndSetOnce(loader, hasPrivilegesRequestBuilderFactory, HasPrivilegesRequestBuilderFactory.class); } private void loadSingletonExtensionAndSetOnce(ExtensionLoader loader, SetOnce setOnce, Class clazz) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheck.java index c6396f886b4bc..2d535100d468d 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheck.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheck.java @@ -34,7 +34,8 @@ public BootstrapCheckResult check(BootstrapContext context) { } if (licenseService instanceof ClusterStateLicenseService clusterStateLicenseService) { final License license = clusterStateLicenseService.getLicense(context.metadata()); - final Version lastKnownVersion = nodeMetadata.previousNodeVersion(); + // TODO[wrb]: Add an "isCurrentMajor" method to BuildVersion? + final Version lastKnownVersion = nodeMetadata.previousNodeVersion().toVersion(); // pre v7.2.0 nodes have Version.EMPTY and its id is 0, so Version#before handles this successfully if (lastKnownVersion.before(Version.V_8_0_0) && XPackSettings.SECURITY_ENABLED.exists(context.settings()) == false diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/service/TransportGetServiceAccountAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/service/TransportGetServiceAccountAction.java index f8a4a8a449f83..372a550eedbc9 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/service/TransportGetServiceAccountAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/service/TransportGetServiceAccountAction.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.Predicates; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.security.action.service.GetServiceAccountAction; @@ -38,7 +39,7 @@ public TransportGetServiceAccountAction(TransportService transportService, Actio @Override protected void doExecute(Task task, GetServiceAccountRequest request, ActionListener listener) { - Predicate filter = v -> true; + Predicate filter = Predicates.always(); if (request.getNamespace() != null) { filter = filter.and(v -> v.id().namespace().equals(request.getNamespace())); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java index 87c372f561757..01104806c4a1c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.util.Maps; import 
org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Predicates; import org.elasticsearch.core.TimeValue; import org.elasticsearch.http.HttpPreRequest; import org.elasticsearch.node.Node; @@ -1908,7 +1909,7 @@ Predicate ignorePredicate() { } private static Predicate buildIgnorePredicate(Map policyMap) { - return policyMap.values().stream().map(EventFilterPolicy::ignorePredicate).reduce(x -> false, (x, y) -> x.or(y)); + return policyMap.values().stream().map(EventFilterPolicy::ignorePredicate).reduce(Predicates.never(), Predicate::or); } @Override diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/service/FileTokensTool.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/service/FileTokensTool.java index 51adcab5c3c13..14ca1663e16a5 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/service/FileTokensTool.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/service/FileTokensTool.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.cli.EnvironmentAwareCommand; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Predicates; import org.elasticsearch.env.Environment; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.security.authc.support.Hasher; @@ -132,7 +133,7 @@ public void execute(Terminal terminal, OptionSet options, Environment env, Proce + "]" ); } - Predicate filter = k -> true; + Predicate filter = Predicates.always(); if (args.size() == 1) { final String principal = args.get(0); if (false == ServiceAccountService.isServiceAccountPrincipal(principal)) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/ExternalEnrollmentTokenGenerator.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/ExternalEnrollmentTokenGenerator.java index c884435cdd04b..6d780adf49acb 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/ExternalEnrollmentTokenGenerator.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/ExternalEnrollmentTokenGenerator.java @@ -72,7 +72,7 @@ protected EnrollmentToken create(String user, SecureString password, String acti final String fingerprint = getHttpsCaFingerprint(sslService); final String apiKey = getApiKeyCredentials(user, password, action, baseUrl); final Tuple, String> httpInfo = getNodeInfo(user, password, baseUrl); - return new EnrollmentToken(apiKey, fingerprint, httpInfo.v2(), httpInfo.v1()); + return new EnrollmentToken(apiKey, fingerprint, httpInfo.v1()); } private static HttpResponse.HttpResponseBuilder responseBuilder(InputStream is) throws IOException { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGenerator.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGenerator.java index 446dfa7e7e310..ff973ce4319f6 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGenerator.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGenerator.java @@ -9,7 +9,6 @@ import org.apache.logging.log4j.LogManager; import 
org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoMetrics; @@ -198,7 +197,7 @@ private void assembleToken(EnrollmentTokenType enrollTokenType, HttpInfo httpInf apiKeyRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); client.execute(CreateApiKeyAction.INSTANCE, apiKeyRequest, ActionListener.wrap(createApiKeyResponse -> { final String apiKey = createApiKeyResponse.getId() + ":" + createApiKeyResponse.getKey().toString(); - final EnrollmentToken enrollmentToken = new EnrollmentToken(apiKey, fingerprint, Version.CURRENT.toString(), tokenAddresses); + final EnrollmentToken enrollmentToken = new EnrollmentToken(apiKey, fingerprint, tokenAddresses); consumer.accept(enrollmentToken); }, e -> { LOGGER.error("Failed to create enrollment token when generating API key", e); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestBulkUpdateApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestBulkUpdateApiKeyAction.java index 584ad08704ddd..97ee7cc50a7d5 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestBulkUpdateApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestBulkUpdateApiKeyAction.java @@ -9,53 +9,32 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.TimeValue; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyRequest; -import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyRequestTranslator; import java.io.IOException; import java.util.List; -import java.util.Map; import static org.elasticsearch.rest.RestRequest.Method.POST; -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; -import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; @ServerlessScope(Scope.PUBLIC) public final class RestBulkUpdateApiKeyAction extends ApiKeyBaseRestHandler { - @SuppressWarnings("unchecked") - static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "bulk_update_api_key_request", - a -> new BulkUpdateApiKeyRequest( - (List) a[0], - (List) a[1], - (Map) a[2], - TimeValue.parseTimeValue((String) a[3], null, "expiration") - ) - ); + private final BulkUpdateApiKeyRequestTranslator requestTranslator; - static { - PARSER.declareStringArray(constructorArg(), new ParseField("ids")); - PARSER.declareNamedObjects(optionalConstructorArg(), (p, c, n) -> { - p.nextToken(); - return RoleDescriptor.parse(n, p, false); - }, new ParseField("role_descriptors")); - PARSER.declareObject(optionalConstructorArg(), (p, c) -> p.map(), new 
ParseField("metadata")); - PARSER.declareString(optionalConstructorArg(), new ParseField("expiration")); - } - - public RestBulkUpdateApiKeyAction(final Settings settings, final XPackLicenseState licenseState) { + public RestBulkUpdateApiKeyAction( + final Settings settings, + final XPackLicenseState licenseState, + final BulkUpdateApiKeyRequestTranslator requestTranslator + ) { super(settings, licenseState); + this.requestTranslator = requestTranslator; } @Override @@ -70,9 +49,7 @@ public String getName() { @Override protected RestChannelConsumer innerPrepareRequest(final RestRequest request, final NodeClient client) throws IOException { - try (XContentParser parser = request.contentParser()) { - final BulkUpdateApiKeyRequest parsed = PARSER.parse(parser, null); - return channel -> client.execute(BulkUpdateApiKeyAction.INSTANCE, parsed, new RestToXContentListener<>(channel)); - } + final BulkUpdateApiKeyRequest bulkUpdateApiKeyRequest = requestTranslator.translate(request); + return channel -> client.execute(BulkUpdateApiKeyAction.INSTANCE, bulkUpdateApiKeyRequest, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyAction.java index 2cb5a15f1e0f2..217afdb3cfea2 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyAction.java @@ -16,6 +16,7 @@ import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequestBuilder; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequestBuilderFactory; import org.elasticsearch.xpack.security.authc.ApiKeyService; import java.io.IOException; @@ -30,13 +31,16 @@ @ServerlessScope(Scope.PUBLIC) public final class RestCreateApiKeyAction extends ApiKeyBaseRestHandler { + private final CreateApiKeyRequestBuilderFactory builderFactory; + /** - * @param settings the node's settings - * @param licenseState the license state that will be used to determine if - * security is licensed + * @param settings the node's settings + * @param licenseState the license state that will be used to determine if + * security is licensed */ - public RestCreateApiKeyAction(Settings settings, XPackLicenseState licenseState) { + public RestCreateApiKeyAction(Settings settings, XPackLicenseState licenseState, CreateApiKeyRequestBuilderFactory builderFactory) { super(settings, licenseState); + this.builderFactory = builderFactory; } @Override @@ -51,10 +55,8 @@ public String getName() { @Override protected RestChannelConsumer innerPrepareRequest(final RestRequest request, final NodeClient client) throws IOException { - CreateApiKeyRequestBuilder builder = new CreateApiKeyRequestBuilder(client).source( - request.requiredContent(), - request.getXContentType() - ); + CreateApiKeyRequestBuilder builder = builderFactory.create(client, request.hasParam(RestRequest.PATH_RESTRICTED)) + .source(request.requiredContent(), request.getXContentType()); String refresh = request.param("refresh"); if (refresh != null) { builder.setRefreshPolicy(WriteRequest.RefreshPolicy.parse(request.param("refresh"))); diff --git 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateApiKeyAction.java index d64e7f4007387..0fe0f3df0715f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateApiKeyAction.java @@ -9,49 +9,31 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.TimeValue; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xpack.core.security.action.apikey.UpdateApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.UpdateApiKeyRequest; -import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.action.apikey.UpdateApiKeyRequestTranslator; import java.io.IOException; import java.util.List; -import java.util.Map; import static org.elasticsearch.rest.RestRequest.Method.PUT; -import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; @ServerlessScope(Scope.PUBLIC) public final class RestUpdateApiKeyAction extends ApiKeyBaseRestHandler { + private final UpdateApiKeyRequestTranslator requestTranslator; - @SuppressWarnings("unchecked") - static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "update_api_key_request_payload", - a -> new Payload( - (List) a[0], - (Map) a[1], - TimeValue.parseTimeValue((String) a[2], null, "expiration") - ) - ); - - static { - PARSER.declareNamedObjects(optionalConstructorArg(), (p, c, n) -> { - p.nextToken(); - return RoleDescriptor.parse(n, p, false); - }, new ParseField("role_descriptors")); - PARSER.declareObject(optionalConstructorArg(), (p, c) -> p.map(), new ParseField("metadata")); - PARSER.declareString(optionalConstructorArg(), new ParseField("expiration")); - } - - public RestUpdateApiKeyAction(final Settings settings, final XPackLicenseState licenseState) { + public RestUpdateApiKeyAction( + final Settings settings, + final XPackLicenseState licenseState, + final UpdateApiKeyRequestTranslator requestTranslator + ) { super(settings, licenseState); + this.requestTranslator = requestTranslator; } @Override @@ -66,17 +48,8 @@ public String getName() { @Override protected RestChannelConsumer innerPrepareRequest(final RestRequest request, final NodeClient client) throws IOException { - // Note that we use `ids` here even though we only support a single id. This is because this route shares a path prefix with - // `RestClearApiKeyCacheAction` and our current REST implementation requires that path params have the same wildcard if their paths - // share a prefix - final var apiKeyId = request.param("ids"); - final var payload = request.hasContent() == false ? 
new Payload(null, null, null) : PARSER.parse(request.contentParser(), null); - return channel -> client.execute( - UpdateApiKeyAction.INSTANCE, - new UpdateApiKeyRequest(apiKeyId, payload.roleDescriptors, payload.metadata, payload.expiration), - new RestToXContentListener<>(channel) - ); + final UpdateApiKeyRequest updateApiKeyRequest = requestTranslator.translate(request); + return channel -> client.execute(UpdateApiKeyAction.INSTANCE, updateApiKeyRequest, new RestToXContentListener<>(channel)); } - record Payload(List roleDescriptors, Map metadata, TimeValue expiration) {} } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesAction.java index d3a969fce8841..5c9d68d3c8b66 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesAction.java @@ -24,6 +24,7 @@ import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.security.SecurityContext; import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesRequestBuilder; +import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesRequestBuilderFactory; import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.user.User; @@ -43,10 +44,17 @@ public class RestHasPrivilegesAction extends SecurityBaseRestHandler { private final SecurityContext securityContext; + private final HasPrivilegesRequestBuilderFactory builderFactory; - public RestHasPrivilegesAction(Settings settings, SecurityContext securityContext, XPackLicenseState licenseState) { + public RestHasPrivilegesAction( + Settings settings, + SecurityContext securityContext, + XPackLicenseState licenseState, + HasPrivilegesRequestBuilderFactory builderFactory + ) { super(settings, licenseState); this.securityContext = securityContext; + this.builderFactory = builderFactory; } @Override @@ -83,7 +91,8 @@ public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient c if (username == null) { return restChannel -> { throw new ElasticsearchSecurityException("there is no authenticated user"); }; } - HasPrivilegesRequestBuilder requestBuilder = new HasPrivilegesRequestBuilder(client).source(username, content.v2(), content.v1()); + HasPrivilegesRequestBuilder requestBuilder = builderFactory.create(client, request.hasParam(RestRequest.PATH_RESTRICTED)) + .source(username, content.v2(), content.v1()); return channel -> requestBuilder.execute(new RestBuilderListener<>(channel) { @Override public RestResponse buildResponse(HasPrivilegesResponse response, XContentBuilder builder) throws Exception { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/slowlog/SecuritySlowLogFieldProvider.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/slowlog/SecuritySlowLogFieldProvider.java new file mode 100644 index 0000000000000..1610aedd1d363 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/slowlog/SecuritySlowLogFieldProvider.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.security.slowlog; + +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.SlowLogFieldProvider; +import org.elasticsearch.xpack.security.Security; + +import java.util.Map; + +import static org.elasticsearch.index.IndexingSlowLog.INDEX_INDEXING_SLOWLOG_INCLUDE_USER_SETTING; +import static org.elasticsearch.index.SearchSlowLog.INDEX_SEARCH_SLOWLOG_INCLUDE_USER_SETTING; + +public class SecuritySlowLogFieldProvider implements SlowLogFieldProvider { + private final Security plugin; + private boolean includeUserInIndexing = false; + private boolean includeUserInSearch = false; + + public SecuritySlowLogFieldProvider() { + throw new IllegalStateException("Provider must be constructed using PluginsService"); + } + + public SecuritySlowLogFieldProvider(Security plugin) { + this.plugin = plugin; + } + + @Override + public void init(IndexSettings indexSettings) { + indexSettings.getScopedSettings() + .addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_INCLUDE_USER_SETTING, newValue -> this.includeUserInSearch = newValue); + this.includeUserInSearch = indexSettings.getValue(INDEX_SEARCH_SLOWLOG_INCLUDE_USER_SETTING); + indexSettings.getScopedSettings() + .addSettingsUpdateConsumer(INDEX_INDEXING_SLOWLOG_INCLUDE_USER_SETTING, newValue -> this.includeUserInIndexing = newValue); + this.includeUserInIndexing = indexSettings.getValue(INDEX_INDEXING_SLOWLOG_INCLUDE_USER_SETTING); + } + + @Override + public Map indexSlowLogFields() { + if (includeUserInIndexing) { + return plugin.getAuthContextForSlowLog(); + } + return Map.of(); + } + + @Override + public Map searchSlowLogFields() { + if (includeUserInSearch) { + return plugin.getAuthContextForSlowLog(); + } + return Map.of(); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java index 162cabf5297ce..ca08f63a09bb0 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java @@ -47,6 +47,7 @@ import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.security.Security; +import org.elasticsearch.xpack.security.action.SecurityActionMapper; import org.elasticsearch.xpack.security.audit.AuditUtil; import org.elasticsearch.xpack.security.authc.ApiKeyService; import org.elasticsearch.xpack.security.authc.AuthenticationService; @@ -385,7 +386,11 @@ private void sendWithCrossClusterAccessHeaders( ) ); if (roleDescriptorsIntersection.isEmpty()) { - throw authzService.remoteActionDenied(authentication, action, remoteClusterAlias); + throw authzService.remoteActionDenied( + authentication, + SecurityActionMapper.action(action, request), + remoteClusterAlias + ); } final var crossClusterAccessHeaders = new CrossClusterAccessHeaders( remoteClusterCredentials.credentials(), diff --git a/x-pack/plugin/security/src/main/resources/META-INF/services/org.elasticsearch.index.SlowLogFieldProvider 
b/x-pack/plugin/security/src/main/resources/META-INF/services/org.elasticsearch.index.SlowLogFieldProvider new file mode 100644 index 0000000000000..41f0ec83ac3f1 --- /dev/null +++ b/x-pack/plugin/security/src/main/resources/META-INF/services/org.elasticsearch.index.SlowLogFieldProvider @@ -0,0 +1,8 @@ +# +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License +# 2.0; you may not use this file except in compliance with the Elastic License +# 2.0. +# + +org.elasticsearch.xpack.security.slowlog.SecuritySlowLogFieldProvider diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheckTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheckTests.java index 413358f784dea..6777c38b809e0 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheckTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheckTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.env.BuildVersion; import org.elasticsearch.env.NodeMetadata; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.license.ClusterStateLicenseService; @@ -32,9 +33,11 @@ public class SecurityImplicitBehaviorBootstrapCheckTests extends AbstractBootstrapCheckTestCase { public void testFailureUpgradeFrom7xWithImplicitSecuritySettings() throws Exception { - final Version previousVersion = randomValueOtherThan( - Version.V_8_0_0, - () -> VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.V_8_0_0) + final BuildVersion previousVersion = toBuildVersion( + randomValueOtherThan( + Version.V_8_0_0, + () -> VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.V_8_0_0) + ) ); NodeMetadata nodeMetadata = new NodeMetadata(randomAlphaOfLength(10), previousVersion, IndexVersion.current()); nodeMetadata = nodeMetadata.upgradeToCurrentVersion(); @@ -67,9 +70,11 @@ public void testFailureUpgradeFrom7xWithImplicitSecuritySettings() throws Except } public void testUpgradeFrom7xWithImplicitSecuritySettingsOnGoldPlus() throws Exception { - final Version previousVersion = randomValueOtherThan( - Version.V_8_0_0, - () -> VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.V_8_0_0) + final BuildVersion previousVersion = toBuildVersion( + randomValueOtherThan( + Version.V_8_0_0, + () -> VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.V_8_0_0) + ) ); NodeMetadata nodeMetadata = new NodeMetadata(randomAlphaOfLength(10), previousVersion, IndexVersion.current()); nodeMetadata = nodeMetadata.upgradeToCurrentVersion(); @@ -88,9 +93,11 @@ public void testUpgradeFrom7xWithImplicitSecuritySettingsOnGoldPlus() throws Exc } public void testUpgradeFrom7xWithExplicitSecuritySettings() throws Exception { - final Version previousVersion = randomValueOtherThan( - Version.V_8_0_0, - () -> VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.V_8_0_0) + final BuildVersion previousVersion = toBuildVersion( + randomValueOtherThan( + Version.V_8_0_0, + () -> 
VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.V_8_0_0) + ) ); NodeMetadata nodeMetadata = new NodeMetadata(randomAlphaOfLength(10), previousVersion, IndexVersion.current()); nodeMetadata = nodeMetadata.upgradeToCurrentVersion(); @@ -105,7 +112,7 @@ public void testUpgradeFrom7xWithExplicitSecuritySettings() throws Exception { } public void testUpgradeFrom8xWithImplicitSecuritySettings() throws Exception { - final Version previousVersion = VersionUtils.randomVersionBetween(random(), Version.V_8_0_0, null); + final BuildVersion previousVersion = toBuildVersion(VersionUtils.randomVersionBetween(random(), Version.V_8_0_0, null)); NodeMetadata nodeMetadata = new NodeMetadata(randomAlphaOfLength(10), previousVersion, IndexVersion.current()); nodeMetadata = nodeMetadata.upgradeToCurrentVersion(); ClusterStateLicenseService licenseService = mock(ClusterStateLicenseService.class); @@ -119,7 +126,7 @@ public void testUpgradeFrom8xWithImplicitSecuritySettings() throws Exception { } public void testUpgradeFrom8xWithExplicitSecuritySettings() throws Exception { - final Version previousVersion = VersionUtils.randomVersionBetween(random(), Version.V_8_0_0, null); + final BuildVersion previousVersion = toBuildVersion(VersionUtils.randomVersionBetween(random(), Version.V_8_0_0, null)); NodeMetadata nodeMetadata = new NodeMetadata(randomAlphaOfLength(10), previousVersion, IndexVersion.current()); nodeMetadata = nodeMetadata.upgradeToCurrentVersion(); ClusterStateLicenseService licenseService = mock(ClusterStateLicenseService.class); @@ -136,4 +143,8 @@ private Metadata createLicensesMetadata(TrialLicenseVersion era, String licenseM License license = TestUtils.generateSignedLicense(licenseMode, TimeValue.timeValueHours(2)); return Metadata.builder().putCustom(LicensesMetadata.TYPE, new LicensesMetadata(license, era)).build(); } + + private static BuildVersion toBuildVersion(Version version) { + return BuildVersion.fromVersionId(version.id()); + } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index 4aefc436d82f5..66b03e8dedd32 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -11,7 +11,6 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchSecurityException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionModule; import org.elasticsearch.action.ActionResponse; @@ -33,6 +32,7 @@ import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.env.BuildVersion; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeMetadata; import org.elasticsearch.env.TestEnvironment; @@ -41,6 +41,7 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.index.SlowLogFieldProvider; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.engine.InternalEngineFactory; import org.elasticsearch.indices.TestIndexNameExpressionResolver; @@ -202,7 +203,7 @@ 
protected SSLService getSslService() { private Collection createComponentsUtil(Settings settings) throws Exception { Environment env = TestEnvironment.newEnvironment(settings); - NodeMetadata nodeMetadata = new NodeMetadata(randomAlphaOfLength(8), Version.CURRENT, IndexVersion.current()); + NodeMetadata nodeMetadata = new NodeMetadata(randomAlphaOfLength(8), BuildVersion.current(), IndexVersion.current()); ThreadPool threadPool = mock(ThreadPool.class); ClusterService clusterService = mock(ClusterService.class); settings = Security.additionalSettings(settings, true); @@ -373,7 +374,8 @@ public void testOnIndexModuleIsNoOpWithSecurityDisabled() throws Exception { Collections.emptyMap(), () -> true, TestIndexNameExpressionResolver.newInstance(threadPool.getThreadContext()), - Collections.emptyMap() + Collections.emptyMap(), + mock(SlowLogFieldProvider.class) ); security.onIndexModule(indexModule); // indexReaderWrapper is a SetOnce so if Security#onIndexModule had already set an ReaderWrapper we would get an exception here diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/ExternalEnrollmentTokenGeneratorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/ExternalEnrollmentTokenGeneratorTests.java index 669f67d80c1f8..e46c05d9c9683 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/ExternalEnrollmentTokenGeneratorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/ExternalEnrollmentTokenGeneratorTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.security.CommandLineHttpClient; +import org.elasticsearch.xpack.core.security.EnrollmentToken; import org.elasticsearch.xpack.core.security.HttpResponse; import org.elasticsearch.xpack.core.security.user.ElasticUser; import org.hamcrest.Matchers; @@ -152,7 +153,7 @@ public void testCreateSuccess() throws Exception { ).getEncoded(); Map infoNode = getDecoded(tokenNode); - assertEquals("8.0.0", infoNode.get("ver")); + assertEquals(EnrollmentToken.CURRENT_TOKEN_VERSION, infoNode.get("ver")); assertEquals("[192.168.0.1:9201, 172.16.254.1:9202, [2001:db8:0:1234:0:567:8:1]:9203]", infoNode.get("adr")); assertEquals("ce480d53728605674fcfd8ffb51000d8a33bf32de7c7f1e26b4d428f8a91362d", infoNode.get("fgr")); assertEquals("DR6CzXkBDf8amV_48yYX:x3YqU_rqQwm-ESrkExcnOg", infoNode.get("key")); @@ -164,7 +165,7 @@ public void testCreateSuccess() throws Exception { ).getEncoded(); Map infoKibana = getDecoded(tokenKibana); - assertEquals("8.0.0", infoKibana.get("ver")); + assertEquals(EnrollmentToken.CURRENT_TOKEN_VERSION, infoKibana.get("ver")); assertEquals("[192.168.0.1:9201, 172.16.254.1:9202, [2001:db8:0:1234:0:567:8:1]:9203]", infoKibana.get("adr")); assertEquals("ce480d53728605674fcfd8ffb51000d8a33bf32de7c7f1e26b4d428f8a91362d", infoKibana.get("fgr")); assertEquals("DR6CzXkBDf8amV_48yYX:x3YqU_rqQwm-ESrkExcnOg", infoKibana.get("key")); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java index 3a4e5a404eace..888483613a187 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java +++ 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java @@ -9,7 +9,6 @@ import org.elasticsearch.Build; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; @@ -155,7 +154,7 @@ public void testCreationSuccess() { assertThat(token.getApiKey(), equalTo("api-key-id:api-key-secret")); assertThat(token.getBoundAddress().size(), equalTo(1)); assertThat(token.getBoundAddress().get(0), equalTo("192.168.1.2:9200")); - assertThat(token.getVersion(), equalTo(Version.CURRENT.toString())); + assertThat(token.getVersion(), equalTo(EnrollmentToken.CURRENT_TOKEN_VERSION)); assertThat(token.getFingerprint(), equalTo("ce480d53728605674fcfd8ffb51000d8a33bf32de7c7f1e26b4d428f8a91362d")); } @@ -209,7 +208,7 @@ public void testRetryToGetNodesHttpInfo() { assertThat(token.getApiKey(), equalTo("api-key-id:api-key-secret")); assertThat(token.getBoundAddress().size(), equalTo(1)); assertThat(token.getBoundAddress().get(0), equalTo("192.168.1.2:9200")); - assertThat(token.getVersion(), equalTo(Version.CURRENT.toString())); + assertThat(token.getVersion(), equalTo(EnrollmentToken.CURRENT_TOKEN_VERSION)); assertThat(token.getFingerprint(), equalTo("ce480d53728605674fcfd8ffb51000d8a33bf32de7c7f1e26b4d428f8a91362d")); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java index 0ab9533e62d4c..d487eab9f7887 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.security.action.apikey.ApiKey; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequestBuilderFactory; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyResponse; import java.time.Duration; @@ -105,7 +106,11 @@ public void doE } } }; - final RestCreateApiKeyAction restCreateApiKeyAction = new RestCreateApiKeyAction(Settings.EMPTY, mockLicenseState); + final RestCreateApiKeyAction restCreateApiKeyAction = new RestCreateApiKeyAction( + Settings.EMPTY, + mockLicenseState, + new CreateApiKeyRequestBuilderFactory.Default() + ); restCreateApiKeyAction.handleRequest(restRequest, restChannel, client); final RestResponse restResponse = responseSetOnce.get(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateApiKeyActionTests.java index eb0b7bea1a5fb..c349ad57a486c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateApiKeyActionTests.java @@ -16,6 +16,7 @@ import 
org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.security.action.apikey.UpdateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.UpdateApiKeyRequestTranslator; import org.elasticsearch.xpack.core.security.action.apikey.UpdateApiKeyResponse; import org.junit.Before; @@ -34,7 +35,7 @@ public void init() { final Settings settings = Settings.builder().put(XPackSettings.SECURITY_ENABLED.getKey(), true).build(); final XPackLicenseState licenseState = mock(XPackLicenseState.class); requestHolder = new AtomicReference<>(); - restAction = new RestUpdateApiKeyAction(settings, licenseState); + restAction = new RestUpdateApiKeyAction(settings, licenseState, new UpdateApiKeyRequestTranslator.Default()); controller().registerHandler(restAction); verifyingClient.setExecuteVerifier(((actionType, actionRequest) -> { assertThat(actionRequest, instanceOf(UpdateApiKeyRequest.class)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesActionTests.java index 56eeb3405875c..02b7b88c29d0d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesActionTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.security.SecurityContext; +import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesRequestBuilderFactory; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -41,7 +42,12 @@ public class RestHasPrivilegesActionTests extends ESTestCase { */ public void testBodyConsumed() throws Exception { final XPackLicenseState licenseState = mock(XPackLicenseState.class); - final RestHasPrivilegesAction action = new RestHasPrivilegesAction(Settings.EMPTY, mock(SecurityContext.class), licenseState); + final RestHasPrivilegesAction action = new RestHasPrivilegesAction( + Settings.EMPTY, + mock(SecurityContext.class), + licenseState, + new HasPrivilegesRequestBuilderFactory.Default() + ); try (XContentBuilder bodyBuilder = JsonXContent.contentBuilder().startObject().endObject(); var threadPool = createThreadPool()) { final var client = new NoOpNodeClient(threadPool); final RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withPath("/_security/user/_has_privileges/") @@ -63,7 +69,8 @@ public void testSecurityDisabled() throws Exception { final RestHasPrivilegesAction action = new RestHasPrivilegesAction( securityDisabledSettings, mock(SecurityContext.class), - licenseState + licenseState, + new HasPrivilegesRequestBuilderFactory.Default() ); try (XContentBuilder bodyBuilder = JsonXContent.contentBuilder().startObject().endObject(); var threadPool = createThreadPool()) { final var client = new NoOpNodeClient(threadPool); diff --git a/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownTasksIT.java b/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownTasksIT.java index 584ee628e81e5..7c32311237c57 100644 --- 
a/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownTasksIT.java +++ b/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownTasksIT.java @@ -188,7 +188,7 @@ protected void nodeOperation(AllocatedPersistentTask task, TestTaskParams params private void startTask() { logger.info("--> sending start request"); - persistentTasksService.sendStartRequest("task_id", "task_name", new TestTaskParams(), ActionListener.wrap(r -> {}, e -> { + persistentTasksService.sendStartRequest("task_id", "task_name", new TestTaskParams(), null, ActionListener.wrap(r -> {}, e -> { if (e instanceof ResourceAlreadyExistsException == false) { logger.error("failed to create task", e); fail("failed to create task"); diff --git a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java index 3832bbf488045..9e8c54ba594ea 100644 --- a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java +++ b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java @@ -376,6 +376,7 @@ private static boolean hasShardCopyOnAnotherNode(ClusterState clusterState, Shar .allShards(shardRouting.index().getName()) .stream() .filter(sr -> sr.id() == shardRouting.id()) + .filter(sr -> sr.role().equals(shardRouting.role())) // If any shards are both 1) `STARTED` and 2) are not on a node that's shutting down, we have at least one copy // of this shard safely on a node that's not shutting down, so we don't want to report `STALLED` because of this shard. .filter(ShardRouting::started) diff --git a/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusActionTests.java b/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusActionTests.java index 2d4aaada484ad..9807fa72247a7 100644 --- a/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusActionTests.java +++ b/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusActionTests.java @@ -468,6 +468,51 @@ public void testStalledUnassigned() { ); } + public void testStalledIfShardCopyOnAnotherNodeHasDifferentRole() { + Index index = new Index(randomIdentifier(), randomUUID()); + IndexMetadata imd = generateIndexMetadata(index, 3, 0); + IndexRoutingTable indexRoutingTable = IndexRoutingTable.builder(index) + .addShard( + new TestShardRouting.Builder(new ShardId(index, 0), LIVE_NODE_ID, true, ShardRoutingState.STARTED).withRole( + ShardRouting.Role.INDEX_ONLY + ).build() + ) + .addShard( + new TestShardRouting.Builder(new ShardId(index, 0), SHUTTING_DOWN_NODE_ID, false, ShardRoutingState.STARTED).withRole( + ShardRouting.Role.SEARCH_ONLY + ).build() + ) + .build(); + + // Force a decision of NO for all moves and new allocations, simulating a decider that's stuck + canAllocate.set((r, n, a) -> Decision.NO); + // And the remain decider simulates NodeShutdownAllocationDecider + canRemain.set((r, n, a) -> n.nodeId().equals(SHUTTING_DOWN_NODE_ID) ? 
Decision.NO : Decision.YES); + + RoutingTable.Builder routingTable = RoutingTable.builder(); + routingTable.add(indexRoutingTable); + ClusterState state = createTestClusterState(routingTable.build(), List.of(imd), SingleNodeShutdownMetadata.Type.REMOVE); + + ShutdownShardMigrationStatus status = TransportGetShutdownStatusAction.shardMigrationStatus( + new CancellableTask(1, "direct", GetShutdownStatusAction.NAME, "", TaskId.EMPTY_TASK_ID, Map.of()), + state, + SHUTTING_DOWN_NODE_ID, + SingleNodeShutdownMetadata.Type.SIGTERM, + true, + clusterInfoService, + snapshotsInfoService, + allocationService, + allocationDeciders + ); + + assertShardMigration( + status, + SingleNodeShutdownMetadata.Status.STALLED, + 1, + allOf(containsString(index.getName()), containsString("[0] [replica]")) + ); + } + public void testNotStalledIfAllShardsHaveACopyOnAnotherNode() { Index index = new Index(randomAlphaOfLength(5), randomAlphaOfLengthBetween(1, 20)); IndexMetadata imd = generateIndexMetadata(index, 3, 0); diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/TransportSLMGetExpiredSnapshotsAction.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/TransportSLMGetExpiredSnapshotsAction.java index b1ec8f3a28f1b..cf3a114fc5803 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/TransportSLMGetExpiredSnapshotsAction.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/TransportSLMGetExpiredSnapshotsAction.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.core.Tuple; -import org.elasticsearch.repositories.GetSnapshotInfoContext; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; @@ -194,16 +193,14 @@ static void getSnapshotDetailsByPolicy( snapshotsWithMissingDetails ); repository.getSnapshotInfo( - new GetSnapshotInfoContext( - snapshotsWithMissingDetails, - false, - () -> false, - (ignored, snapshotInfo) -> snapshotDetailsByPolicy.add( - snapshotInfo.snapshotId(), - RepositoryData.SnapshotDetails.fromSnapshotInfo(snapshotInfo) - ), - new ThreadedActionListener<>(executor, listener.map(ignored -> snapshotDetailsByPolicy)) - ) + snapshotsWithMissingDetails, + false, + () -> false, + snapshotInfo -> snapshotDetailsByPolicy.add( + snapshotInfo.snapshotId(), + RepositoryData.SnapshotDetails.fromSnapshotInfo(snapshotInfo) + ), + new ThreadedActionListener<>(executor, listener.map(ignored -> snapshotDetailsByPolicy)) ); } } diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/TransportSLMGetExpiredSnapshotsActionTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/TransportSLMGetExpiredSnapshotsActionTests.java index 1a49ad114f33f..e6d7a66a2bdb3 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/TransportSLMGetExpiredSnapshotsActionTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/TransportSLMGetExpiredSnapshotsActionTests.java @@ -11,15 +11,16 @@ import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.cluster.metadata.RepositoryMetadata; import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.repositories.GetSnapshotInfoContext; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; @@ -28,7 +29,6 @@ import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; -import org.elasticsearch.snapshots.SnapshotMissingException; import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.tasks.Task; @@ -39,6 +39,7 @@ import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicy; import org.elasticsearch.xpack.core.slm.SnapshotRetentionConfiguration; +import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -51,6 +52,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.oneOf; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; @@ -285,7 +287,7 @@ private static Repository createMockRepository(ThreadPool threadPool, List { - final GetSnapshotInfoContext getSnapshotInfoContext = invocation.getArgument(0); - final Set snapshotIds = new HashSet<>(getSnapshotInfoContext.snapshotIds()); - for (SnapshotInfo snapshotInfo : snapshotInfos) { - if (snapshotIds.remove(snapshotInfo.snapshotId())) { - threadPool.generic().execute(ActionRunnable.supply(getSnapshotInfoContext, () -> snapshotInfo)); + final Collection snapshotIdCollection = invocation.getArgument(0); + assertFalse("should not abort on failure", invocation.getArgument(1)); + final CheckedConsumer consumer = invocation.getArgument(3); + final ActionListener listener = invocation.getArgument(4); + + try (var refs = new RefCountingRunnable(() -> listener.onResponse(null))) { + final Set snapshotIds = new HashSet<>(snapshotIdCollection); + for (SnapshotInfo snapshotInfo : snapshotInfos) { + if (snapshotIds.remove(snapshotInfo.snapshotId())) { + threadPool.generic().execute(ActionRunnable.run(refs.acquireListener(), () -> { + try { + consumer.accept(snapshotInfo); + } catch (Exception e) { + fail(e); + } + })); + } } } - for (SnapshotId snapshotId : snapshotIds) { - threadPool.generic().execute(ActionRunnable.supply(getSnapshotInfoContext, () -> { - throw new SnapshotMissingException(REPO_NAME, snapshotId, null); - })); - } return null; - }).when(repository).getSnapshotInfo(any()); + }).when(repository).getSnapshotInfo(any(), anyBoolean(), any(), any(), any()); doAnswer(invocation -> new RepositoryMetadata(REPO_NAME, "test", Settings.EMPTY)).when(repository).getMetadata(); diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/GeometryDocValueTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/GeometryDocValueTests.java index 55988e72a2383..c92d65a301a3a 100644 --- 
a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/GeometryDocValueTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/GeometryDocValueTests.java @@ -28,8 +28,8 @@ import org.elasticsearch.lucene.spatial.GeometryDocValueReader; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.spatial.util.GeoTestUtils; -import org.hamcrest.BaseMatcher; import org.hamcrest.Description; +import org.hamcrest.TypeSafeMatcher; import java.io.BufferedReader; import java.io.IOException; @@ -226,7 +226,7 @@ private static RectangleLabelPosition isRectangleLabelPosition(Rectangle... rect return new RectangleLabelPosition(rectangles); } - private static class RectangleLabelPosition extends BaseMatcher { + private static class RectangleLabelPosition extends TypeSafeMatcher { private final Point[] encodedPositions; private RectangleLabelPosition(Rectangle... rectangles) { @@ -257,14 +257,10 @@ private Point average(GeoPoint... points) { } @Override - public boolean matches(Object actual) { - if (actual instanceof GeoPoint) { - GeoPoint point = (GeoPoint) actual; - int x = CoordinateEncoder.GEO.encodeX(point.lon()); - int y = CoordinateEncoder.GEO.encodeY(point.lat()); - return is(oneOf(encodedPositions)).matches(new Point(x, y)); - } - return false; + public boolean matchesSafely(GeoPoint point) { + int x = CoordinateEncoder.GEO.encodeX(point.lon()); + int y = CoordinateEncoder.GEO.encodeY(point.lat()); + return is(oneOf(encodedPositions)).matches(new Point(x, y)); } @Override diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregatorTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregatorTests.java index 0b76e786b26be..86575d418e605 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregatorTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregatorTests.java @@ -57,10 +57,10 @@ import org.elasticsearch.search.aggregations.support.MultiValuesSourceFieldConfig; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.xpack.spatial.SpatialPlugin; -import org.hamcrest.BaseMatcher; import org.hamcrest.Description; import org.hamcrest.Matcher; import org.hamcrest.Matchers; +import org.hamcrest.TypeSafeMatcher; import java.io.IOException; import java.util.ArrayList; @@ -458,11 +458,11 @@ private void assertGeoLine(SortOrder sortOrder, String group, InternalGeoLine ge } } - private Matcher isGeoLine(int checkCount, long[] line) { + private static Matcher isGeoLine(int checkCount, long[] line) { return new TestGeoLineLongArrayMatcher(checkCount, line); } - private static class TestGeoLineLongArrayMatcher extends BaseMatcher { + private static class TestGeoLineLongArrayMatcher extends TypeSafeMatcher { private final int checkCount; private final long[] expectedLine; private final ArrayList failures = new ArrayList<>(); @@ -473,26 +473,23 @@ private TestGeoLineLongArrayMatcher(int checkCount, long[] expectedLine) { } @Override - public boolean matches(Object actualObj) { + public boolean matchesSafely(long[] actualLine) { failures.clear(); - if (actualObj instanceof long[] actualLine) { - if (checkCount == expectedLine.length && actualLine.length != expectedLine.length) { - failures.add("Expected length " + expectedLine.length + " but got " + actualLine.length); - } 
- for (int i = 0; i < checkCount; i++) { - Point actual = asPoint(actualLine[i]); - Point expected = asPoint(expectedLine[i]); - if (actual.equals(expected) == false) { - failures.add("At line position " + i + " expected " + expected + " but got " + actual); - } + if (checkCount == expectedLine.length && actualLine.length != expectedLine.length) { + failures.add("Expected length " + expectedLine.length + " but got " + actualLine.length); + } + for (int i = 0; i < checkCount; i++) { + Point actual = asPoint(actualLine[i]); + Point expected = asPoint(expectedLine[i]); + if (actual.equals(expected) == false) { + failures.add("At line position " + i + " expected " + expected + " but got " + actual); } - return failures.size() == 0; } - return false; + return failures.isEmpty(); } @Override - public void describeMismatch(Object item, Description description) { + public void describeMismatchSafely(long[] item, Description description) { description.appendText("had ").appendValue(failures.size()).appendText(" failures"); for (String failure : failures) { description.appendText("\n\t").appendText(failure); diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/BoundedGeoHexGridTilerTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/BoundedGeoHexGridTilerTests.java index 08850c982c206..9ffcdebc729f6 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/BoundedGeoHexGridTilerTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/BoundedGeoHexGridTilerTests.java @@ -17,8 +17,8 @@ import org.elasticsearch.h3.LatLng; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.spatial.common.H3CartesianUtil; -import org.hamcrest.BaseMatcher; import org.hamcrest.Description; +import org.hamcrest.TypeSafeMatcher; import static org.elasticsearch.common.geo.GeoUtils.normalizeLon; import static org.elasticsearch.xpack.spatial.search.aggregations.bucket.geogrid.GeoHexGridTiler.BoundedGeoHexGridTiler.height; @@ -205,7 +205,7 @@ private static TestCompareBounds withinBounds(GeoBoundingBox other) { return new TestCompareBounds(other, -1); } - private static class TestCompareBounds extends BaseMatcher { + private static class TestCompareBounds extends TypeSafeMatcher { private final GeoBoundingBox other; private final int comparison; @@ -225,25 +225,22 @@ private TestCompareBounds(GeoBoundingBox other, int comparison) { } @Override - public boolean matches(Object actual) { - if (actual instanceof GeoBoundingBox bbox) { - if (comparison == 0) { - matchedTop = closeTo(bbox.top(), 1e-10).matches(other.top()); - matchedBottom = closeTo(bbox.bottom(), 1e-10).matches(other.bottom()); - matchedLeft = closeTo(posLon(bbox.left()), 1e-10).matches(posLon(other.left())); - matchedRight = closeTo(posLon(bbox.right()), 1e-10).matches(posLon(other.right())); + public boolean matchesSafely(GeoBoundingBox bbox) { + if (comparison == 0) { + matchedTop = closeTo(bbox.top(), 1e-10).matches(other.top()); + matchedBottom = closeTo(bbox.bottom(), 1e-10).matches(other.bottom()); + matchedLeft = closeTo(posLon(bbox.left()), 1e-10).matches(posLon(other.left())); + matchedRight = closeTo(posLon(bbox.right()), 1e-10).matches(posLon(other.right())); + } else { + if (comparison > 0) { + // assert that 'bbox' is larger than and entirely contains 'other' + setBoxWithinBox(other, 
bbox); } else { - if (comparison > 0) { - // assert that 'bbox' is larger than and entirely contains 'other' - setBoxWithinBox(other, bbox); - } else { - // assert that 'bbox' is smaller than and entirely contained within 'other' - setBoxWithinBox(bbox, other); - } + // assert that 'bbox' is smaller than and entirely contained within 'other' + setBoxWithinBox(bbox, other); } - return matchedTop && matchedBottom && matchedLeft && matchedRight; } - return false; + return matchedTop && matchedBottom && matchedLeft && matchedRight; } private void setBoxWithinBox(GeoBoundingBox smaller, GeoBoundingBox larger) { @@ -295,22 +292,22 @@ public void describeTo(Description description) { } @Override - public void describeMismatch(Object item, Description description) { - super.describeMismatch(item, description); - if (item instanceof GeoBoundingBox bbox) { - if (matchedTop == false) { - describeMismatchOf(description, "top", other.top(), bbox.top(), true); - } - if (matchedBottom == false) { - describeMismatchOf(description, "bottom", other.bottom(), bbox.bottom(), false); - } - if (matchedLeft == false) { - describeMismatchOf(description, "left", other.left(), bbox.left(), false); - } - if (matchedRight == false) { - describeMismatchOf(description, "right", other.right(), bbox.right(), true); - } + public void describeMismatchSafely(GeoBoundingBox bbox, Description description) { + super.describeMismatchSafely(bbox, description); + + if (matchedTop == false) { + describeMismatchOf(description, "top", other.top(), bbox.top(), true); + } + if (matchedBottom == false) { + describeMismatchOf(description, "bottom", other.bottom(), bbox.bottom(), false); + } + if (matchedLeft == false) { + describeMismatchOf(description, "left", other.left(), bbox.left(), false); } + if (matchedRight == false) { + describeMismatchOf(description, "right", other.right(), bbox.right(), true); + } + } private void describeMismatchOf(Description description, String field, double thisValue, double thatValue, boolean max) { diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianBoundsTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianBoundsTests.java index 89e186fafd994..244439889aa0b 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianBoundsTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianBoundsTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.spatial.search.aggregations.metrics; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.search.aggregations.AggregationInitializationException; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; import org.elasticsearch.xcontent.XContentParseException; @@ -59,7 +58,7 @@ public void testFailWithSubAgg() throws Exception { """; XContentParser parser = createParser(JsonXContent.jsonXContent, source); assertSame(XContentParser.Token.START_OBJECT, parser.nextToken()); - Exception e = expectThrows(AggregationInitializationException.class, () -> AggregatorFactories.parseAggregators(parser)); + Exception e = expectThrows(IllegalArgumentException.class, () -> AggregatorFactories.parseAggregators(parser)); assertThat(e.toString(), containsString("Aggregator [viewport] of type [cartesian_bounds] cannot accept 
sub-aggregations")); } diff --git a/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/FetchSizeTestCase.java b/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/FetchSizeTestCase.java index b8af2ae44623a..ec20cc3c64104 100644 --- a/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/FetchSizeTestCase.java +++ b/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/FetchSizeTestCase.java @@ -89,6 +89,7 @@ public void testScroll() throws SQLException { * Test for {@code SELECT} that is implemented as a scroll query. * In this test we don't retrieve all records and rely on close() to clean the cursor */ + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105840") public void testIncompleteScroll() throws SQLException { try (Connection c = esJdbc(); Statement s = c.createStatement()) { s.setFetchSize(4); @@ -152,6 +153,7 @@ public void testScrollWithDatetimeAndTimezoneParam() throws IOException, SQLExce /** * Test for {@code SELECT} that is implemented as an aggregation. */ + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105840") public void testAggregation() throws SQLException { try (Connection c = esJdbc(); Statement s = c.createStatement()) { s.setFetchSize(4); @@ -170,6 +172,7 @@ public void testAggregation() throws SQLException { /** * Test for nested documents. */ + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105840") public void testNestedDocuments() throws SQLException { try (Connection c = esJdbc(); Statement s = c.createStatement()) { s.setFetchSize(5); diff --git a/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcErrorsTestCase.java b/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcErrorsTestCase.java index e962f35be2a94..bd49ef0f6b39d 100644 --- a/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcErrorsTestCase.java +++ b/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcErrorsTestCase.java @@ -78,6 +78,7 @@ public void testSelectProjectScoreInAggContext() throws IOException, SQLExceptio } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105840") public void testSelectOrderByScoreInAggContext() throws IOException, SQLException { index("test", body -> body.field("foo", 1)); try (Connection c = esJdbc()) { @@ -111,6 +112,7 @@ public void testSelectScoreSubField() throws IOException, SQLException { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105840") public void testHardLimitForSortOnAggregate() throws IOException, SQLException { index("test", body -> body.field("a", 1).field("b", 2)); try (Connection c = esJdbc()) { diff --git a/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/PreparedStatementTestCase.java b/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/PreparedStatementTestCase.java index b2b983803260c..6575ff780ccb8 100644 --- a/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/PreparedStatementTestCase.java +++ b/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/PreparedStatementTestCase.java @@ -301,6 +301,7 @@ public void testWildcardField() throws IOException, SQLException { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105840") public void testConstantKeywordField() throws IOException, SQLException { 
String mapping = """ "properties":{"id":{"type":"integer"},"text":{"type":"constant_keyword"}}"""; @@ -368,6 +369,7 @@ public void testTooMayParameters() throws IOException, SQLException { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105840") public void testStringEscaping() throws SQLException { try (Connection connection = esJdbc()) { try (PreparedStatement statement = connection.prepareStatement("SELECT ?, ?, ?, ?")) { diff --git a/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java b/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java index 1793369c14905..d8534b963c2d7 100644 --- a/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java +++ b/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java @@ -73,7 +73,6 @@ import static org.elasticsearch.common.time.DateUtils.toMilliSeconds; import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcTestUtils.JDBC_DRIVER_VERSION; import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcTestUtils.JDBC_TIMEZONE; -import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcTestUtils.UNSIGNED_LONG_MAX; import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcTestUtils.UNSIGNED_LONG_TYPE_NAME; import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcTestUtils.asDate; import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcTestUtils.asTime; @@ -846,6 +845,7 @@ public void testGettingValidNumbersWithCastingFromUnsignedLong() throws IOExcept } // Double values testing + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105840") public void testGettingValidDoubleWithoutCasting() throws IOException, SQLException { List doubleTestValues = createTestDataForNumericValueTests(ESTestCase::randomDouble); double random1 = doubleTestValues.get(0); @@ -1019,17 +1019,21 @@ public void testGettingInvalidFloat() throws Exception { // // BigDecimal fetching testing // - static final Map, Integer> JAVA_TO_SQL_NUMERIC_TYPES_MAP = new HashMap<>() { - { - put(Byte.class, Types.TINYINT); - put(Short.class, Types.SMALLINT); - put(Integer.class, Types.INTEGER); - put(Long.class, Types.BIGINT); - put(Float.class, Types.REAL); - put(Double.class, Types.DOUBLE); - // TODO: no half & scaled float testing - } - }; + static final Map, Integer> JAVA_TO_SQL_NUMERIC_TYPES_MAP = Map.of( + Byte.class, + Types.TINYINT, + Short.class, + Types.SMALLINT, + Integer.class, + Types.INTEGER, + Long.class, + Types.BIGINT, + Float.class, + Types.REAL, + Double.class, + Types.DOUBLE + // TODO: no half & scaled float testing + ); private static void validateBigDecimalWithoutCasting(ResultSet results, List testValues) throws SQLException { @@ -1154,6 +1158,7 @@ public void testGettingValidBigDecimalFromFloatWithoutCasting() throws IOExcepti ); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105840") public void testGettingValidBigDecimalFromDoubleWithoutCasting() throws IOException, SQLException { List doubleTestValues = createTestDataForNumericValueTests(ESTestCase::randomDouble); doWithQuery( @@ -1401,6 +1406,7 @@ public void testGettingDateWithoutCalendarWithNanos() throws Exception { }); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105840") public void testGettingDateWithCalendar() throws Exception { long randomLongDate = randomMillisUpToYear9999(); setupDataForDateTimeTests(randomLongDate); @@ -1430,6 +1436,7 @@ public void 
testGettingDateWithCalendar() throws Exception { }); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105840") public void testGettingDateWithCalendarWithNanos() throws Exception { assumeTrue( "Driver version [" + JDBC_DRIVER_VERSION + "] doesn't support DATETIME with nanosecond resolution]", @@ -1593,6 +1600,7 @@ public void testGettingTimestampWithoutCalendar() throws Exception { }); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105840") public void testGettingTimestampWithoutCalendarWithNanos() throws Exception { assumeTrue( "Driver version [" + JDBC_DRIVER_VERSION + "] doesn't support DATETIME with nanosecond resolution]", @@ -1925,6 +1933,7 @@ public void testGetTimeType() throws IOException, SQLException { }); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105840") public void testValidGetObjectCalls() throws IOException, SQLException { createIndexWithMapping("test"); updateMappingForNumericValuesTests("test"); diff --git a/x-pack/plugin/sql/qa/server/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSecurityIT.java b/x-pack/plugin/sql/qa/server/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSecurityIT.java index 0e0c2bc8d78b4..6a46346f627ac 100644 --- a/x-pack/plugin/sql/qa/server/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSecurityIT.java +++ b/x-pack/plugin/sql/qa/server/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSecurityIT.java @@ -345,6 +345,7 @@ public void testMetadataGetColumnsSingleFieldExcepted() throws Exception { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105840") public void testMetadataGetColumnsDocumentExcluded() throws Exception { createUser("no_3s", "read_test_without_c_3"); diff --git a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/cli/LenientTestCase.java b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/cli/LenientTestCase.java index ab63913760fea..90fcab839da90 100644 --- a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/cli/LenientTestCase.java +++ b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/cli/LenientTestCase.java @@ -6,20 +6,19 @@ */ package org.elasticsearch.xpack.sql.qa.cli; -import org.elasticsearch.test.hamcrest.RegexMatcher; - import java.io.IOException; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.matchesRegex; public abstract class LenientTestCase extends CliIntegrationTestCase { public void testLenientCommand() throws IOException { index("test", body -> body.field("name", "foo").field("tags", new String[] { "bar", "bar" })); assertEquals("[?1l>[?1000l[?2004llenient set to [90mtrue[0m", command("lenient = true")); - assertThat(command("SELECT * FROM test"), RegexMatcher.matches("\\s*name\\s*\\|\\s*tags\\s*")); + assertThat(command("SELECT * FROM test"), matchesRegex(".*\\s*name\\s*\\|\\s*tags\\s*.*")); assertThat(readLine(), containsString("----------")); - assertThat(readLine(), RegexMatcher.matches("\\s*foo\\s*\\|\\s*bar\\s*")); + assertThat(readLine(), matchesRegex(".*\\s*foo\\s*\\|\\s*bar\\s*.*")); assertEquals("", readLine()); } diff --git a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/cli/SelectTestCase.java b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/cli/SelectTestCase.java index d4e70378627bc..3d148aaf98bf4 100644 --- 
a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/cli/SelectTestCase.java +++ b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/cli/SelectTestCase.java @@ -6,11 +6,10 @@ */ package org.elasticsearch.xpack.sql.qa.cli; -import org.elasticsearch.test.hamcrest.RegexMatcher; - import java.io.IOException; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.matchesRegex; public abstract class SelectTestCase extends CliIntegrationTestCase { public void testSelect() throws IOException { @@ -32,9 +31,9 @@ public void testMultiLineSelect() throws IOException { public void testSelectWithWhere() throws IOException { index("test", body -> body.field("test_field", "test_value1").field("i", 1)); index("test", body -> body.field("test_field", "test_value2").field("i", 2)); - assertThat(command("SELECT * FROM test WHERE i = 2"), RegexMatcher.matches("\\s*i\\s*\\|\\s*test_field\\s*")); + assertThat(command("SELECT * FROM test WHERE i = 2"), matchesRegex(".*\\s*i\\s*\\|\\s*test_field\\s*.*")); assertThat(readLine(), containsString("----------")); - assertThat(readLine(), RegexMatcher.matches("\\s*2\\s*\\|\\s*test_value2\\s*")); + assertThat(readLine(), matchesRegex(".*\\s*2\\s*\\|\\s*test_value2\\s*.*")); assertEquals("", readLine()); } } diff --git a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/cli/ShowTestCase.java b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/cli/ShowTestCase.java index e83f1e0046c3b..44aadc3e76309 100644 --- a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/cli/ShowTestCase.java +++ b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/cli/ShowTestCase.java @@ -6,12 +6,13 @@ */ package org.elasticsearch.xpack.sql.qa.cli; -import org.elasticsearch.test.hamcrest.RegexMatcher; - import java.io.IOException; import java.util.regex.Pattern; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.emptyString; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.matchesRegex; public abstract class ShowTestCase extends CliIntegrationTestCase { @@ -20,24 +21,24 @@ public abstract class ShowTestCase extends CliIntegrationTestCase { public void testShowTables() throws IOException { index("test1", body -> body.field("test_field", "test_value")); index("test2", body -> body.field("test_field", "test_value")); - assertThat(command("SHOW TABLES"), RegexMatcher.matches("\\s*name\\s*")); + assertThat(command("SHOW TABLES"), matchesRegex(".*\\s*name\\s*.*")); assertThat(readLine(), containsString(HEADER_SEPARATOR)); - assertThat(readLine(), RegexMatcher.matches("\\s*test[12]\\s*")); - assertThat(readLine(), RegexMatcher.matches("\\s*test[12]\\s*")); - assertEquals("", readLine()); + assertThat(readLine(), matchesRegex(".*\\s*test[12]\\s*.*")); + assertThat(readLine(), matchesRegex(".*\\s*test[12]\\s*.*")); + assertThat(readLine(), is(emptyString())); } public void testShowFunctions() throws IOException { - assertThat(command("SHOW FUNCTIONS"), RegexMatcher.matches("\\s*name\\s*\\|\\s*type\\s*")); + assertThat(command("SHOW FUNCTIONS"), matchesRegex(".*\\s*name\\s*\\|\\s*type\\s*.*")); assertThat(readLine(), containsString(HEADER_SEPARATOR)); - assertThat(readLine(), RegexMatcher.matches("\\s*AVG\\s*\\|\\s*AGGREGATE\\s*")); - assertThat(readLine(), RegexMatcher.matches("\\s*COUNT\\s*\\|\\s*AGGREGATE\\s*")); - assertThat(readLine(), 
RegexMatcher.matches("\\s*FIRST\\s*\\|\\s*AGGREGATE\\s*")); - assertThat(readLine(), RegexMatcher.matches("\\s*FIRST_VALUE\\s*\\|\\s*AGGREGATE\\s*")); - assertThat(readLine(), RegexMatcher.matches("\\s*LAST\\s*\\|\\s*AGGREGATE\\s*")); - assertThat(readLine(), RegexMatcher.matches("\\s*LAST_VALUE\\s*\\|\\s*AGGREGATE\\s*")); - assertThat(readLine(), RegexMatcher.matches("\\s*MAX\\s*\\|\\s*AGGREGATE\\s*")); - assertThat(readLine(), RegexMatcher.matches("\\s*MIN\\s*\\|\\s*AGGREGATE\\s*")); + assertThat(readLine(), matchesRegex(".*\\s*AVG\\s*\\|\\s*AGGREGATE\\s*.*")); + assertThat(readLine(), matchesRegex(".*\\s*COUNT\\s*\\|\\s*AGGREGATE\\s*.*")); + assertThat(readLine(), matchesRegex(".*\\s*FIRST\\s*\\|\\s*AGGREGATE\\s*.*")); + assertThat(readLine(), matchesRegex(".*\\s*FIRST_VALUE\\s*\\|\\s*AGGREGATE\\s*.*")); + assertThat(readLine(), matchesRegex(".*\\s*LAST\\s*\\|\\s*AGGREGATE\\s*.*")); + assertThat(readLine(), matchesRegex(".*\\s*LAST_VALUE\\s*\\|\\s*AGGREGATE\\s*.*")); + assertThat(readLine(), matchesRegex(".*\\s*MAX\\s*\\|\\s*AGGREGATE\\s*.*")); + assertThat(readLine(), matchesRegex(".*\\s*MIN\\s*\\|\\s*AGGREGATE\\s*.*")); String line = readLine(); Pattern aggregateFunction = Pattern.compile("\\s*[A-Z0-9_~]+\\s*\\|\\s*AGGREGATE\\s*"); while (aggregateFunction.matcher(line).matches()) { @@ -56,43 +57,43 @@ public void testShowFunctions() throws IOException { line = readLine(); } - assertThat(line, RegexMatcher.matches("\\s*SCORE\\s*\\|\\s*SCORE\\s*")); - assertEquals("", readLine()); + assertThat(line, matchesRegex(".*\\s*SCORE\\s*\\|\\s*SCORE\\s*.*")); + assertThat(readLine(), is(emptyString())); } public void testShowFunctionsLikePrefix() throws IOException { - assertThat(command("SHOW FUNCTIONS LIKE 'L%'"), RegexMatcher.matches("\\s*name\\s*\\|\\s*type\\s*")); + assertThat(command("SHOW FUNCTIONS LIKE 'L%'"), matchesRegex(".*\\s*name\\s*\\|\\s*type\\s*.*")); assertThat(readLine(), containsString(HEADER_SEPARATOR)); - assertThat(readLine(), RegexMatcher.matches("\\s*LAST\\s*\\|\\s*AGGREGATE\\s*")); - assertThat(readLine(), RegexMatcher.matches("\\s*LAST_VALUE\\s*\\|\\s*AGGREGATE\\s*")); - assertThat(readLine(), RegexMatcher.matches("\\s*LEAST\\s*\\|\\s*CONDITIONAL\\s*")); - assertThat(readLine(), RegexMatcher.matches("\\s*LOG\\s*\\|\\s*SCALAR\\s*")); - assertThat(readLine(), RegexMatcher.matches("\\s*LOG10\\s*\\|\\s*SCALAR\\s*")); - assertThat(readLine(), RegexMatcher.matches("\\s*LCASE\\s*\\|\\s*SCALAR\\s*")); - assertThat(readLine(), RegexMatcher.matches("\\s*LEFT\\s*\\|\\s*SCALAR\\s*")); - assertThat(readLine(), RegexMatcher.matches("\\s*LENGTH\\s*\\|\\s*SCALAR\\s*")); - assertThat(readLine(), RegexMatcher.matches("\\s*LOCATE\\s*\\|\\s*SCALAR\\s*")); - assertThat(readLine(), RegexMatcher.matches("\\s*LTRIM\\s*\\|\\s*SCALAR\\s*")); - assertEquals("", readLine()); + assertThat(readLine(), matchesRegex(".*\\s*LAST\\s*\\|\\s*AGGREGATE\\s*.*")); + assertThat(readLine(), matchesRegex(".*\\s*LAST_VALUE\\s*\\|\\s*AGGREGATE\\s*.*")); + assertThat(readLine(), matchesRegex(".*\\s*LEAST\\s*\\|\\s*CONDITIONAL\\s*.*")); + assertThat(readLine(), matchesRegex(".*\\s*LOG\\s*\\|\\s*SCALAR\\s*.*")); + assertThat(readLine(), matchesRegex(".*\\s*LOG10\\s*\\|\\s*SCALAR\\s*.*")); + assertThat(readLine(), matchesRegex(".*\\s*LCASE\\s*\\|\\s*SCALAR\\s*.*")); + assertThat(readLine(), matchesRegex(".*\\s*LEFT\\s*\\|\\s*SCALAR\\s*.*")); + assertThat(readLine(), matchesRegex(".*\\s*LENGTH\\s*\\|\\s*SCALAR\\s*.*")); + assertThat(readLine(), matchesRegex(".*\\s*LOCATE\\s*\\|\\s*SCALAR\\s*.*")); + 
assertThat(readLine(), matchesRegex(".*\\s*LTRIM\\s*\\|\\s*SCALAR\\s*.*")); + assertThat(readLine(), is(emptyString())); } public void testShowFunctionsLikeInfix() throws IOException { - assertThat(command("SHOW FUNCTIONS LIKE '%DAY%'"), RegexMatcher.matches("\\s*name\\s*\\|\\s*type\\s*")); + assertThat(command("SHOW FUNCTIONS LIKE '%DAY%'"), matchesRegex(".*\\s*name\\s*\\|\\s*type\\s*.*")); assertThat(readLine(), containsString(HEADER_SEPARATOR)); - assertThat(readLine(), RegexMatcher.matches("\\s*DAY\\s*\\|\\s*SCALAR\\s*")); - assertThat(readLine(), RegexMatcher.matches("\\s*DAYNAME\\s*\\|\\s*SCALAR\\s*")); - assertThat(readLine(), RegexMatcher.matches("\\s*DAYOFMONTH\\s*\\|\\s*SCALAR\\s*")); - assertThat(readLine(), RegexMatcher.matches("\\s*DAYOFWEEK\\s*\\|\\s*SCALAR\\s*")); - assertThat(readLine(), RegexMatcher.matches("\\s*DAYOFYEAR\\s*\\|\\s*SCALAR\\s*")); - assertThat(readLine(), RegexMatcher.matches("\\s*DAY_NAME\\s*\\|\\s*SCALAR\\s*")); - assertThat(readLine(), RegexMatcher.matches("\\s*DAY_OF_MONTH\\s*\\|\\s*SCALAR\\s*")); - assertThat(readLine(), RegexMatcher.matches("\\s*DAY_OF_WEEK\\s*\\|\\s*SCALAR\\s*")); - assertThat(readLine(), RegexMatcher.matches("\\s*DAY_OF_YEAR\\s*\\|\\s*SCALAR\\s*")); - assertThat(readLine(), RegexMatcher.matches("\\s*HOUR_OF_DAY\\s*\\|\\s*SCALAR\\s*")); - assertThat(readLine(), RegexMatcher.matches("\\s*ISODAYOFWEEK\\s*\\|\\s*SCALAR\\s*")); - assertThat(readLine(), RegexMatcher.matches("\\s*ISO_DAY_OF_WEEK\\s*\\|\\s*SCALAR\\s*")); - assertThat(readLine(), RegexMatcher.matches("\\s*MINUTE_OF_DAY\\s*\\|\\s*SCALAR\\s*")); - assertThat(readLine(), RegexMatcher.matches("\\s*TODAY\\s*\\|\\s*SCALAR\\s*")); - assertEquals("", readLine()); + assertThat(readLine(), matchesRegex(".*\\s*DAY\\s*\\|\\s*SCALAR\\s*.*")); + assertThat(readLine(), matchesRegex(".*\\s*DAYNAME\\s*\\|\\s*SCALAR\\s*.*")); + assertThat(readLine(), matchesRegex(".*\\s*DAYOFMONTH\\s*\\|\\s*SCALAR\\s*.*")); + assertThat(readLine(), matchesRegex(".*\\s*DAYOFWEEK\\s*\\|\\s*SCALAR\\s*.*")); + assertThat(readLine(), matchesRegex(".*\\s*DAYOFYEAR\\s*\\|\\s*SCALAR\\s*.*")); + assertThat(readLine(), matchesRegex(".*\\s*DAY_NAME\\s*\\|\\s*SCALAR\\s*.*")); + assertThat(readLine(), matchesRegex(".*\\s*DAY_OF_MONTH\\s*\\|\\s*SCALAR\\s*.*")); + assertThat(readLine(), matchesRegex(".*\\s*DAY_OF_WEEK\\s*\\|\\s*SCALAR\\s*.*")); + assertThat(readLine(), matchesRegex(".*\\s*DAY_OF_YEAR\\s*\\|\\s*SCALAR\\s*.*")); + assertThat(readLine(), matchesRegex(".*\\s*HOUR_OF_DAY\\s*\\|\\s*SCALAR\\s*.*")); + assertThat(readLine(), matchesRegex(".*\\s*ISODAYOFWEEK\\s*\\|\\s*SCALAR\\s*.*")); + assertThat(readLine(), matchesRegex(".*\\s*ISO_DAY_OF_WEEK\\s*\\|\\s*SCALAR\\s*.*")); + assertThat(readLine(), matchesRegex(".*\\s*MINUTE_OF_DAY\\s*\\|\\s*SCALAR\\s*.*")); + assertThat(readLine(), matchesRegex(".*\\s*TODAY\\s*\\|\\s*SCALAR\\s*.*")); + assertThat(readLine(), is(emptyString())); } } diff --git a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java index fb92ac096fc36..ca9532d8dc7d0 100644 --- a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java +++ b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java @@ -1511,18 +1511,19 @@ public void testBasicAsyncWait() throws IOException { public void testAsyncTextWait() throws IOException { RequestObjectBuilder builder = query("SELECT 
1").waitForCompletionTimeout("1d").keepOnCompletion(false); - Map contentMap = new HashMap<>() { - { - put("txt", " 1 \n---------------\n1 \n"); - put("csv", "1\r\n1\r\n"); - put("tsv", "1\n1\n"); - } - }; + Map contentMap = Map.of( + "txt", + " 1 \n---------------\n1 \n", + "csv", + "1\r\n1\r\n", + "tsv", + "1\n1\n" + ); - for (String format : contentMap.keySet()) { - Response response = runSqlAsTextWithFormat(builder, format); + for (var format : contentMap.entrySet()) { + Response response = runSqlAsTextWithFormat(builder, format.getKey()); - assertEquals(contentMap.get(format), responseBody(response)); + assertEquals(format.getValue(), responseBody(response)); assertTrue(hasText(response.getHeader(HEADER_NAME_ASYNC_ID))); assertEquals("false", response.getHeader(HEADER_NAME_ASYNC_PARTIAL)); @@ -1532,13 +1533,7 @@ public void testAsyncTextWait() throws IOException { @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/80089") public void testAsyncTextPaginated() throws IOException, InterruptedException { - final Map acceptMap = new HashMap<>() { - { - put("txt", "text/plain"); - put("csv", "text/csv"); - put("tsv", "text/tab-separated-values"); - } - }; + final Map acceptMap = Map.of("txt", "text/plain", "csv", "text/csv", "tsv", "text/tab-separated-values"); final int fetchSize = randomIntBetween(1, 10); final int fetchCount = randomIntBetween(1, 9); bulkLoadTestData(fetchSize * fetchCount); // NB: product needs to stay below 100, for txt format tests diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerRunTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerRunTests.java index 9ce49721ba2ae..81a1e3b0741f4 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerRunTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerRunTests.java @@ -32,7 +32,6 @@ import org.elasticsearch.xpack.sql.types.SqlTypesTests; import java.time.ZonedDateTime; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -52,17 +51,22 @@ public class OptimizerRunTests extends ESTestCase { private final IndexResolution getIndexResult; private final Analyzer analyzer; private final Optimizer optimizer; - private static final Map> COMPARISONS = new HashMap<>() { - { - put(EQ.symbol(), Equals.class); - put(NULLEQ.symbol(), NullEquals.class); - put(NEQ.symbol(), NotEquals.class); - put(GT.symbol(), GreaterThan.class); - put(GTE.symbol(), GreaterThanOrEqual.class); - put(LT.symbol(), LessThan.class); - put(LTE.symbol(), LessThanOrEqual.class); - } - }; + private static final Map> COMPARISONS = Map.of( + EQ.symbol(), + Equals.class, + NULLEQ.symbol(), + NullEquals.class, + NEQ.symbol(), + NotEquals.class, + GT.symbol(), + GreaterThan.class, + GTE.symbol(), + GreaterThanOrEqual.class, + LT.symbol(), + LessThan.class, + LTE.symbol(), + LessThanOrEqual.class + ); private static final LiteralsOnTheRight LITERALS_ON_THE_RIGHT = new LiteralsOnTheRight(); public OptimizerRunTests() { diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/10_basic.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/10_basic.yml index eda47355af0cf..2aa78a91f4dbe 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/10_basic.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/10_basic.yml 
@@ -350,3 +350,20 @@ - match: { hits.hits.1.fields.metric.0.max: 1000 } - match: { hits.hits.1.fields.metric.0.sum: 5000 } - match: { hits.hits.1.fields.metric.0.value_count: 10 } +--- +"deprecated use of multi-fields": + - skip: + version: " - 8.13.99" + reason: "deprecation added in 8.14" + features: warnings + + - do: + warnings: + - "Adding multifields to [aggregate_metric_double] mappers has no effect and will be forbidden in future" + indices.create: + index: aggregate_metric_double-multi-field + body: + mappings: + properties: + aggregated: { "type": "aggregate_metric_double", "metrics": ["max"], "default_metric": "max", "fields": {"keyword": {"type": "keyword"}} } + diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/constant_keyword/10_basic.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/constant_keyword/10_basic.yml index a89b24ff45593..ee08fcc3693d4 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/constant_keyword/10_basic.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/constant_keyword/10_basic.yml @@ -450,3 +450,20 @@ Cardinality agg: field: test - match: { aggregations.card.value: 1 } +--- +"deprecated use of multi-fields": + - skip: + version: " - 8.13.99" + reason: "deprecation added in 8.14" + features: warnings + + - do: + warnings: + - "Adding multifields to [constant_keyword] mappers has no effect and will be forbidden in future" + indices.create: + index: constant_keyword-multi-field + body: + mappings: + properties: + keyword: { "type": "constant_keyword", "fields": {"keyword": {"type": "keyword"}} } + diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/120_profile.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/120_profile.yml new file mode 100644 index 0000000000000..c2e728535a408 --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/120_profile.yml @@ -0,0 +1,142 @@ +--- +setup: + - skip: + version: " - 8.11.99" + reason: "profile option added in 8.12" + features: warnings + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + mappings: + properties: + data: + type: long + data_d: + type: double + count: + type: long + count_d: + type: double + time: + type: long + color: + type: keyword + text: + type: text + + - do: + cluster.health: # older versions of ESQL don't wait for the nodes to become available. 
+ wait_for_no_initializing_shards: true + wait_for_events: languid + + - do: + bulk: + index: "test" + refresh: true + body: + - { "index": { } } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275187, "color": "red", "text": "rr red" } + - { "index": { } } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275188, "color": "blue", "text": "bb blue" } + - { "index": { } } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275189, "color": "green", "text": "gg green" } + - { "index": { } } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275190, "color": "red", "text": "rr red" } + - { "index": { } } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275191, "color": "red", "text": "rr red" } + - { "index": { } } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275192, "color": "blue", "text": "bb blue" } + - { "index": { } } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275193, "color": "green", "text": "gg green" } + - { "index": { } } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275194, "color": "red", "text": "rr red" } + - { "index": { } } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275195, "color": "red", "text": "rr red" } + - { "index": { } } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275196, "color": "blue", "text": "bb blue" } + - { "index": { } } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275197, "color": "green", "text": "gg green" } + - { "index": { } } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275198, "color": "red", "text": "rr red" } + - { "index": { } } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275199, "color": "red", "text": "rr red" } + - { "index": { } } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275200, "color": "blue", "text": "bb blue" } + - { "index": { } } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275201, "color": "green", "text": "gg green" } + - { "index": { } } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275202, "color": "red", "text": "rr red" } + - { "index": { } } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275203, "color": "red", "text": "rr red" } + - { "index": { } } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275204, "color": "blue", "text": "bb blue" } + - { "index": { } } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275205, "color": "green", "text": "gg green" } + - { "index": { } } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275206, "color": "red", "text": "rr red" } + - { "index": { } } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275207, "color": "red", "text": "rr red" } + - { "index": { } } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275208, "color": "blue", "text": "bb blue" } + - { "index": { } } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275209, "color": "green", "text": "gg green" } + - { "index": { } } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275210, "color": "red", "text": "rr red" } + - { "index": { } } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275211, 
"color": "red", "text": "rr red" } + - { "index": { } } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275212, "color": "blue", "text": "bb blue" } + - { "index": { } } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275213, "color": "green", "text": "gg green" } + - { "index": { } } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275214, "color": "red", "text": "rr red" } + - { "index": { } } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275215, "color": "red", "text": "rr red" } + - { "index": { } } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275216, "color": "blue", "text": "bb blue" } + - { "index": { } } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275217, "color": "green", "text": "gg green" } + - { "index": { } } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275218, "color": "red", "text": "rr red" } + - { "index": { } } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275219, "color": "red", "text": "rr red" } + - { "index": { } } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275220, "color": "blue", "text": "bb blue" } + - { "index": { } } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275221, "color": "green", "text": "gg green" } + - { "index": { } } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275222, "color": "red", "text": "rr red" } + - { "index": { } } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275223, "color": "red", "text": "rr red" } + - { "index": { } } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275224, "color": "blue", "text": "bb blue" } + - { "index": { } } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275225, "color": "green", "text": "gg green" } + - { "index": { } } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275226, "color": "red", "text": "rr red" } + +--- +avg 8.14 or after: + - skip: + version: " - 8.13.99" + reason: "avg changed starting 8.14" + + - do: + esql.query: + body: + query: 'FROM test | STATS AVG(data) | LIMIT 1' + columnar: true + profile: true + + - match: {columns.0.name: "AVG(data)"} + - match: {columns.0.type: "double"} + - match: {values.0.0: 1.5} + - match: {profile.drivers.0.operators.0.operator: /ExchangeSourceOperator|LuceneSourceOperator.+/} + - gte: {profile.drivers.0.took_nanos: 0} + - gte: {profile.drivers.0.cpu_nanos: 0} + - gte: {profile.drivers.1.took_nanos: 0} + - gte: {profile.drivers.1.cpu_nanos: 0} +# It's hard to assert much about these because they don't come back in any particular order. 
diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/81_text_exact_subfields.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/81_text_exact_subfields.yml index 64d4665e3cfe7..3b58ee01edfa0 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/81_text_exact_subfields.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/81_text_exact_subfields.yml @@ -147,6 +147,29 @@ setup: - length: { values: 0 } + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'from test | where text_ignore_above is not null | keep text_ignore_above, text_ignore_above.raw, text_normalizer, text_normalizer.raw, non_indexed, non_indexed.raw' + + - match: { columns.0.name: "text_ignore_above" } + - match: { columns.0.type: "text" } + - match: { columns.1.name: "text_ignore_above.raw" } + - match: { columns.1.type: "keyword" } + - match: { columns.2.name: "text_normalizer" } + - match: { columns.2.type: "text" } + - match: { columns.3.name: "text_normalizer.raw" } + - match: { columns.3.type: "keyword" } + - match: { columns.4.name: "non_indexed" } + - match: { columns.4.type: "text" } + - match: { columns.5.name: "non_indexed.raw" } + - match: { columns.5.type: "keyword" } + + - length: { values: 2 } + + - do: allowed_warnings_regex: - "No limit defined, adding default limit of \\[.*\\]" diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/jobs_crud.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/jobs_crud.yml index 3c4439444d1a1..24e869781f677 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/jobs_crud.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/jobs_crud.yml @@ -1130,7 +1130,7 @@ "Test cannot create job with model snapshot id set": - do: - catch: /illegal_argument_exception/ + catch: /x_content_parse_exception/ ml.put_job: job_id: has-model-snapshot-id body: > diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/text_expansion_search.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/text_expansion_search.yml index 5e29d3cdf2ae6..f92870b61f1b1 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/text_expansion_search.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/text_expansion_search.yml @@ -287,3 +287,21 @@ setup: tokens_weight_threshold: 0.4 only_score_pruned_tokens: true - match: { hits.total.value: 0 } + +--- +"Test text-expansion that displays error for invalid queried field type": + - skip: + version: " - 8.13.99" + reason: "validation for invalid field type introduced in 8.14.0" + + - do: + catch: /\[keyword\] is not an appropriate field type for this query/ + search: + index: index-with-rank-features + body: + query: + text_expansion: + source_text: + model_id: text_expansion_model + model_text: "octopus comforter smells" + pruning_config: {} diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/validate.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/validate.yml index 1df34a64f860a..a2cfb65b08a11 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/validate.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/validate.yml @@ -76,21 +76,7 @@ "Test job config is invalid because model snapshot id set": - do: - catch: /illegal_argument_exception/ - ml.validate: - body: > - { - 
"model_snapshot_id": "wont-create-with-this-setting", - "analysis_config" : { - "bucket_span": "1h", - "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] - }, - "data_description" : { - } - } - - - do: - catch: /The job is configured with fields \[model_snapshot_id\] that are illegal to set at job creation/ + catch: /x_content_parse_exception/ ml.validate: body: > { diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/text_structure/find_field_structure.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/text_structure/find_field_structure.yml new file mode 100644 index 0000000000000..c2e9dbea1600a --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/text_structure/find_field_structure.yml @@ -0,0 +1,63 @@ +setup: + - do: + indices.create: + index: airlines + body: + mappings: + properties: + message: + type: text + - do: + bulk: + refresh: true + body: + - index: + _index: airlines + - message: "{\"airline\": \"AAL\", \"responsetime\": 132.2046, \"sourcetype\": \"text-structure-test\", \"time\": 1403481600}" + - index: + _index: airlines + - message: "{\"airline\": \"JZA\", \"responsetime\": 990.4628, \"sourcetype\": \"text-structure-test\", \"time\": 1403481700}" + - index: + _index: airlines + - message: "{\"airline\": \"AAL\", \"responsetime\": 134.2046, \"sourcetype\": \"text-structure-test\", \"time\": 1403481800}" +--- +"Field structure finder with JSON messages": + - do: + text_structure.find_field_structure: + index: airlines + field: message + documents_to_sample: 3 + timeout: 10s + - match: { num_lines_analyzed: 3 } + - match: { num_messages_analyzed: 3 } + - match: { charset: "UTF-8" } + - match: { has_byte_order_marker: null } + - match: { format: ndjson } + - match: { timestamp_field: time } + - match: { joda_timestamp_formats.0: UNIX } + - match: { java_timestamp_formats.0: UNIX } + - match: { need_client_timezone: false } + - match: { mappings.properties.airline.type: keyword } + - match: { mappings.properties.responsetime.type: double } + - match: { mappings.properties.sourcetype.type: keyword } + - match: { mappings.properties.time.type: date } + - match: { mappings.properties.time.format: epoch_second } + - match: { ingest_pipeline.description: "Ingest pipeline created by text structure finder" } + - match: { ingest_pipeline.processors.0.date.field: time } + - match: { ingest_pipeline.processors.0.date.formats.0: UNIX } + - match: { field_stats.airline.count: 3 } + - match: { field_stats.airline.cardinality: 2 } + - match: { field_stats.responsetime.count: 3 } + - match: { field_stats.responsetime.cardinality: 3 } + - match: { field_stats.responsetime.min_value: 132.2046 } + - match: { field_stats.responsetime.max_value: 990.4628 } + # Not asserting on field_stats.responsetime.mean as it's a recurring decimal + # so its representation in the response could cause spurious failures + - match: { field_stats.responsetime.median_value: 134.2046 } + - match: { field_stats.sourcetype.count: 3 } + - match: { field_stats.sourcetype.cardinality: 1 } + - match: { field_stats.time.count: 3 } + - match: { field_stats.time.cardinality: 3 } + - match: { field_stats.time.earliest: "1403481600" } + - match: { field_stats.time.latest: "1403481800" } + - is_false: explanation diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/text_structure/find_message_structure.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/text_structure/find_message_structure.yml 
new file mode 100644 index 0000000000000..b1000510f2972 --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/text_structure/find_message_structure.yml @@ -0,0 +1,56 @@ +"Messages structure finder with JSON messages": + - do: + text_structure.find_message_structure: + timeout: 10s + body: + messages: + - "{\"airline\": \"AAL\", \"responsetime\": 132.2046, \"sourcetype\": \"text-structure-test\", \"time\": 1403481600}" + - "{\"airline\": \"JZA\", \"responsetime\": 990.4628, \"sourcetype\": \"text-structure-test\", \"time\": 1403481700}" + - "{\"airline\": \"AAL\", \"responsetime\": 134.2046, \"sourcetype\": \"text-structure-test\", \"time\": 1403481800}" + - match: { num_lines_analyzed: 3 } + - match: { num_messages_analyzed: 3 } + - match: { charset: "UTF-8" } + - match: { has_byte_order_marker: null } + - match: { format: ndjson } + - match: { timestamp_field: time } + - match: { joda_timestamp_formats.0: UNIX } + - match: { java_timestamp_formats.0: UNIX } + - match: { need_client_timezone: false } + - match: { mappings.properties.airline.type: keyword } + - match: { mappings.properties.responsetime.type: double } + - match: { mappings.properties.sourcetype.type: keyword } + - match: { mappings.properties.time.type: date } + - match: { mappings.properties.time.format: epoch_second } + - match: { ingest_pipeline.description: "Ingest pipeline created by text structure finder" } + - match: { ingest_pipeline.processors.0.date.field: time } + - match: { ingest_pipeline.processors.0.date.formats.0: UNIX } + - match: { field_stats.airline.count: 3 } + - match: { field_stats.airline.cardinality: 2 } + - match: { field_stats.responsetime.count: 3 } + - match: { field_stats.responsetime.cardinality: 3 } + - match: { field_stats.responsetime.min_value: 132.2046 } + - match: { field_stats.responsetime.max_value: 990.4628 } + # Not asserting on field_stats.responsetime.mean as it's a recurring decimal + # so its representation in the response could cause spurious failures + - match: { field_stats.responsetime.median_value: 134.2046 } + - match: { field_stats.sourcetype.count: 3 } + - match: { field_stats.sourcetype.cardinality: 1 } + - match: { field_stats.time.count: 3 } + - match: { field_stats.time.cardinality: 3 } + - match: { field_stats.time.earliest: "1403481600" } + - match: { field_stats.time.latest: "1403481800" } + - is_false: explanation +--- +"Messages structure finder with log messages": + - do: + text_structure.find_message_structure: + timeout: 10s + body: + messages: + - "2019-05-16 16:56:14 line 1 abcdefghijklmnopqrstuvwxyz" + - "2019-05-16 16:56:14 line 2 abcdefghijklmnopqrstuvwxyz\ncontinuation...\ncontinuation...\n" + - "2019-05-16 16:56:14 line 3 abcdefghijklmnopqrstuvwxyz" + - match: { num_lines_analyzed: 3 } + - match: { num_messages_analyzed: 3 } + - match: { format: semi_structured_text } + - match: { grok_pattern: "%{TIMESTAMP_ISO8601:timestamp} .*? %{INT:field} .*" } diff --git a/x-pack/plugin/text-structure/qa/text-structure-with-security/build.gradle b/x-pack/plugin/text-structure/qa/text-structure-with-security/build.gradle index 5fc76885aa7eb..1e592615da1f2 100644 --- a/x-pack/plugin/text-structure/qa/text-structure-with-security/build.gradle +++ b/x-pack/plugin/text-structure/qa/text-structure-with-security/build.gradle @@ -9,7 +9,7 @@ dependencies { restResources { restApi { // needed for template installation, etc. 
- include '_common', 'indices', 'text_structure' + include '_common', 'bulk', 'indices', 'text_structure' } restTests { includeXpack 'text_structure' diff --git a/x-pack/plugin/text-structure/qa/text-structure-with-security/roles.yml b/x-pack/plugin/text-structure/qa/text-structure-with-security/roles.yml index 7eff54728320a..7095acb3c60a1 100644 --- a/x-pack/plugin/text-structure/qa/text-structure-with-security/roles.yml +++ b/x-pack/plugin/text-structure/qa/text-structure-with-security/roles.yml @@ -6,3 +6,15 @@ minimal: # This is always required because the REST client uses it to find the version of # Elasticsearch it's talking to - cluster:monitor/main + indices: + # Give all users involved in these tests access to the indices where the data to + # be analyzed is stored. + - names: [ 'airlines' ] + privileges: + - create_index + - indices:admin/refresh + - read + - write + - view_index_metadata + - indices:data/write/bulk + - indices:data/write/index diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/TextStructurePlugin.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/TextStructurePlugin.java index 2a2fe1ea5a55a..07e49989b9f09 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/TextStructurePlugin.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/TextStructurePlugin.java @@ -21,10 +21,16 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.xpack.core.textstructure.action.FindFieldStructureAction; +import org.elasticsearch.xpack.core.textstructure.action.FindMessageStructureAction; import org.elasticsearch.xpack.core.textstructure.action.FindStructureAction; import org.elasticsearch.xpack.core.textstructure.action.TestGrokPatternAction; +import org.elasticsearch.xpack.textstructure.rest.RestFindFieldStructureAction; +import org.elasticsearch.xpack.textstructure.rest.RestFindMessageStructureAction; import org.elasticsearch.xpack.textstructure.rest.RestFindStructureAction; import org.elasticsearch.xpack.textstructure.rest.RestTestGrokPatternAction; +import org.elasticsearch.xpack.textstructure.transport.TransportFindFieldStructureAction; +import org.elasticsearch.xpack.textstructure.transport.TransportFindMessageStructureAction; import org.elasticsearch.xpack.textstructure.transport.TransportFindStructureAction; import org.elasticsearch.xpack.textstructure.transport.TransportTestGrokPatternAction; @@ -53,12 +59,19 @@ public List getRestHandlers( Supplier nodesInCluster, Predicate clusterSupportsFeature ) { - return Arrays.asList(new RestFindStructureAction(), new RestTestGrokPatternAction()); + return Arrays.asList( + new RestFindFieldStructureAction(), + new RestFindMessageStructureAction(), + new RestFindStructureAction(), + new RestTestGrokPatternAction() + ); } @Override public List> getActions() { return Arrays.asList( + new ActionHandler<>(FindFieldStructureAction.INSTANCE, TransportFindFieldStructureAction.class), + new ActionHandler<>(FindMessageStructureAction.INSTANCE, TransportFindMessageStructureAction.class), new ActionHandler<>(FindStructureAction.INSTANCE, TransportFindStructureAction.class), new ActionHandler<>(TestGrokPatternAction.INSTANCE, TransportTestGrokPatternAction.class) ); diff --git 
a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindFieldStructureAction.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindFieldStructureAction.java new file mode 100644 index 0000000000000..0f81a4fc9726b --- /dev/null +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindFieldStructureAction.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.textstructure.rest; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.textstructure.action.FindFieldStructureAction; +import org.elasticsearch.xpack.core.textstructure.structurefinder.TextStructure; + +import java.util.Collections; +import java.util.List; +import java.util.Set; + +import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.xpack.textstructure.TextStructurePlugin.BASE_PATH; + +@ServerlessScope(Scope.INTERNAL) +public class RestFindFieldStructureAction extends BaseRestHandler { + + @Override + public List routes() { + return List.of(new Route(GET, BASE_PATH + "find_field_structure")); + } + + @Override + public String getName() { + return "text_structure_find_field_structure_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { + FindFieldStructureAction.Request request = new FindFieldStructureAction.Request(); + RestFindStructureArgumentsParser.parse(restRequest, request); + request.setIndex(restRequest.param(FindFieldStructureAction.Request.INDEX.getPreferredName())); + request.setField(restRequest.param(FindFieldStructureAction.Request.FIELD.getPreferredName())); + return channel -> client.execute(FindFieldStructureAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } + + @Override + protected Set responseParams() { + return Collections.singleton(TextStructure.EXPLAIN); + } +} diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindMessageStructureAction.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindMessageStructureAction.java new file mode 100644 index 0000000000000..cc607dbdcd646 --- /dev/null +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindMessageStructureAction.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ +package org.elasticsearch.xpack.textstructure.rest; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.textstructure.action.FindMessageStructureAction; +import org.elasticsearch.xpack.core.textstructure.structurefinder.TextStructure; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Set; + +import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.xpack.textstructure.TextStructurePlugin.BASE_PATH; + +@ServerlessScope(Scope.INTERNAL) +public class RestFindMessageStructureAction extends BaseRestHandler { + + @Override + public List routes() { + return List.of(new Route(GET, BASE_PATH + "find_message_structure"), new Route(POST, BASE_PATH + "find_message_structure")); + } + + @Override + public String getName() { + return "text_structure_find_message_structure_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + FindMessageStructureAction.Request request; + try (XContentParser parser = restRequest.contentOrSourceParamParser()) { + request = FindMessageStructureAction.Request.parseRequest(parser); + } + RestFindStructureArgumentsParser.parse(restRequest, request); + return channel -> client.execute(FindMessageStructureAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } + + @Override + protected Set responseParams() { + return Collections.singleton(TextStructure.EXPLAIN); + } +} diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindStructureAction.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindStructureAction.java index 94aee3c2a5f49..65325f2268ed2 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindStructureAction.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindStructureAction.java @@ -9,7 +9,6 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.core.RestApiVersion; -import org.elasticsearch.core.TimeValue; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; @@ -17,12 +16,10 @@ import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.core.textstructure.action.FindStructureAction; import org.elasticsearch.xpack.core.textstructure.structurefinder.TextStructure; -import org.elasticsearch.xpack.textstructure.structurefinder.TextStructureFinderManager; import java.util.Collections; import java.util.List; import java.util.Set; -import java.util.concurrent.TimeUnit; import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.xpack.textstructure.TextStructurePlugin.BASE_PATH; @@ -30,8 +27,6 @@ @ServerlessScope(Scope.INTERNAL) public class RestFindStructureAction extends BaseRestHandler { - private static final TimeValue DEFAULT_TIMEOUT = new TimeValue(25, 
TimeUnit.SECONDS); - @Override public List routes() { return List.of( @@ -46,38 +41,9 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { - FindStructureAction.Request request = new FindStructureAction.Request(); - request.setLinesToSample( - restRequest.paramAsInt( - FindStructureAction.Request.LINES_TO_SAMPLE.getPreferredName(), - TextStructureFinderManager.DEFAULT_IDEAL_SAMPLE_LINE_COUNT - ) - ); - request.setLineMergeSizeLimit( - restRequest.paramAsInt( - FindStructureAction.Request.LINE_MERGE_SIZE_LIMIT.getPreferredName(), - TextStructureFinderManager.DEFAULT_LINE_MERGE_SIZE_LIMIT - ) - ); - request.setTimeout( - TimeValue.parseTimeValue( - restRequest.param(FindStructureAction.Request.TIMEOUT.getPreferredName()), - DEFAULT_TIMEOUT, - FindStructureAction.Request.TIMEOUT.getPreferredName() - ) - ); - request.setCharset(restRequest.param(FindStructureAction.Request.CHARSET.getPreferredName())); - request.setFormat(restRequest.param(FindStructureAction.Request.FORMAT.getPreferredName())); - request.setColumnNames(restRequest.paramAsStringArray(FindStructureAction.Request.COLUMN_NAMES.getPreferredName(), null)); - request.setHasHeaderRow(restRequest.paramAsBoolean(FindStructureAction.Request.HAS_HEADER_ROW.getPreferredName(), null)); - request.setDelimiter(restRequest.param(FindStructureAction.Request.DELIMITER.getPreferredName())); - request.setQuote(restRequest.param(FindStructureAction.Request.QUOTE.getPreferredName())); - request.setShouldTrimFields(restRequest.paramAsBoolean(FindStructureAction.Request.SHOULD_TRIM_FIELDS.getPreferredName(), null)); - request.setGrokPattern(restRequest.param(FindStructureAction.Request.GROK_PATTERN.getPreferredName())); - request.setEcsCompatibility(restRequest.param(FindStructureAction.Request.ECS_COMPATIBILITY.getPreferredName())); - request.setTimestampFormat(restRequest.param(FindStructureAction.Request.TIMESTAMP_FORMAT.getPreferredName())); - request.setTimestampField(restRequest.param(FindStructureAction.Request.TIMESTAMP_FIELD.getPreferredName())); + RestFindStructureArgumentsParser.parse(restRequest, request); + if (restRequest.hasContent()) { request.setSample(restRequest.content()); } else { diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindStructureArgumentsParser.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindStructureArgumentsParser.java new file mode 100644 index 0000000000000..bd6fe553fc447 --- /dev/null +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindStructureArgumentsParser.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.textstructure.rest; + +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.xpack.core.textstructure.action.AbstractFindStructureRequest; +import org.elasticsearch.xpack.core.textstructure.action.FindFieldStructureAction; +import org.elasticsearch.xpack.core.textstructure.action.FindMessageStructureAction; +import org.elasticsearch.xpack.core.textstructure.action.FindStructureAction; +import org.elasticsearch.xpack.core.textstructure.structurefinder.TextStructure; +import org.elasticsearch.xpack.textstructure.structurefinder.TextStructureFinderManager; + +import java.util.concurrent.TimeUnit; + +public class RestFindStructureArgumentsParser { + + private static final TimeValue DEFAULT_TIMEOUT = new TimeValue(25, TimeUnit.SECONDS); + + static void parse(RestRequest restRequest, AbstractFindStructureRequest request) { + if (request instanceof FindStructureAction.Request) { + request.setLinesToSample( + restRequest.paramAsInt( + FindStructureAction.Request.LINES_TO_SAMPLE.getPreferredName(), + TextStructureFinderManager.DEFAULT_IDEAL_SAMPLE_LINE_COUNT + ) + ); + request.setLineMergeSizeLimit( + restRequest.paramAsInt( + FindStructureAction.Request.LINE_MERGE_SIZE_LIMIT.getPreferredName(), + TextStructureFinderManager.DEFAULT_LINE_MERGE_SIZE_LIMIT + ) + ); + request.setCharset(restRequest.param(FindStructureAction.Request.CHARSET.getPreferredName())); + request.setHasHeaderRow(restRequest.paramAsBoolean(FindStructureAction.Request.HAS_HEADER_ROW.getPreferredName(), null)); + } else if (request instanceof FindFieldStructureAction.Request) { + request.setLinesToSample( + restRequest.paramAsInt( + FindStructureAction.Request.DOCUMENTS_TO_SAMPLE.getPreferredName(), + TextStructureFinderManager.DEFAULT_IDEAL_SAMPLE_LINE_COUNT + ) + ); + } + + request.setTimeout( + TimeValue.parseTimeValue( + restRequest.param(FindStructureAction.Request.TIMEOUT.getPreferredName()), + DEFAULT_TIMEOUT, + FindStructureAction.Request.TIMEOUT.getPreferredName() + ) + ); + request.setFormat(restRequest.param(FindStructureAction.Request.FORMAT.getPreferredName())); + request.setColumnNames(restRequest.paramAsStringArray(FindStructureAction.Request.COLUMN_NAMES.getPreferredName(), null)); + request.setDelimiter(restRequest.param(FindStructureAction.Request.DELIMITER.getPreferredName())); + request.setQuote(restRequest.param(FindStructureAction.Request.QUOTE.getPreferredName())); + request.setShouldTrimFields(restRequest.paramAsBoolean(FindStructureAction.Request.SHOULD_TRIM_FIELDS.getPreferredName(), null)); + request.setGrokPattern(restRequest.param(FindStructureAction.Request.GROK_PATTERN.getPreferredName())); + request.setEcsCompatibility(restRequest.param(FindStructureAction.Request.ECS_COMPATIBILITY.getPreferredName())); + request.setTimestampFormat(restRequest.param(FindStructureAction.Request.TIMESTAMP_FORMAT.getPreferredName())); + request.setTimestampField(restRequest.param(FindStructureAction.Request.TIMESTAMP_FIELD.getPreferredName())); + + if (request instanceof FindMessageStructureAction.Request || request instanceof FindFieldStructureAction.Request) { + if (TextStructure.Format.DELIMITED.equals(request.getFormat())) { + request.setHasHeaderRow(false); + } + } + } +} diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinder.java 
b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinder.java index 6d7faaadae433..7fc6db9cb5c6f 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinder.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinder.java @@ -44,7 +44,7 @@ public class DelimitedTextStructureFinder implements TextStructureFinder { private final List sampleMessages; private final TextStructure structure; - static DelimitedTextStructureFinder makeDelimitedTextStructureFinder( + static DelimitedTextStructureFinder createFromSample( List explanation, String sample, String charsetName, @@ -590,6 +590,36 @@ static boolean lineHasUnescapedQuote(String line, CsvPreference csvPreference) { return false; } + static boolean canCreateFromMessages( + List explanation, + List messages, + int minFieldsPerRow, + CsvPreference csvPreference, + String formatName, + double allowedFractionOfBadLines + ) { + for (String message : messages) { + try (CsvListReader csvReader = new CsvListReader(new StringReader(message), csvPreference)) { + if (csvReader.read() == null) { + explanation.add(format("Not %s because message with no lines: [%s]", formatName, message)); + return false; + } + if (csvReader.read() != null) { + explanation.add(format("Not %s because message with multiple lines: [%s]", formatName, message)); + return false; + } + } catch (IOException e) { + explanation.add(format("Not %s because there was a parsing exception: [%s]", formatName, e.getMessage())); + return false; + } + } + + // Every line contains a single valid delimited message, so + // we can safely concatenate and run the logic for a sample. 
+ String sample = String.join("\n", messages); + return canCreateFromSample(explanation, sample, minFieldsPerRow, csvPreference, formatName, allowedFractionOfBadLines); + } + static boolean canCreateFromSample( List explanation, String sample, @@ -598,7 +628,6 @@ static boolean canCreateFromSample( String formatName, double allowedFractionOfBadLines ) { - // Logstash's CSV parser won't tolerate fields where just part of the // value is quoted, whereas SuperCSV will, hence this extra check String[] sampleLines = sample.split("\n"); @@ -619,7 +648,6 @@ static boolean canCreateFromSample( try (CsvListReader csvReader = new CsvListReader(new StringReader(sample), csvPreference)) { int fieldsInFirstRow = -1; - int fieldsInLastRow = -1; List illFormattedRows = new ArrayList<>(); int numberOfRows = 0; @@ -643,7 +671,6 @@ static boolean canCreateFromSample( ); return false; } - fieldsInLastRow = fieldsInFirstRow; continue; } @@ -676,26 +703,7 @@ static boolean canCreateFromSample( ); return false; } - continue; } - - fieldsInLastRow = fieldsInThisRow; - } - - if (fieldsInLastRow > fieldsInFirstRow) { - explanation.add( - "Not " - + formatName - + " because last row has more fields than first row: [" - + fieldsInFirstRow - + "] and [" - + fieldsInLastRow - + "]" - ); - return false; - } - if (fieldsInLastRow < fieldsInFirstRow) { - --numberOfRows; } } catch (SuperCsvException e) { // Tolerate an incomplete last row diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinderFactory.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinderFactory.java index f809665199fea..5f09fdb437fe4 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinderFactory.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinderFactory.java @@ -67,6 +67,22 @@ public boolean canCreateFromSample(List explanation, String sample, doub ); } + public boolean canCreateFromMessages(List explanation, List messages, double allowedFractionOfBadLines) { + String formatName = switch ((char) csvPreference.getDelimiterChar()) { + case ',' -> "CSV"; + case '\t' -> "TSV"; + default -> Character.getName(csvPreference.getDelimiterChar()).toLowerCase(Locale.ROOT) + " delimited values"; + }; + return DelimitedTextStructureFinder.canCreateFromMessages( + explanation, + messages, + minFieldsPerRow, + csvPreference, + formatName, + allowedFractionOfBadLines + ); + } + @Override public TextStructureFinder createFromSample( List explanation, @@ -78,7 +94,7 @@ public TextStructureFinder createFromSample( TimeoutChecker timeoutChecker ) throws IOException { CsvPreference adjustedCsvPreference = new CsvPreference.Builder(csvPreference).maxLinesPerRow(lineMergeSizeLimit).build(); - return DelimitedTextStructureFinder.makeDelimitedTextStructureFinder( + return DelimitedTextStructureFinder.createFromSample( explanation, sample, charsetName, @@ -89,4 +105,26 @@ public TextStructureFinder createFromSample( timeoutChecker ); } + + public TextStructureFinder createFromMessages( + List explanation, + List messages, + TextStructureOverrides overrides, + TimeoutChecker timeoutChecker + ) throws IOException { + // DelimitedTextStructureFinderFactory::canCreateFromMessages already + // checked that every line contains a single valid delimited message, + 
// so we can safely concatenate and run the logic for a sample. + String sample = String.join("\n", messages); + return DelimitedTextStructureFinder.createFromSample( + explanation, + sample, + "UTF-8", + null, + csvPreference, + trimFields, + overrides, + timeoutChecker + ); + } } diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/LogTextStructureFinder.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/LogTextStructureFinder.java index 4e01d32645008..c9ca6002b6c03 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/LogTextStructureFinder.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/LogTextStructureFinder.java @@ -36,7 +36,6 @@ private static LogTextStructureFinder makeSingleLineLogTextStructureFinder( String[] sampleLines, String charsetName, Boolean hasByteOrderMarker, - int lineMergeSizeLimit, TextStructureOverrides overrides, TimeoutChecker timeoutChecker ) { @@ -108,12 +107,9 @@ private static LogTextStructureFinder makeSingleLineLogTextStructureFinder( return new LogTextStructureFinder(sampleMessages, structure); } - private static LogTextStructureFinder makeMultiLineLogTextStructureFinder( + private static TimestampFormatFinder getTimestampFormatFinder( List explanation, String[] sampleLines, - String charsetName, - Boolean hasByteOrderMarker, - int lineMergeSizeLimit, TextStructureOverrides overrides, TimeoutChecker timeoutChecker ) { @@ -145,15 +141,20 @@ private static LogTextStructureFinder makeMultiLineLogTextStructureFinder( + timestampFormatFinder.getJavaTimestampFormats() ); + return timestampFormatFinder; + } + + private static Tuple, Integer> getSampleMessages( + String multiLineRegex, + String[] sampleLines, + int lineMergeSizeLimit, + TimeoutChecker timeoutChecker + ) { List sampleMessages = new ArrayList<>(); - StringBuilder preamble = new StringBuilder(); int linesConsumed = 0; StringBuilder message = null; int linesInMessage = 0; - String multiLineRegex = createMultiLineMessageStartRegex( - timestampFormatFinder.getPrefaces(), - timestampFormatFinder.getSimplePattern().pattern() - ); + Pattern multiLinePattern = Pattern.compile(multiLineRegex); for (String sampleLine : sampleLines) { if (multiLinePattern.matcher(sampleLine).find()) { @@ -195,9 +196,6 @@ private static LogTextStructureFinder makeMultiLineLogTextStructureFinder( } } timeoutChecker.check("multi-line message determination"); - if (sampleMessages.size() < 2) { - preamble.append(sampleLine).append('\n'); - } } // Don't add the last message, as it might be partial and mess up subsequent pattern finding @@ -209,8 +207,24 @@ private static LogTextStructureFinder makeMultiLineLogTextStructureFinder( ); } - // null to allow GC before Grok pattern search - sampleLines = null; + return new Tuple<>(sampleMessages, linesConsumed); + } + + private static LogTextStructureFinder makeMultiLineLogTextStructureFinder( + List explanation, + List sampleMessages, + String charsetName, + Boolean hasByteOrderMarker, + TextStructureOverrides overrides, + int linesConsumed, + TimestampFormatFinder timestampFormatFinder, + String multiLineRegex, + TimeoutChecker timeoutChecker + ) { + StringBuilder preamble = new StringBuilder(); + for (int i = 0; i < sampleMessages.size() && i < 2; i++) { + preamble.append(sampleMessages.get(i)).append('\n'); + } TextStructure.Builder structureBuilder = new 
TextStructure.Builder(TextStructure.Format.SEMI_STRUCTURED_TEXT).setCharset( charsetName @@ -300,6 +314,80 @@ private static LogTextStructureFinder makeMultiLineLogTextStructureFinder( return new LogTextStructureFinder(sampleMessages, structure); } + private static LogTextStructureFinder makeMultiLineLogTextStructureFinder( + List explanation, + String[] sampleLines, + String charsetName, + Boolean hasByteOrderMarker, + int lineMergeSizeLimit, + TextStructureOverrides overrides, + TimeoutChecker timeoutChecker + ) { + TimestampFormatFinder timestampFormatFinder = getTimestampFormatFinder(explanation, sampleLines, overrides, timeoutChecker); + + String multiLineRegex = createMultiLineMessageStartRegex( + timestampFormatFinder.getPrefaces(), + timestampFormatFinder.getSimplePattern().pattern() + ); + + Tuple, Integer> sampleMessagesAndLinesConsumed = getSampleMessages( + multiLineRegex, + sampleLines, + lineMergeSizeLimit, + timeoutChecker + ); + List sampleMessages = sampleMessagesAndLinesConsumed.v1(); + int linesConsumed = sampleMessagesAndLinesConsumed.v2(); + + // null to allow GC before Grok pattern search + sampleLines = null; + + return makeMultiLineLogTextStructureFinder( + explanation, + sampleMessages, + charsetName, + hasByteOrderMarker, + overrides, + linesConsumed, + timestampFormatFinder, + multiLineRegex, + timeoutChecker + ); + } + + private static LogTextStructureFinder makeMultiLineLogTextStructureFinder( + List explanation, + List messages, + String charsetName, + Boolean hasByteOrderMarker, + TextStructureOverrides overrides, + TimeoutChecker timeoutChecker + ) { + TimestampFormatFinder timestampFormatFinder = getTimestampFormatFinder( + explanation, + messages.toArray(new String[0]), + overrides, + timeoutChecker + ); + + String multiLineRegex = createMultiLineMessageStartRegex( + timestampFormatFinder.getPrefaces(), + timestampFormatFinder.getSimplePattern().pattern() + ); + + return makeMultiLineLogTextStructureFinder( + explanation, + messages, + charsetName, + hasByteOrderMarker, + overrides, + messages.size(), + timestampFormatFinder, + multiLineRegex, + timeoutChecker + ); + } + static LogTextStructureFinder makeLogTextStructureFinder( List explanation, String sample, @@ -316,7 +404,6 @@ static LogTextStructureFinder makeLogTextStructureFinder( sampleLines, charsetName, hasByteOrderMarker, - lineMergeSizeLimit, overrides, timeoutChecker ); @@ -333,6 +420,28 @@ static LogTextStructureFinder makeLogTextStructureFinder( } } + static LogTextStructureFinder makeLogTextStructureFinder( + List explanation, + List messages, + String charsetName, + Boolean hasByteOrderMarker, + TextStructureOverrides overrides, + TimeoutChecker timeoutChecker + ) { + if (TextStructureUtils.NULL_TIMESTAMP_FORMAT.equals(overrides.getTimestampFormat())) { + return makeSingleLineLogTextStructureFinder( + explanation, + messages.toArray(new String[0]), + charsetName, + hasByteOrderMarker, + overrides, + timeoutChecker + ); + } else { + return makeMultiLineLogTextStructureFinder(explanation, messages, charsetName, hasByteOrderMarker, overrides, timeoutChecker); + } + } + private LogTextStructureFinder(List sampleMessages, TextStructure structure) { this.sampleMessages = Collections.unmodifiableList(sampleMessages); this.structure = structure; diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/LogTextStructureFinderFactory.java 
b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/LogTextStructureFinderFactory.java index d3978946ce908..24532e9fdaae4 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/LogTextStructureFinderFactory.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/LogTextStructureFinderFactory.java @@ -40,6 +40,10 @@ public boolean canCreateFromSample(List explanation, String sample, doub return true; } + public boolean canCreateFromMessages(List explanation, List messages, double allowedFractionOfBadLines) { + return true; + } + @Override public TextStructureFinder createFromSample( List explanation, @@ -60,4 +64,13 @@ public TextStructureFinder createFromSample( timeoutChecker ); } + + public TextStructureFinder createFromMessages( + List explanation, + List messages, + TextStructureOverrides overrides, + TimeoutChecker timeoutChecker + ) { + return LogTextStructureFinder.makeLogTextStructureFinder(explanation, messages, "UTF-8", null, overrides, timeoutChecker); + } } diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/NdJsonTextStructureFinderFactory.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/NdJsonTextStructureFinderFactory.java index 5afba653dde6c..c98010d12e2fb 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/NdJsonTextStructureFinderFactory.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/NdJsonTextStructureFinderFactory.java @@ -72,6 +72,16 @@ public boolean canCreateFromSample(List explanation, String sample, doub return true; } + public boolean canCreateFromMessages(List explanation, List messages, double allowedFractionOfBadLines) { + for (String message : messages) { + if (message.contains("\n")) { + explanation.add("Not NDJSON because message contains multiple lines: [" + message + "]"); + return false; + } + } + return canCreateFromSample(explanation, String.join("\n", messages), allowedFractionOfBadLines); + } + @Override public TextStructureFinder createFromSample( List explanation, @@ -92,6 +102,19 @@ public TextStructureFinder createFromSample( ); } + public TextStructureFinder createFromMessages( + List explanation, + List messages, + TextStructureOverrides overrides, + TimeoutChecker timeoutChecker + ) throws IOException { + // NdJsonTextStructureFinderFactory::canCreateFromMessages already + // checked that every line contains a single valid JSON message, + // so we can safely concatenate and run the logic for a sample. 
+ String sample = String.join("\n", messages); + return NdJsonTextStructureFinder.makeNdJsonTextStructureFinder(explanation, sample, "UTF-8", null, overrides, timeoutChecker); + } + private static class ContextPrintingStringReader extends StringReader { private final String str; diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureFinderFactory.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureFinderFactory.java index 63970dd2c58d9..1e8317400d09d 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureFinderFactory.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureFinderFactory.java @@ -33,6 +33,8 @@ public interface TextStructureFinderFactory { */ boolean canCreateFromSample(List explanation, String sample, double allowedFractionOfBadLines); + boolean canCreateFromMessages(List explanation, List messages, double allowedFractionOfBadMessages); + /** * Create an object representing the structure of some text. * @param explanation List of reasons for making decisions. May contain items when passed and new reasons @@ -56,4 +58,11 @@ TextStructureFinder createFromSample( TextStructureOverrides overrides, TimeoutChecker timeoutChecker ) throws Exception; + + TextStructureFinder createFromMessages( + List explanation, + List messages, + TextStructureOverrides overrides, + TimeoutChecker timeoutChecker + ) throws Exception; } diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureFinderManager.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureFinderManager.java index c0a100fbb280d..899f6c9108060 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureFinderManager.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureFinderManager.java @@ -13,7 +13,7 @@ import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; -import org.elasticsearch.xpack.core.textstructure.action.FindStructureAction; +import org.elasticsearch.xpack.core.textstructure.action.AbstractFindStructureRequest; import org.elasticsearch.xpack.core.textstructure.structurefinder.TextStructure; import java.io.BufferedInputStream; @@ -310,7 +310,7 @@ public TextStructureFinder findTextStructure(Integer idealSampleLineCount, Integ * Given a stream of text data, determine its structure. * @param idealSampleLineCount Ideally, how many lines from the stream will be read to determine the structure? * If the stream has fewer lines then an attempt will still be made, providing at - * least {@link FindStructureAction#MIN_SAMPLE_LINE_COUNT} lines can be read. If + * least {@link AbstractFindStructureRequest#MIN_SAMPLE_LINE_COUNT} lines can be read. If * null the value of {@link #DEFAULT_IDEAL_SAMPLE_LINE_COUNT} will be used. * @param lineMergeSizeLimit Maximum number of characters permitted when lines are merged to create messages. * If null the value of {@link #DEFAULT_LINE_MERGE_SIZE_LIMIT} will be used. 
@@ -383,11 +383,11 @@ public TextStructureFinder findTextStructure( sampleReader = charsetMatch.getReader(); } - assert idealSampleLineCount >= FindStructureAction.MIN_SAMPLE_LINE_COUNT; + assert idealSampleLineCount >= AbstractFindStructureRequest.MIN_SAMPLE_LINE_COUNT; Tuple sampleInfo = sampleText( sampleReader, charsetName, - FindStructureAction.MIN_SAMPLE_LINE_COUNT, + AbstractFindStructureRequest.MIN_SAMPLE_LINE_COUNT, idealSampleLineCount, timeoutChecker ); @@ -413,6 +413,23 @@ public TextStructureFinder findTextStructure( } } + public TextStructureFinder findTextStructure(List messages, TextStructureOverrides overrides, TimeValue timeout) + throws Exception { + List explanation = new ArrayList<>(); + try (TimeoutChecker timeoutChecker = new TimeoutChecker("structure analysis", timeout, scheduler)) { + return makeBestStructureFinder(explanation, messages, overrides, timeoutChecker); + } catch (Exception e) { + // Add a dummy exception containing the explanation so far - this can be invaluable for troubleshooting as incorrect + // decisions made early on in the structure analysis can result in seemingly crazy decisions or timeouts later on + if (explanation.isEmpty() == false) { + e.addSuppressed( + new ElasticsearchException(explanation.stream().collect(Collectors.joining("]\n[", "Explanation so far:\n[", "]\n"))) + ); + } + throw e; + } + } + CharsetMatch findCharset(List explanation, InputStream inputStream, TimeoutChecker timeoutChecker) throws Exception { // We need an input stream that supports mark and reset, so wrap the argument @@ -551,24 +568,12 @@ CharsetMatch findCharset(List explanation, InputStream inputStream, Time ); } - TextStructureFinder makeBestStructureFinder( - List explanation, - String sample, - String charsetName, - Boolean hasByteOrderMarker, - int lineMergeSizeLimit, - TextStructureOverrides overrides, - TimeoutChecker timeoutChecker - ) throws Exception { - + List getFactories(TextStructureOverrides overrides) { Character delimiter = overrides.getDelimiter(); Character quote = overrides.getQuote(); Boolean shouldTrimFields = overrides.getShouldTrimFields(); List factories; - double allowedFractionOfBadLines = 0.0; if (delimiter != null) { - allowedFractionOfBadLines = DelimitedTextStructureFinderFactory.DELIMITER_OVERRIDDEN_ALLOWED_FRACTION_OF_BAD_LINES; - // If a precise delimiter is specified, we only need one structure finder // factory, and we'll tolerate as little as one column in the input factories = Collections.singletonList( @@ -581,8 +586,6 @@ TextStructureFinder makeBestStructureFinder( ); } else if (quote != null || shouldTrimFields != null || TextStructure.Format.DELIMITED.equals(overrides.getFormat())) { - allowedFractionOfBadLines = DelimitedTextStructureFinderFactory.FORMAT_OVERRIDDEN_ALLOWED_FRACTION_OF_BAD_LINES; - // The delimiter is not specified, but some other aspect of delimited text is, // so clone our default delimited factories altering the overridden values factories = ORDERED_STRUCTURE_FACTORIES.stream() @@ -599,6 +602,34 @@ TextStructureFinder makeBestStructureFinder( } + return factories; + } + + private double getAllowedFractionOfBadLines(TextStructureOverrides overrides) { + Character delimiter = overrides.getDelimiter(); + Character quote = overrides.getQuote(); + Boolean shouldTrimFields = overrides.getShouldTrimFields(); + if (delimiter != null) { + return DelimitedTextStructureFinderFactory.DELIMITER_OVERRIDDEN_ALLOWED_FRACTION_OF_BAD_LINES; + } else if (quote != null || shouldTrimFields != null || 
TextStructure.Format.DELIMITED.equals(overrides.getFormat())) { + return DelimitedTextStructureFinderFactory.FORMAT_OVERRIDDEN_ALLOWED_FRACTION_OF_BAD_LINES; + } else { + return 0.0; + } + } + + TextStructureFinder makeBestStructureFinder( + List explanation, + String sample, + String charsetName, + Boolean hasByteOrderMarker, + int lineMergeSizeLimit, + TextStructureOverrides overrides, + TimeoutChecker timeoutChecker + ) throws Exception { + List factories = getFactories(overrides); + double allowedFractionOfBadLines = getAllowedFractionOfBadLines(overrides); + for (TextStructureFinderFactory factory : factories) { timeoutChecker.check("high level format detection"); if (factory.canCreateFromSample(explanation, sample, allowedFractionOfBadLines)) { @@ -620,6 +651,28 @@ TextStructureFinder makeBestStructureFinder( ); } + private TextStructureFinder makeBestStructureFinder( + List explanation, + List messages, + TextStructureOverrides overrides, + TimeoutChecker timeoutChecker + ) throws Exception { + List factories = getFactories(overrides); + double allowedFractionOfBadLines = getAllowedFractionOfBadLines(overrides); + + for (TextStructureFinderFactory factory : factories) { + timeoutChecker.check("high level format detection"); + if (factory.canCreateFromMessages(explanation, messages, allowedFractionOfBadLines)) { + return factory.createFromMessages(explanation, messages, overrides, timeoutChecker); + } + } + + throw new IllegalArgumentException( + "Input did not match " + + ((overrides.getFormat() == null) ? "any known formats" : "the specified format [" + overrides.getFormat() + "]") + ); + } + private Tuple sampleText(Reader reader, String charsetName, int minLines, int maxLines, TimeoutChecker timeoutChecker) throws IOException { diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureOverrides.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureOverrides.java index 5ba4e464508f1..303cb2a59ea16 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureOverrides.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureOverrides.java @@ -6,6 +6,7 @@ */ package org.elasticsearch.xpack.textstructure.structurefinder; +import org.elasticsearch.xpack.core.textstructure.action.AbstractFindStructureRequest; import org.elasticsearch.xpack.core.textstructure.action.FindStructureAction; import org.elasticsearch.xpack.core.textstructure.structurefinder.TextStructure; @@ -37,7 +38,7 @@ public class TextStructureOverrides { private final String ecsCompatibility; - public TextStructureOverrides(FindStructureAction.Request request) { + public TextStructureOverrides(AbstractFindStructureRequest request) { this( request.getCharset(), diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/XmlTextStructureFinderFactory.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/XmlTextStructureFinderFactory.java index 10f65564c3dde..2f56c73616866 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/XmlTextStructureFinderFactory.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/XmlTextStructureFinderFactory.java @@ -46,7 +46,42 @@ public 
boolean canFindFormat(TextStructure.Format format) { */ @Override public boolean canCreateFromSample(List explanation, String sample, double allowedFractionOfBadLines) { + int completeDocCount = parseXml(explanation, sample); + if (completeDocCount == -1) { + return false; + } + if (completeDocCount == 0) { + explanation.add("Not XML because sample didn't contain a complete document"); + return false; + } + explanation.add("Deciding sample is XML"); + return true; + } + + public boolean canCreateFromMessages(List explanation, List messages, double allowedFractionOfBadLines) { + for (String message : messages) { + int completeDocCount = parseXml(explanation, message); + if (completeDocCount == -1) { + return false; + } + if (completeDocCount == 0) { + explanation.add("Not XML because a message didn't contain a complete document"); + return false; + } + if (completeDocCount > 1) { + explanation.add("Not XML because a message contains multiple documents"); + return false; + } + } + explanation.add("Deciding sample is XML"); + return true; + } + /** + * Tries to parse the sample as XML. + * @return -1 if invalid, otherwise the number of complete docs + */ + private int parseXml(List explanation, String sample) { int completeDocCount = 0; String commonRootElementName = null; String remainder = sample.trim(); @@ -80,14 +115,14 @@ public boolean canCreateFromSample(List explanation, String sample, doub + rootElementName + "]" ); - return false; + return -1; } } break; case XMLStreamReader.END_ELEMENT: if (--nestingLevel < 0) { explanation.add("Not XML because an end element occurs before a start element"); - return false; + return -1; } break; } @@ -111,7 +146,7 @@ public boolean canCreateFromSample(List explanation, String sample, doub + remainder + "]" ); - return false; + return -1; } } endPos += location.getColumnNumber() - 1; @@ -125,17 +160,11 @@ public boolean canCreateFromSample(List explanation, String sample, doub } } catch (IOException | XMLStreamException e) { explanation.add("Not XML because there was a parsing exception: [" + e.getMessage().replaceAll("\\s?\r?\n\\s?", " ") + "]"); - return false; + return -1; } } - if (completeDocCount == 0) { - explanation.add("Not XML because sample didn't contain a complete document"); - return false; - } - - explanation.add("Deciding sample is XML"); - return true; + return completeDocCount; } @Override @@ -157,4 +186,17 @@ public TextStructureFinder createFromSample( timeoutChecker ); } + + public TextStructureFinder createFromMessages( + List explanation, + List messages, + TextStructureOverrides overrides, + TimeoutChecker timeoutChecker + ) throws IOException, ParserConfigurationException, SAXException { + // XmlTextStructureFinderFactory::canCreateFromMessages already + // checked that every message contains a single valid XML document, + // so we can safely concatenate and run the logic for a sample.
+ String sample = String.join("\n", messages); + return XmlTextStructureFinder.makeXmlTextStructureFinder(explanation, sample, "UTF-8", null, overrides, timeoutChecker); + } } diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/transport/TransportFindFieldStructureAction.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/transport/TransportFindFieldStructureAction.java new file mode 100644 index 0000000000000..43a990f6f565b --- /dev/null +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/transport/TransportFindFieldStructureAction.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.textstructure.transport; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.client.internal.ParentTaskAssigningClient; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.ml.utils.MapHelper; +import org.elasticsearch.xpack.core.textstructure.action.AbstractFindStructureRequest; +import org.elasticsearch.xpack.core.textstructure.action.FindFieldStructureAction; +import org.elasticsearch.xpack.core.textstructure.action.FindStructureResponse; +import org.elasticsearch.xpack.textstructure.structurefinder.TextStructureFinder; +import org.elasticsearch.xpack.textstructure.structurefinder.TextStructureFinderManager; +import org.elasticsearch.xpack.textstructure.structurefinder.TextStructureOverrides; + +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; + +public class TransportFindFieldStructureAction extends HandledTransportAction { + + private final Client client; + private final TransportService transportService; + private final ThreadPool threadPool; + + @Inject + public TransportFindFieldStructureAction( + TransportService transportService, + ActionFilters actionFilters, + Client client, + ThreadPool threadPool + ) { + super(FindFieldStructureAction.NAME, transportService, actionFilters, FindFieldStructureAction.Request::new, threadPool.generic()); + this.client = client; + this.transportService = transportService; + this.threadPool = threadPool; + } + + @Override + protected void doExecute(Task task, FindFieldStructureAction.Request request, ActionListener listener) { + TaskId taskId = new TaskId(transportService.getLocalNode().getId(), task.getId()); + new ParentTaskAssigningClient(client, taskId).prepareSearch(request.getIndex()) + .setSize(request.getLinesToSample()) + .setFetchSource(true) + .setQuery(QueryBuilders.existsQuery(request.getField())) + .setFetchSource(new String[] { request.getField() }, null) + .execute(ActionListener.wrap(searchResponse -> { + long hitCount = searchResponse.getHits().getHits().length; + if (hitCount < 
AbstractFindStructureRequest.MIN_SAMPLE_LINE_COUNT) { + listener.onFailure( + new IllegalArgumentException("Input contained too few lines [" + hitCount + "] to obtain a meaningful sample") + ); + return; + } + List messages = getMessages(searchResponse, request.getField()); + try { + listener.onResponse(buildTextStructureResponse(messages, request)); + } catch (Exception e) { + listener.onFailure(e); + } + }, listener::onFailure)); + } + + private List getMessages(SearchResponse searchResponse, String field) { + return Arrays.stream(searchResponse.getHits().getHits()) + .map(hit -> MapHelper.dig(field, Objects.requireNonNull(hit.getSourceAsMap())).toString()) + .collect(Collectors.toList()); + } + + private FindStructureResponse buildTextStructureResponse(List messages, FindFieldStructureAction.Request request) + throws Exception { + TextStructureFinderManager structureFinderManager = new TextStructureFinderManager(threadPool.scheduler()); + TextStructureFinder textStructureFinder = structureFinderManager.findTextStructure( + messages, + new TextStructureOverrides(request), + request.getTimeout() + ); + return new FindStructureResponse(textStructureFinder.getStructure()); + } +} diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/transport/TransportFindMessageStructureAction.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/transport/TransportFindMessageStructureAction.java new file mode 100644 index 0000000000000..79c21b3cea306 --- /dev/null +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/transport/TransportFindMessageStructureAction.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ +package org.elasticsearch.xpack.textstructure.transport; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.textstructure.action.FindMessageStructureAction; +import org.elasticsearch.xpack.core.textstructure.action.FindStructureResponse; +import org.elasticsearch.xpack.textstructure.structurefinder.TextStructureFinder; +import org.elasticsearch.xpack.textstructure.structurefinder.TextStructureFinderManager; +import org.elasticsearch.xpack.textstructure.structurefinder.TextStructureOverrides; + +public class TransportFindMessageStructureAction extends HandledTransportAction { + + private final ThreadPool threadPool; + + @Inject + public TransportFindMessageStructureAction(TransportService transportService, ActionFilters actionFilters, ThreadPool threadPool) { + super( + FindMessageStructureAction.NAME, + transportService, + actionFilters, + FindMessageStructureAction.Request::new, + threadPool.generic() + ); + this.threadPool = threadPool; + } + + @Override + protected void doExecute(Task task, FindMessageStructureAction.Request request, ActionListener listener) { + try { + listener.onResponse(buildTextStructureResponse(request)); + } catch (Exception e) { + listener.onFailure(e); + } + } + + private FindStructureResponse buildTextStructureResponse(FindMessageStructureAction.Request request) throws Exception { + TextStructureFinderManager structureFinderManager = new TextStructureFinderManager(threadPool.scheduler()); + TextStructureFinder textStructureFinder = structureFinderManager.findTextStructure( + request.getMessages(), + new TextStructureOverrides(request), + request.getTimeout() + ); + return new FindStructureResponse(textStructureFinder.getStructure()); + } +} diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/transport/TransportFindStructureAction.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/transport/TransportFindStructureAction.java index 8bf0f1cd4395f..4257a36bc150a 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/transport/TransportFindStructureAction.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/transport/TransportFindStructureAction.java @@ -10,53 +10,38 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.textstructure.action.FindStructureAction; +import org.elasticsearch.xpack.core.textstructure.action.FindStructureResponse; import org.elasticsearch.xpack.textstructure.structurefinder.TextStructureFinder; import org.elasticsearch.xpack.textstructure.structurefinder.TextStructureFinderManager; import org.elasticsearch.xpack.textstructure.structurefinder.TextStructureOverrides; import java.io.InputStream; -import static org.elasticsearch.threadpool.ThreadPool.Names.GENERIC; - -public class 
TransportFindStructureAction extends HandledTransportAction { +public class TransportFindStructureAction extends HandledTransportAction { private final ThreadPool threadPool; @Inject public TransportFindStructureAction(TransportService transportService, ActionFilters actionFilters, ThreadPool threadPool) { - super( - FindStructureAction.NAME, - transportService, - actionFilters, - FindStructureAction.Request::new, - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + super(FindStructureAction.NAME, transportService, actionFilters, FindStructureAction.Request::new, threadPool.generic()); this.threadPool = threadPool; } @Override - protected void doExecute(Task task, FindStructureAction.Request request, ActionListener listener) { - - // As determining the text structure might take a while, we run - // in a different thread to avoid blocking the network thread. - threadPool.executor(GENERIC).execute(() -> { - try { - listener.onResponse(buildTextStructureResponse(request)); - } catch (Exception e) { - listener.onFailure(e); - } - }); + protected void doExecute(Task task, FindStructureAction.Request request, ActionListener listener) { + try { + listener.onResponse(buildTextStructureResponse(request)); + } catch (Exception e) { + listener.onFailure(e); + } } - private FindStructureAction.Response buildTextStructureResponse(FindStructureAction.Request request) throws Exception { - + private FindStructureResponse buildTextStructureResponse(FindStructureAction.Request request) throws Exception { TextStructureFinderManager structureFinderManager = new TextStructureFinderManager(threadPool.scheduler()); - try (InputStream sampleStream = request.getSample().streamInput()) { TextStructureFinder textStructureFinder = structureFinderManager.findTextStructure( request.getLinesToSample(), @@ -65,8 +50,7 @@ private FindStructureAction.Response buildTextStructureResponse(FindStructureAct new TextStructureOverrides(request), request.getTimeout() ); - - return new FindStructureAction.Response(textStructureFinder.getStructure()); + return new FindStructureResponse(textStructureFinder.getStructure()); } } } diff --git a/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinderFactoryTests.java b/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinderFactoryTests.java index cd8c451ee0547..e28de72202460 100644 --- a/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinderFactoryTests.java +++ b/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinderFactoryTests.java @@ -6,6 +6,9 @@ */ package org.elasticsearch.xpack.textstructure.structurefinder; +import java.util.Arrays; +import java.util.List; + public class DelimitedTextStructureFinderFactoryTests extends TextStructureTestCase { private final TextStructureFinderFactory csvFactory = new DelimitedTextStructureFinderFactory(',', '"', 2, false); @@ -40,6 +43,21 @@ public void testCanCreateCsvFromSampleGivenText() { assertFalse(csvFactory.canCreateFromSample(explanation, TEXT_SAMPLE, 0.0)); } + public void testCanCreateCsvFromMessagesCsv() { + List messages = Arrays.asList(CSV_SAMPLE.split("\n")); + assertTrue(csvFactory.canCreateFromMessages(explanation, messages, 0.0)); + } + + public void testCanCreateCsvFromMessagesCsv_multipleCsvRowsPerMessage() { + List messages = List.of(CSV_SAMPLE, CSV_SAMPLE, 
CSV_SAMPLE); + assertFalse(csvFactory.canCreateFromMessages(explanation, messages, 0.0)); + } + + public void testCanCreateCsvFromMessagesCsv_emptyMessages() { + List messages = List.of("", "", ""); + assertFalse(csvFactory.canCreateFromMessages(explanation, messages, 0.0)); + } + // TSV - no need to check NDJSON, XML or CSV because they come earlier in the order we check formats public void testCanCreateTsvFromSampleGivenTsv() { diff --git a/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinderTests.java b/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinderTests.java index 478994178c5bc..62e06af809711 100644 --- a/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinderTests.java +++ b/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinderTests.java @@ -790,6 +790,30 @@ public void testCreateConfigsGivenDotInFieldName() throws Exception { assertEquals(Collections.singleton("properties"), structure.getMappings().keySet()); } + public void testCreateFromMessages() throws Exception { + List messages = List.of("a,b,c", "d,e,f", "g,h,i"); + assertTrue(csvFactory.canCreateFromMessages(explanation, messages, 0.0)); + TextStructureFinder structureFinder = csvFactory.createFromMessages( + explanation, + messages, + TextStructureOverrides.EMPTY_OVERRIDES, + NOOP_TIMEOUT_CHECKER + ); + TextStructure structure = structureFinder.getStructure(); + assertEquals(TextStructure.Format.DELIMITED, structure.getFormat()); + assertEquals(3, structure.getNumMessagesAnalyzed()); + } + + public void testCreateFromMessages_multipleRowPerMessage() { + List messages = List.of("a,b,c\nd,e,f", "g,h,i"); + assertFalse(csvFactory.canCreateFromMessages(explanation, messages, 0.0)); + } + + public void testCreateFromMessages_emptyMessage() { + List messages = List.of("a,b,c", "", "d,e,f"); + assertFalse(csvFactory.canCreateFromMessages(explanation, messages, 0.0)); + } + public void testFindHeaderFromSampleGivenHeaderInSample() throws IOException { String withHeader = """ time,airline,responsetime,sourcetype diff --git a/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/LogTextStructureFinderTests.java b/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/LogTextStructureFinderTests.java index 4ee651408af56..484fde023be6b 100644 --- a/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/LogTextStructureFinderTests.java +++ b/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/LogTextStructureFinderTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.xpack.core.textstructure.structurefinder.TextStructure; import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; @@ -106,6 +107,21 @@ public void testCreateConfigsGivenElasticsearchLog() throws Exception { assertTrue(keys.contains("@timestamp")); } + public void testCreateFromMessages() throws Exception { + List messages = List.of(TEXT_SAMPLE.split("\n")); + assertTrue(factory.canCreateFromMessages(explanation, messages, 0.0)); + + TextStructureFinder structureFinder = factory.createFromMessages( + explanation, + messages, + 
TextStructureOverrides.EMPTY_OVERRIDES, + NOOP_TIMEOUT_CHECKER + ); + + TextStructure structure = structureFinder.getStructure(); + assertEquals("\\[%{TIMESTAMP_ISO8601:timestamp}\\]\\[%{LOGLEVEL:loglevel} \\]\\[.*", structure.getGrokPattern()); + } + public void testCreateConfigsGivenElasticsearchLogWithNoTimestamps() throws Exception { assertTrue(factory.canCreateFromSample(explanation, TEXT_WITH_NO_TIMESTAMPS_SAMPLE, 0.0)); diff --git a/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/NdJsonTextStructureFinderFactoryTests.java b/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/NdJsonTextStructureFinderFactoryTests.java index 85baf238630bb..dac202df8e811 100644 --- a/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/NdJsonTextStructureFinderFactoryTests.java +++ b/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/NdJsonTextStructureFinderFactoryTests.java @@ -6,6 +6,9 @@ */ package org.elasticsearch.xpack.textstructure.structurefinder; +import java.util.Arrays; +import java.util.List; + public class NdJsonTextStructureFinderFactoryTests extends TextStructureTestCase { private final TextStructureFinderFactory factory = new NdJsonTextStructureFinderFactory(); @@ -15,6 +18,21 @@ public void testCanCreateFromSampleGivenNdJson() { assertTrue(factory.canCreateFromSample(explanation, NDJSON_SAMPLE, 0.0)); } + public void testCanCreateFromMessages() { + List messages = Arrays.asList(NDJSON_SAMPLE.split("\n")); + assertTrue(factory.canCreateFromMessages(explanation, messages, 0.0)); + } + + public void testCanCreateFromMessages_multipleJsonLinesPerMessage() { + List messages = List.of(NDJSON_SAMPLE, NDJSON_SAMPLE, NDJSON_SAMPLE); + assertFalse(factory.canCreateFromMessages(explanation, messages, 0.0)); + } + + public void testCanCreateFromMessages_emptyMessages() { + List messages = List.of("", "", ""); + assertFalse(factory.canCreateFromMessages(explanation, messages, 0.0)); + } + public void testCanCreateFromSampleGivenXml() { assertFalse(factory.canCreateFromSample(explanation, XML_SAMPLE, 0.0)); diff --git a/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/XmlTextStructureFinderFactoryTests.java b/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/XmlTextStructureFinderFactoryTests.java index ea92420a1ea5a..7340c0c3dff00 100644 --- a/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/XmlTextStructureFinderFactoryTests.java +++ b/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/XmlTextStructureFinderFactoryTests.java @@ -6,6 +6,9 @@ */ package org.elasticsearch.xpack.textstructure.structurefinder; +import java.util.Arrays; +import java.util.List; + public class XmlTextStructureFinderFactoryTests extends TextStructureTestCase { private final TextStructureFinderFactory factory = new XmlTextStructureFinderFactory(); @@ -17,6 +20,21 @@ public void testCanCreateFromSampleGivenXml() { assertTrue(factory.canCreateFromSample(explanation, XML_SAMPLE, 0.0)); } + public void testCanCreateFromMessages() { + List messages = Arrays.asList(XML_SAMPLE.split("\n\n")); + assertTrue(factory.canCreateFromMessages(explanation, messages, 0.0)); + } + + public void testCanCreateFromMessages_multipleXmlDocsPerMessage() { + List messages = 
List.of(XML_SAMPLE, XML_SAMPLE, XML_SAMPLE); + assertFalse(factory.canCreateFromMessages(explanation, messages, 0.0)); + } + + public void testCanCreateFromMessages_emptyMessages() { + List messages = List.of("", "", ""); + assertFalse(factory.canCreateFromMessages(explanation, messages, 0.0)); + } + public void testCanCreateFromSampleGivenCsv() { assertFalse(factory.canCreateFromSample(explanation, CSV_SAMPLE, 0.0)); diff --git a/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformInsufficientPermissionsIT.java b/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformInsufficientPermissionsIT.java index dc48ceb7b309b..105633c7340e5 100644 --- a/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformInsufficientPermissionsIT.java +++ b/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformInsufficientPermissionsIT.java @@ -418,16 +418,14 @@ public void testTransformPermissionsDeferUnattendedNoDest() throws Exception { ); assertRed(transformId, authIssue); - startTransform(config.getId(), RequestOptions.DEFAULT); - - // Give the transform indexer enough time to try creating destination index - Thread.sleep(5_000); + startTransform(transformId, RequestOptions.DEFAULT); String destIndexIssue = Strings.format("Could not create destination index [%s] for transform [%s]", destIndexName, transformId); // transform's auth state status is still RED due to: // - lacking permissions // - and the inability to create destination index in the indexer (which is also a consequence of lacking permissions) - assertRed(transformId, authIssue, destIndexIssue); + // wait for 10 seconds to give the transform indexer enough time to try creating destination index + assertBusy(() -> { assertRed(transformId, authIssue, destIndexIssue); }); // update transform's credentials so that the transform has permission to access source/dest indices updateConfig(transformId, "{}", RequestOptions.DEFAULT.toBuilder().addHeader(AUTH_KEY, Users.SENIOR.header).build()); @@ -442,6 +440,7 @@ public void testTransformPermissionsDeferUnattendedNoDest() throws Exception { * unattended = true * pre-existing dest index = true */ + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105794") public void testTransformPermissionsDeferUnattendedDest() throws Exception { String transformId = "transform-permissions-defer-unattended-dest-exists"; String sourceIndexName = transformId + "-index"; @@ -593,5 +592,7 @@ private void assertRed(String transformId, String... expectedHealthIssueDetails) .map(issue -> (String) extractValue((Map) issue, "details")) .collect(toSet()); assertThat("Stats were: " + stats, actualHealthIssueDetailsSet, containsInAnyOrder(expectedHealthIssueDetails)); + // We should not progress beyond the 0th checkpoint until we correctly configure the Transform. 
+ assertThat("Stats were: " + stats, getCheckpoint(stats), equalTo(0L)); } } diff --git a/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java b/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java index eed849d35ea44..897de6c120a8b 100644 --- a/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java +++ b/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java @@ -294,14 +294,15 @@ protected void waitUntilCheckpoint(String id, long checkpoint) throws Exception } protected void waitUntilCheckpoint(String id, long checkpoint, TimeValue waitTime) throws Exception { - assertBusy( - () -> assertEquals( - checkpoint, - ((Integer) XContentMapValues.extractValue("checkpointing.last.checkpoint", getBasicTransformStats(id))).longValue() - ), - waitTime.getMillis(), - TimeUnit.MILLISECONDS - ); + assertBusy(() -> assertEquals(checkpoint, getCheckpoint(id)), waitTime.getMillis(), TimeUnit.MILLISECONDS); + } + + protected long getCheckpoint(String id) throws IOException { + return getCheckpoint(getBasicTransformStats(id)); + } + + protected long getCheckpoint(Map stats) { + return ((Integer) XContentMapValues.extractValue("checkpointing.last.checkpoint", stats)).longValue(); } protected DateHistogramGroupSource createDateHistogramGroupSourceWithFixedInterval( diff --git a/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformUsingSearchRuntimeFieldsIT.java b/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformUsingSearchRuntimeFieldsIT.java index 9f4a15029f05f..2e509bedbce39 100644 --- a/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformUsingSearchRuntimeFieldsIT.java +++ b/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformUsingSearchRuntimeFieldsIT.java @@ -28,12 +28,12 @@ import java.time.Instant; import java.time.temporal.ChronoUnit; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.stream.Collectors; import static java.util.Collections.singletonMap; +import static java.util.Map.entry; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -49,39 +49,31 @@ public class TransformUsingSearchRuntimeFieldsIT extends TransformRestTestCase { private static final int NUM_USERS = 28; private static Map createRuntimeMappings() { - return new HashMap<>() { - { - put("user-upper", new HashMap<>() { - { - put("type", "keyword"); - put( - "script", - singletonMap("source", "if (params._source.user_id != null) {emit(params._source.user_id.toUpperCase())}") - ); - } - }); - put("stars", new HashMap<>() { - { - put("type", "long"); - } - }); - put("stars-x2", new HashMap<>() { - { - put("type", "long"); - put("script", singletonMap("source", "if (params._source.stars != null) {emit(2 * params._source.stars)}")); - } - }); - put("timestamp-5m", new HashMap<>() { - { - put("type", "date"); - put( - "script", - singletonMap("source", "emit(doc['timestamp'].value.toInstant().minus(5, 
ChronoUnit.MINUTES).toEpochMilli())") - ); - } - }); - } - }; + return Map.ofEntries( + entry( + "user-upper", + Map.of( + "type", + "keyword", + "script", + Map.of("source", "if (params._source.user_id != null) {emit(params._source.user_id.toUpperCase())}") + ) + ), + entry("stars", Map.of("type", "long")), + entry( + "stars-x2", + Map.of("type", "long", "script", Map.of("source", "if (params._source.stars != null) {emit(2 * params._source.stars)}")) + ), + entry( + "timestamp-5m", + Map.of( + "type", + "date", + "script", + Map.of("source", "emit(doc['timestamp'].value.toInstant().minus(5, ChronoUnit.MINUTES).toEpochMilli())") + ) + ) + ); } @Before @@ -114,17 +106,15 @@ public void testPivotTransform() throws Exception { var previewResponse = previewTransform(Strings.toString(config), RequestOptions.DEFAULT); // Verify preview mappings - Map expectedMappingProperties = new HashMap<>() { - { - put("by-user", singletonMap("type", "keyword")); - put("review_score", singletonMap("type", "double")); - put("review_score_max", singletonMap("type", "long")); - put("review_score_rt_avg", singletonMap("type", "double")); - put("review_score_rt_max", singletonMap("type", "long")); - put("timestamp", singletonMap("type", "date")); - put("timestamp_rt", singletonMap("type", "date")); - } - }; + Map expectedMappingProperties = Map.ofEntries( + entry("by-user", Map.of("type", "keyword")), + entry("review_score", Map.of("type", "double")), + entry("review_score_max", Map.of("type", "long")), + entry("review_score_rt_avg", Map.of("type", "double")), + entry("review_score_rt_max", Map.of("type", "long")), + entry("timestamp", Map.of("type", "date")), + entry("timestamp_rt", Map.of("type", "date")) + ); var generatedMappings = (Map) XContentMapValues.extractValue("generated_dest_index.mappings", previewResponse); assertThat(generatedMappings, allOf(hasKey("_meta"), hasEntry("properties", expectedMappingProperties))); // Verify preview contents @@ -167,20 +157,16 @@ public void testPivotTransform() throws Exception { public void testPivotTransform_BadRuntimeFieldScript() throws Exception { String destIndexName = "reviews-by-user-pivot"; String transformId = "transform-with-st-rt-fields-pivot"; - Map runtimeMappings = new HashMap<>() { - { - put("user-upper", new HashMap<>() { - { - put("type", "keyword"); - // Method name used in the script is misspelled, i.e.: "toUperCase" instead of "toUpperCase" - put( - "script", - singletonMap("source", "if (params._source.user_id != null) {emit(params._source.user_id.toUperCase())}") - ); - } - }); - } - }; + Map runtimeMappings = Map.of( + "user-upper", + Map.of( + "type", + "keyword", + // Method name used in the script is misspelled, i.e.: "toUperCase" instead of "toUpperCase" + "script", + Map.of("source", "if (params._source.user_id != null) {emit(params._source.user_id.toUperCase())}") + ) + ); Map groups = singletonMap("by-user", new TermsGroupSource("user-upper", null, false)); AggregatorFactories.Builder aggs = AggregatorFactories.builder() @@ -273,20 +259,16 @@ public void testLatestTransform() throws Exception { public void testLatestTransform_BadRuntimeFieldScript() throws Exception { String destIndexName = "reviews-by-user-latest"; String transformId = "transform-with-st-rt-fields-latest"; - Map runtimeMappings = new HashMap<>() { - { - put("user-upper", new HashMap<>() { - { - put("type", "keyword"); - // Method name used in the script is misspelled, i.e.: "toUperCase" instead of "toUpperCase" - put( - "script", - singletonMap("source", "if 
(params._source.user_id != null) {emit(params._source.user_id.toUperCase())}") - ); - } - }); - } - }; + Map runtimeMappings = Map.of( + "user-upper", + Map.of( + "type", + "keyword", + // Method name used in the script is misspelled, i.e.: "toUperCase" instead of "toUpperCase" + "script", + Map.of("source", "if (params._source.user_id != null) {emit(params._source.user_id.toUperCase())}") + ) + ); SourceConfig sourceConfig = new SourceConfig(new String[] { REVIEWS_INDEX_NAME }, QueryConfig.matchAll(), runtimeMappings); TransformConfig configWithRuntimeFields = createTransformConfigBuilder(transformId, destIndexName, QueryConfig.matchAll(), "dummy") diff --git a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/integration/TransformNoRemoteClusterClientNodeIT.java b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/integration/TransformNoRemoteClusterClientNodeIT.java index 21ff1dded6ae5..5090a00211ff4 100644 --- a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/integration/TransformNoRemoteClusterClientNodeIT.java +++ b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/integration/TransformNoRemoteClusterClientNodeIT.java @@ -50,7 +50,7 @@ public void testPreviewTransformWithRemoteIndex() { e.getMessage(), allOf( containsString("No appropriate node to run on"), - containsString("transform requires a remote connection but remote is disabled") + containsString("transform requires a remote connection but the node does not have the remote_cluster_client role") ) ); } @@ -74,7 +74,7 @@ public void testPutTransformWithRemoteIndex_NoDeferValidation() { e.getMessage(), allOf( containsString("No appropriate node to run on"), - containsString("transform requires a remote connection but remote is disabled") + containsString("transform requires a remote connection but the node does not have the remote_cluster_client role") ) ); } @@ -140,7 +140,7 @@ public void testUpdateTransformWithRemoteIndex_NoDeferValidation() { e.getMessage(), allOf( containsString("No appropriate node to run on"), - containsString("transform requires a remote connection but remote is disabled") + containsString("transform requires a remote connection but the node does not have the remote_cluster_client role") ) ); } diff --git a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/integration/TransformProgressIT.java b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/integration/TransformProgressIT.java index c62ff49ae6865..dbe09663abc20 100644 --- a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/integration/TransformProgressIT.java +++ b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/integration/TransformProgressIT.java @@ -160,7 +160,7 @@ public void assertGetProgress(int userWithMissingBuckets) throws Exception { null ); - Pivot pivot = new Pivot(pivotConfig, new SettingsConfig(), TransformConfigVersion.CURRENT, Collections.emptySet()); + Pivot pivot = new Pivot(pivotConfig, SettingsConfig.EMPTY, TransformConfigVersion.CURRENT, Collections.emptySet()); TransformProgress progress = getProgress(pivot, getProgressQuery(pivot, config.getSource().getIndex(), null)); @@ -188,7 +188,7 @@ public void assertGetProgress(int userWithMissingBuckets) throws Exception { Collections.singletonMap("every_50", new 
HistogramGroupSource("missing_field", null, missingBucket, 50.0)) ); pivotConfig = new PivotConfig(histgramGroupConfig, aggregationConfig, null); - pivot = new Pivot(pivotConfig, new SettingsConfig(), TransformConfigVersion.CURRENT, Collections.emptySet()); + pivot = new Pivot(pivotConfig, SettingsConfig.EMPTY, TransformConfigVersion.CURRENT, Collections.emptySet()); progress = getProgress( pivot, diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPreviewTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPreviewTransformAction.java index 79644fac07579..f14ac9a534f28 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPreviewTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPreviewTransformAction.java @@ -52,6 +52,7 @@ import org.elasticsearch.xpack.core.transform.transforms.SyncConfig; import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; import org.elasticsearch.xpack.core.transform.transforms.TransformDestIndexSettings; +import org.elasticsearch.xpack.core.transform.transforms.TransformEffectiveSettings; import org.elasticsearch.xpack.transform.TransformExtensionHolder; import org.elasticsearch.xpack.transform.persistence.TransformIndex; import org.elasticsearch.xpack.transform.transforms.Function; @@ -289,7 +290,7 @@ private void getPreview( }, listener::onFailure); ActionListener> deduceMappingsListener = ActionListener.wrap(deducedMappings -> { - if (Boolean.FALSE.equals(settingsConfig.getDeduceMappings())) { + if (TransformEffectiveSettings.isDeduceMappingsDisabled(settingsConfig)) { mappings.set(emptyMap()); } else { mappings.set(deducedMappings); diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPutTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPutTransformAction.java index 2c9fc8ffce5bf..8a82880f4d9a3 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPutTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPutTransformAction.java @@ -42,13 +42,10 @@ import org.elasticsearch.xpack.transform.notifications.TransformAuditor; import org.elasticsearch.xpack.transform.persistence.AuthorizationStatePersistenceUtils; import org.elasticsearch.xpack.transform.persistence.TransformConfigManager; -import org.elasticsearch.xpack.transform.transforms.Function; import org.elasticsearch.xpack.transform.transforms.FunctionFactory; import java.time.Instant; -import java.util.List; -import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.transform.utils.SecondaryAuthorizationUtils.getSecurityHeadersPreferringSecondary; public class TransportPutTransformAction extends AcknowledgedTransportMasterNodeAction { @@ -108,21 +105,19 @@ protected void masterOperation(Task task, Request request, ClusterState clusterS } // <3> Create the transform - ActionListener validateTransformListener = ActionListener.wrap( - unusedValidationResponse -> putTransform(request, listener), - listener::onFailure + ActionListener validateTransformListener = listener.delegateFailureAndWrap( + (l, unused) -> putTransform(request, l) ); // <2> Validate source and destination indices - ActionListener checkPrivilegesListener = 
ActionListener.wrap( - aVoid -> ClientHelper.executeAsyncWithOrigin( + ActionListener checkPrivilegesListener = validateTransformListener.delegateFailureAndWrap( + (l, aVoid) -> ClientHelper.executeAsyncWithOrigin( client, ClientHelper.TRANSFORM_ORIGIN, ValidateTransformAction.INSTANCE, new ValidateTransformAction.Request(config, request.isDeferValidation(), request.timeout()), - validateTransformListener - ), - listener::onFailure + l + ) ); // <1> Early check to verify that the user can create the destination index and can read from the source @@ -170,24 +165,19 @@ protected ClusterBlockException checkBlock(PutTransformAction.Request request, C } private void putTransform(Request request, ActionListener listener) { - - final TransformConfig config = request.getConfig(); - // create the function for validation - final Function function = FunctionFactory.create(config); - - // <2> Return to the listener - ActionListener putTransformConfigurationListener = ActionListener.wrap(putTransformConfigurationResult -> { - logger.debug("[{}] created transform", config.getId()); - auditor.info(config.getId(), "Created transform."); - List warnings = TransformConfigLinter.getWarnings(function, config.getSource(), config.getSyncConfig()); - for (String warning : warnings) { - logger.warn(() -> format("[%s] %s", config.getId(), warning)); - auditor.warning(config.getId(), warning); - } - listener.onResponse(AcknowledgedResponse.TRUE); - }, listener::onFailure); - - // <1> Put our transform - transformConfigManager.putTransformConfiguration(config, putTransformConfigurationListener); + var config = request.getConfig(); + transformConfigManager.putTransformConfiguration(config, listener.delegateFailureAndWrap((l, unused) -> { + var transformId = config.getId(); + logger.debug("[{}] created transform", transformId); + auditor.info(transformId, "Created transform."); + + var validationFunc = FunctionFactory.create(config); + TransformConfigLinter.getWarnings(validationFunc, config.getSource(), config.getSyncConfig()).forEach(warning -> { + logger.warn("[{}] {}", transformId, warning); + auditor.warning(transformId, warning); + }); + + l.onResponse(AcknowledgedResponse.TRUE); + })); } } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java index db24470433003..01359f351f07a 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java @@ -39,6 +39,7 @@ import org.elasticsearch.xpack.core.transform.action.ValidateTransformAction; import org.elasticsearch.xpack.core.transform.transforms.AuthorizationState; import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformEffectiveSettings; import org.elasticsearch.xpack.core.transform.transforms.TransformState; import org.elasticsearch.xpack.core.transform.transforms.TransformTaskParams; import org.elasticsearch.xpack.core.transform.transforms.TransformTaskState; @@ -159,6 +160,7 @@ protected void masterOperation( transformTask.getId(), TransformTaskParams.NAME, transformTask, + null, newPersistentTaskActionListener ); } else { @@ -186,7 +188,7 @@ protected void masterOperation( // <3> If the destination index exists, 
start the task, otherwise deduce our mappings for the destination index and create it ActionListener validationListener = ActionListener.wrap(validationResponse -> { - if (Boolean.TRUE.equals(transformConfigHolder.get().getSettings().getUnattended())) { + if (TransformEffectiveSettings.isUnattended(transformConfigHolder.get().getSettings())) { logger.debug( () -> format("[%s] Skip dest index creation as this is an unattended transform", transformConfigHolder.get().getId()) ); @@ -204,7 +206,7 @@ protected void masterOperation( createOrGetIndexListener ); }, e -> { - if (Boolean.TRUE.equals(transformConfigHolder.get().getSettings().getUnattended())) { + if (TransformEffectiveSettings.isUnattended(transformConfigHolder.get().getSettings())) { logger.debug( () -> format("[%s] Skip dest index creation as this is an unattended transform", transformConfigHolder.get().getId()) ); @@ -267,7 +269,7 @@ protected void masterOperation( ActionListener getTransformListener = ActionListener.wrap(config -> { transformConfigHolder.set(config); - if (Boolean.TRUE.equals(config.getSettings().getUnattended())) { + if (TransformEffectiveSettings.isUnattended(config.getSettings())) { // We do not fail the _start request of the unattended transform due to permission issues, // we just let it run fetchAuthStateListener.onResponse(null); @@ -286,7 +288,7 @@ protected ClusterBlockException checkBlock(StartTransformAction.Request request, } private void cancelTransformTask(String taskId, String transformId, Exception exception, Consumer onFailure) { - persistentTasksService.sendRemoveRequest(taskId, new ActionListener<>() { + persistentTasksService.sendRemoveRequest(taskId, null, new ActionListener<>() { @Override public void onResponse(PersistentTasksCustomMetadata.PersistentTask task) { // We succeeded in canceling the persistent task, but the diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStopTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStopTransformAction.java index 54d33f0df3638..a309aaa2e4e0e 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStopTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStopTransformAction.java @@ -513,7 +513,7 @@ private ActionListener cancelTransformTasksWithNoAssignment( ); for (String unassignedTaskId : transformNodeAssignments.getWaitingForAssignment()) { - persistentTasksService.sendRemoveRequest(unassignedTaskId, groupedListener); + persistentTasksService.sendRemoveRequest(unassignedTaskId, null, groupedListener); } }, e -> { @@ -525,7 +525,7 @@ private ActionListener cancelTransformTasksWithNoAssignment( ); for (String unassignedTaskId : transformNodeAssignments.getWaitingForAssignment()) { - persistentTasksService.sendRemoveRequest(unassignedTaskId, groupedListener); + persistentTasksService.sendRemoveRequest(unassignedTaskId, null, groupedListener); } }); return doExecuteListener; diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/TimeBasedCheckpointProvider.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/TimeBasedCheckpointProvider.java index ec4cc2dcbcbf4..f49d5fc96f3ab 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/TimeBasedCheckpointProvider.java +++ 
b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/TimeBasedCheckpointProvider.java @@ -22,6 +22,7 @@ import org.elasticsearch.xpack.core.transform.transforms.TimeSyncConfig; import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpoint; import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformEffectiveSettings; import org.elasticsearch.xpack.core.transform.transforms.pivot.DateHistogramGroupSource; import org.elasticsearch.xpack.core.transform.transforms.pivot.SingleGroupSource; import org.elasticsearch.xpack.transform.notifications.TransformAuditor; @@ -109,7 +110,7 @@ public void createNextCheckpoint(final TransformCheckpoint lastCheckpoint, final * @return function aligning the given timestamp with date histogram interval */ private static Function createAlignTimestampFunction(TransformConfig transformConfig) { - if (Boolean.FALSE.equals(transformConfig.getSettings().getAlignCheckpoints())) { + if (TransformEffectiveSettings.isAlignCheckpointsDisabled(transformConfig.getSettings())) { return identity(); } // In case of transforms created before aligning timestamp optimization was introduced we assume the default was "false". diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/IndexBasedTransformConfigManager.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/IndexBasedTransformConfigManager.java index 1d44ed5a1f8ef..40eb2e2ad294a 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/IndexBasedTransformConfigManager.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/IndexBasedTransformConfigManager.java @@ -14,6 +14,9 @@ import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction; @@ -60,6 +63,7 @@ import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.action.util.ExpandedIdsMatcher; import org.elasticsearch.xpack.core.action.util.PageParams; import org.elasticsearch.xpack.core.transform.TransformField; @@ -76,10 +80,10 @@ import java.util.HashSet; import java.util.List; import java.util.Set; +import java.util.function.BiConsumer; import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; import static org.elasticsearch.xpack.core.ClientHelper.TRANSFORM_ORIGIN; -import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; /** * Place of all interactions with the internal transforms index. 
For configuration and mappings see @link{TransformInternalIndex} @@ -135,9 +139,7 @@ public void putTransformCheckpoint(TransformCheckpoint checkpoint, ActionListene .id(TransformCheckpoint.documentId(checkpoint.getTransformId(), checkpoint.getCheckpoint())) .source(source); - executeAsyncWithOrigin(client, TRANSFORM_ORIGIN, TransportIndexAction.TYPE, indexRequest, ActionListener.wrap(r -> { - listener.onResponse(true); - }, listener::onFailure)); + executeAsyncWithOrigin(TransportIndexAction.TYPE, indexRequest, listener.delegateFailureAndWrap((l, r) -> l.onResponse(true))); } catch (IOException e) { // not expected to happen but for the sake of completeness listener.onFailure(e); @@ -180,22 +182,16 @@ public void deleteOldTransformConfigurations(String transformId, ActionListener< ) ); - executeAsyncWithOrigin( - client, - TRANSFORM_ORIGIN, - DeleteByQueryAction.INSTANCE, - deleteByQueryRequest, - ActionListener.wrap(response -> { - if ((response.getBulkFailures().isEmpty() && response.getSearchFailures().isEmpty()) == false) { - Tuple statusAndReason = getStatusAndReason(response); - listener.onFailure( - new ElasticsearchStatusException(statusAndReason.v2().getMessage(), statusAndReason.v1(), statusAndReason.v2()) - ); - return; - } - listener.onResponse(true); - }, listener::onFailure) - ); + executeAsyncWithOrigin(DeleteByQueryAction.INSTANCE, deleteByQueryRequest, listener.delegateFailureAndWrap((l, response) -> { + if ((response.getBulkFailures().isEmpty() && response.getSearchFailures().isEmpty()) == false) { + Tuple statusAndReason = getStatusAndReason(response); + l.onFailure( + new ElasticsearchStatusException(statusAndReason.v2().getMessage(), statusAndReason.v1(), statusAndReason.v2()) + ); + return; + } + l.onResponse(true); + })); } @Override @@ -212,22 +208,7 @@ public void deleteOldTransformStoredDocuments(String transformId, ActionListener .filter(QueryBuilders.termQuery("_id", TransformStoredDoc.documentId(transformId))) ) ); - executeAsyncWithOrigin( - client, - TRANSFORM_ORIGIN, - DeleteByQueryAction.INSTANCE, - deleteByQueryRequest, - ActionListener.wrap(response -> { - if ((response.getBulkFailures().isEmpty() && response.getSearchFailures().isEmpty()) == false) { - Tuple statusAndReason = getStatusAndReason(response); - listener.onFailure( - new ElasticsearchStatusException(statusAndReason.v2().getMessage(), statusAndReason.v1(), statusAndReason.v2()) - ); - return; - } - listener.onResponse(response.getDeleted()); - }, listener::onFailure) - ); + deleteByQuery(listener, deleteByQueryRequest); } @Override @@ -247,22 +228,20 @@ public void deleteOldCheckpoints(String transformId, long deleteCheckpointsBelow ) ); logger.debug("Deleting old checkpoints using {}", deleteByQueryRequest.getSearchRequest()); - executeAsyncWithOrigin( - client, - TRANSFORM_ORIGIN, - DeleteByQueryAction.INSTANCE, - deleteByQueryRequest, - ActionListener.wrap(response -> { - if ((response.getBulkFailures().isEmpty() && response.getSearchFailures().isEmpty()) == false) { - Tuple statusAndReason = getStatusAndReason(response); - listener.onFailure( - new ElasticsearchStatusException(statusAndReason.v2().getMessage(), statusAndReason.v1(), statusAndReason.v2()) - ); - return; - } - listener.onResponse(response.getDeleted()); - }, listener::onFailure) - ); + deleteByQuery(listener, deleteByQueryRequest); + } + + private void deleteByQuery(ActionListener listener, DeleteByQueryRequest deleteByQueryRequest) { + executeAsyncWithOrigin(DeleteByQueryAction.INSTANCE, deleteByQueryRequest, 
listener.delegateFailureAndWrap((l, response) -> { + if ((response.getBulkFailures().isEmpty() && response.getSearchFailures().isEmpty()) == false) { + Tuple statusAndReason = getStatusAndReason(response); + l.onFailure( + new ElasticsearchStatusException(statusAndReason.v2().getMessage(), statusAndReason.v1(), statusAndReason.v2()) + ); + return; + } + l.onResponse(response.getDeleted()); + })); } @Override @@ -304,13 +283,13 @@ public void deleteOldIndices(ActionListener listener) { IndicesOptions.LENIENT_EXPAND_OPEN ); - executeAsyncWithOrigin(client, TRANSFORM_ORIGIN, TransportDeleteIndexAction.TYPE, deleteRequest, ActionListener.wrap(response -> { + executeAsyncWithOrigin(TransportDeleteIndexAction.TYPE, deleteRequest, listener.delegateFailureAndWrap((l, response) -> { if (response.isAcknowledged() == false) { - listener.onFailure(new ElasticsearchStatusException("Failed to delete internal indices", RestStatus.INTERNAL_SERVER_ERROR)); + l.onFailure(new ElasticsearchStatusException("Failed to delete internal indices", RestStatus.INTERNAL_SERVER_ERROR)); return; } - listener.onResponse(true); - }, listener::onFailure)); + l.onResponse(true); + })); } private void putTransformConfiguration( @@ -331,9 +310,7 @@ private void putTransformConfiguration( if (seqNoPrimaryTermAndIndex != null) { indexRequest.setIfSeqNo(seqNoPrimaryTermAndIndex.getSeqNo()).setIfPrimaryTerm(seqNoPrimaryTermAndIndex.getPrimaryTerm()); } - executeAsyncWithOrigin(client, TRANSFORM_ORIGIN, TransportIndexAction.TYPE, indexRequest, ActionListener.wrap(r -> { - listener.onResponse(true); - }, e -> { + executeAsyncWithOrigin(TransportIndexAction.TYPE, indexRequest, ActionListener.wrap(r -> listener.onResponse(true), e -> { if (e instanceof VersionConflictEngineException) { if (DocWriteRequest.OpType.CREATE.equals(opType)) { // we want to create the transform but it already exists listener.onFailure( @@ -378,22 +355,16 @@ public void getTransformCheckpoint(String transformId, long checkpoint, ActionLi .setAllowPartialSearchResults(false) .request(); - executeAsyncWithOrigin( - client, - TRANSFORM_ORIGIN, - TransportSearchAction.TYPE, - searchRequest, - ActionListener.wrap(searchResponse -> { - if (searchResponse.getHits().getHits().length == 0) { - // do not fail if checkpoint does not exist but return an empty checkpoint - logger.trace("found no checkpoint for transform [" + transformId + "], returning empty checkpoint"); - resultListener.onResponse(TransformCheckpoint.EMPTY); - return; - } - BytesReference source = searchResponse.getHits().getHits()[0].getSourceRef(); - parseCheckpointsLenientlyFromSource(source, transformId, resultListener); - }, resultListener::onFailure) - ); + executeAsyncWithOrigin(TransportSearchAction.TYPE, searchRequest, resultListener.delegateFailureAndWrap((l, searchResponse) -> { + if (searchResponse.getHits().getHits().length == 0) { + // do not fail if checkpoint does not exist but return an empty checkpoint + logger.trace("found no checkpoint for transform [{}], returning empty checkpoint", transformId); + l.onResponse(TransformCheckpoint.EMPTY); + return; + } + BytesReference source = searchResponse.getHits().getHits()[0].getSourceRef(); + parseCheckpointsLenientlyFromSource(source, transformId, l); + })); } @Override @@ -416,14 +387,12 @@ public void getTransformCheckpointForUpdate( .request(); executeAsyncWithOrigin( - client, - TRANSFORM_ORIGIN, TransportSearchAction.TYPE, searchRequest, - ActionListener.wrap(searchResponse -> { + checkpointAndVersionListener.delegateFailureAndWrap((l, 
searchResponse) -> { if (searchResponse.getHits().getHits().length == 0) { // do not fail, this _must_ be handled by the caller - checkpointAndVersionListener.onResponse(null); + l.onResponse(null); return; } SearchHit hit = searchResponse.getHits().getHits()[0]; @@ -431,17 +400,16 @@ public void getTransformCheckpointForUpdate( parseCheckpointsLenientlyFromSource( source, transformId, - ActionListener.wrap( - parsedCheckpoint -> checkpointAndVersionListener.onResponse( + l.delegateFailureAndWrap( + (ll, parsedCheckpoint) -> ll.onResponse( Tuple.tuple( parsedCheckpoint, new SeqNoPrimaryTermAndIndex(hit.getSeqNo(), hit.getPrimaryTerm(), hit.getIndex()) ) - ), - checkpointAndVersionListener::onFailure + ) ) ); - }, checkpointAndVersionListener::onFailure) + }) ); } @@ -459,22 +427,16 @@ public void getTransformConfiguration(String transformId, ActionListenerwrap(searchResponse -> { - if (searchResponse.getHits().getHits().length == 0) { - resultListener.onFailure( - new ResourceNotFoundException(TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, transformId)) - ); - return; - } - BytesReference source = searchResponse.getHits().getHits()[0].getSourceRef(); - parseTransformLenientlyFromSource(source, transformId, resultListener); - }, resultListener::onFailure) - ); + executeAsyncWithOrigin(TransportSearchAction.TYPE, searchRequest, resultListener.delegateFailureAndWrap((l, searchResponse) -> { + if (searchResponse.getHits().getHits().length == 0) { + l.onFailure( + new ResourceNotFoundException(TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, transformId)) + ); + return; + } + BytesReference source = searchResponse.getHits().getHits()[0].getSourceRef(); + parseTransformLenientlyFromSource(source, transformId, l); + })); } @Override @@ -495,26 +457,29 @@ public void getTransformConfigurationForUpdate( .seqNoAndPrimaryTerm(true) .request(); - executeAsyncWithOrigin(client, TRANSFORM_ORIGIN, TransportSearchAction.TYPE, searchRequest, ActionListener.wrap(searchResponse -> { - if (searchResponse.getHits().getHits().length == 0) { - configAndVersionListener.onFailure( - new ResourceNotFoundException(TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, transformId)) + executeAsyncWithOrigin( + TransportSearchAction.TYPE, + searchRequest, + configAndVersionListener.delegateFailureAndWrap((l, searchResponse) -> { + if (searchResponse.getHits().getHits().length == 0) { + l.onFailure( + new ResourceNotFoundException(TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, transformId)) + ); + return; + } + SearchHit hit = searchResponse.getHits().getHits()[0]; + BytesReference source = hit.getSourceRef(); + parseTransformLenientlyFromSource( + source, + transformId, + l.delegateFailureAndWrap( + (ll, config) -> ll.onResponse( + Tuple.tuple(config, new SeqNoPrimaryTermAndIndex(hit.getSeqNo(), hit.getPrimaryTerm(), hit.getIndex())) + ) + ) ); - return; - } - SearchHit hit = searchResponse.getHits().getHits()[0]; - BytesReference source = hit.getSourceRef(); - parseTransformLenientlyFromSource( - source, - transformId, - ActionListener.wrap( - config -> configAndVersionListener.onResponse( - Tuple.tuple(config, new SeqNoPrimaryTermAndIndex(hit.getSeqNo(), hit.getPrimaryTerm(), hit.getIndex())) - ), - configAndVersionListener::onFailure - ) - ); - }, configAndVersionListener::onFailure)); + }) + ); } @Override @@ -543,48 +508,40 @@ public void expandTransformIds( final ExpandedIdsMatcher requiredMatches = new 
ExpandedIdsMatcher(idTokens, allowNoMatch); - executeAsyncWithOrigin( - client.threadPool().getThreadContext(), - TRANSFORM_ORIGIN, - request, - ActionListener.wrap(searchResponse -> { - long totalHits = searchResponse.getHits().getTotalHits().value; - // important: preserve order - Set ids = Sets.newLinkedHashSetWithExpectedSize(searchResponse.getHits().getHits().length); - Set configs = Sets.newLinkedHashSetWithExpectedSize(searchResponse.getHits().getHits().length); - for (SearchHit hit : searchResponse.getHits().getHits()) { - try (XContentParser parser = createParser(hit)) { - TransformConfig config = TransformConfig.fromXContent(parser, null, true); - if (ids.add(config.getId())) { - configs.add(config); - } - } catch (IOException e) { - foundConfigsListener.onFailure(new ElasticsearchParseException("failed to parse search hit for ids", e)); - return; + executeAsyncWithOrigin(request, foundConfigsListener.delegateFailureAndWrap((l, searchResponse) -> { + long totalHits = searchResponse.getHits().getTotalHits().value; + // important: preserve order + Set ids = Sets.newLinkedHashSetWithExpectedSize(searchResponse.getHits().getHits().length); + Set configs = Sets.newLinkedHashSetWithExpectedSize(searchResponse.getHits().getHits().length); + for (SearchHit hit : searchResponse.getHits().getHits()) { + try (XContentParser parser = createParser(hit)) { + TransformConfig config = TransformConfig.fromXContent(parser, null, true); + if (ids.add(config.getId())) { + configs.add(config); } - } - requiredMatches.filterMatchedIds(ids); - if (requiredMatches.hasUnmatchedIds()) { - // some required Ids were not found - foundConfigsListener.onFailure( - new ResourceNotFoundException( - TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, requiredMatches.unmatchedIdsString()) - ) - ); + } catch (IOException e) { + l.onFailure(new ElasticsearchParseException("failed to parse search hit for ids", e)); return; } - // if only exact ids have been given, take the count from docs to avoid potential duplicates - // in versioned indexes (like transform) - if (requiredMatches.isOnlyExact()) { - foundConfigsListener.onResponse( - new Tuple<>((long) ids.size(), Tuple.tuple(new ArrayList<>(ids), new ArrayList<>(configs))) - ); - } else { - foundConfigsListener.onResponse(new Tuple<>(totalHits, Tuple.tuple(new ArrayList<>(ids), new ArrayList<>(configs)))); - } - }, foundConfigsListener::onFailure), - client::search - ); + } + requiredMatches.filterMatchedIds(ids); + if (requiredMatches.hasUnmatchedIds()) { + // some required Ids were not found + l.onFailure( + new ResourceNotFoundException( + TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, requiredMatches.unmatchedIdsString()) + ) + ); + return; + } + // if only exact ids have been given, take the count from docs to avoid potential duplicates + // in versioned indexes (like transform) + if (requiredMatches.isOnlyExact()) { + l.onResponse(new Tuple<>((long) ids.size(), Tuple.tuple(new ArrayList<>(ids), new ArrayList<>(configs)))); + } else { + l.onResponse(new Tuple<>(totalHits, Tuple.tuple(new ArrayList<>(ids), new ArrayList<>(configs)))); + } + }), client::search); } private XContentParser createParser(BytesReference source) throws IOException { @@ -601,12 +558,7 @@ private XContentParser createParser(SearchHit hit) throws IOException { @Override public void getAllTransformIds(TimeValue timeout, ActionListener> listener) { - expandAllTransformIds( - false, - MAX_RESULTS_WINDOW, - timeout, - ActionListener.wrap(r -> 
listener.onResponse(r.v2()), listener::onFailure) - ); + expandAllTransformIds(false, MAX_RESULTS_WINDOW, timeout, listener.delegateFailureAndWrap((l, r) -> l.onResponse(r.v2()))); } @Override @@ -616,7 +568,7 @@ public void getAllOutdatedTransformIds(TimeValue timeout, ActionListener listener) { - ActionListener deleteListener = ActionListener.wrap(dbqResponse -> { listener.onResponse(true); }, e -> { + ActionListener deleteListener = ActionListener.wrap(dbqResponse -> listener.onResponse(true), e -> { if (e.getClass() == IndexNotFoundException.class) { listener.onFailure( new ResourceNotFoundException(TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, transformId)) @@ -636,7 +588,7 @@ public void resetTransform(String transformId, ActionListener listener) .query(QueryBuilders.termQuery(TransformField.ID.getPreferredName(), transformId)) .trackTotalHitsUpTo(1) ); - executeAsyncWithOrigin(client, TRANSFORM_ORIGIN, TransportSearchAction.TYPE, searchRequest, ActionListener.wrap(searchResponse -> { + executeAsyncWithOrigin(TransportSearchAction.TYPE, searchRequest, deleteListener.delegateFailureAndWrap((l, searchResponse) -> { if (searchResponse.getHits().getTotalHits().value == 0) { listener.onFailure( new ResourceNotFoundException(TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, transformId)) @@ -655,8 +607,8 @@ public void resetTransform(String transformId, ActionListener listener) TransformInternalIndexConstants.INDEX_NAME_PATTERN, TransformInternalIndexConstants.INDEX_NAME_PATTERN_DEPRECATED ).setQuery(dbqQuery).setRefresh(true); - executeAsyncWithOrigin(client, TRANSFORM_ORIGIN, DeleteByQueryAction.INSTANCE, dbqRequest, deleteListener); - }, deleteListener::onFailure)); + executeAsyncWithOrigin(DeleteByQueryAction.INSTANCE, dbqRequest, l); + })); } @Override @@ -668,7 +620,7 @@ public void deleteTransform(String transformId, ActionListener listener request.setQuery(query); request.setRefresh(true); - executeAsyncWithOrigin(client, TRANSFORM_ORIGIN, DeleteByQueryAction.INSTANCE, request, ActionListener.wrap(deleteResponse -> { + executeAsyncWithOrigin(DeleteByQueryAction.INSTANCE, request, ActionListener.wrap(deleteResponse -> { if (deleteResponse.getDeleted() == 0) { listener.onFailure( new ResourceNotFoundException(TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, transformId)) @@ -714,8 +666,6 @@ public void putOrUpdateTransformStoredDoc( } executeAsyncWithOrigin( - client, - TRANSFORM_ORIGIN, TransportIndexAction.TYPE, indexRequest, ActionListener.wrap( @@ -758,38 +708,30 @@ public void getTransformStoredDoc( .seqNoAndPrimaryTerm(true) .request(); - executeAsyncWithOrigin( - client, - TRANSFORM_ORIGIN, - TransportSearchAction.TYPE, - searchRequest, - ActionListener.wrap(searchResponse -> { - if (searchResponse.getHits().getHits().length == 0) { - if (allowNoMatch) { - resultListener.onResponse(null); - } else { - resultListener.onFailure( - new ResourceNotFoundException( - TransformMessages.getMessage(TransformMessages.UNKNOWN_TRANSFORM_STATS, transformId) - ) - ); - } - return; - } - SearchHit searchHit = searchResponse.getHits().getHits()[0]; - try (XContentParser parser = createParser(searchHit)) { - resultListener.onResponse( - Tuple.tuple(TransformStoredDoc.fromXContent(parser), SeqNoPrimaryTermAndIndex.fromSearchHit(searchHit)) - ); - } catch (Exception e) { - logger.error( - TransformMessages.getMessage(TransformMessages.FAILED_TO_PARSE_TRANSFORM_STATISTICS_CONFIGURATION, transformId), - e + 
executeAsyncWithOrigin(TransportSearchAction.TYPE, searchRequest, resultListener.delegateFailureAndWrap((l, searchResponse) -> { + if (searchResponse.getHits().getHits().length == 0) { + if (allowNoMatch) { + l.onResponse(null); + } else { + l.onFailure( + new ResourceNotFoundException(TransformMessages.getMessage(TransformMessages.UNKNOWN_TRANSFORM_STATS, transformId)) ); - resultListener.onFailure(e); } - }, resultListener::onFailure) - ); + return; + } + SearchHit searchHit = searchResponse.getHits().getHits()[0]; + try (XContentParser parser = createParser(searchHit)) { + resultListener.onResponse( + Tuple.tuple(TransformStoredDoc.fromXContent(parser), SeqNoPrimaryTermAndIndex.fromSearchHit(searchHit)) + ); + } catch (Exception e) { + logger.error( + TransformMessages.getMessage(TransformMessages.FAILED_TO_PARSE_TRANSFORM_STATISTICS_CONFIGURATION, transformId), + e + ); + resultListener.onFailure(e); + } + })); } @Override @@ -816,43 +758,50 @@ public void getTransformStoredDocs( .setTimeout(timeout) .request(); - executeAsyncWithOrigin( - client.threadPool().getThreadContext(), - TRANSFORM_ORIGIN, - searchRequest, - ActionListener.wrap(searchResponse -> { - List stats = new ArrayList<>(); - String previousId = null; - for (SearchHit hit : searchResponse.getHits().getHits()) { - // skip old versions - if (hit.getId().equals(previousId) == false) { - previousId = hit.getId(); - try (XContentParser parser = createParser(hit)) { - stats.add(TransformStoredDoc.fromXContent(parser)); - } catch (IOException e) { - listener.onFailure(new ElasticsearchParseException("failed to parse transform stats from search hit", e)); - return; - } + executeAsyncWithOrigin(searchRequest, listener.delegateFailureAndWrap((l, searchResponse) -> { + List stats = new ArrayList<>(); + String previousId = null; + for (SearchHit hit : searchResponse.getHits().getHits()) { + // skip old versions + if (hit.getId().equals(previousId) == false) { + previousId = hit.getId(); + try (XContentParser parser = createParser(hit)) { + stats.add(TransformStoredDoc.fromXContent(parser)); + } catch (IOException e) { + l.onFailure(new ElasticsearchParseException("failed to parse transform stats from search hit", e)); + return; } } - - listener.onResponse(stats); - }, listener::onFailure), - client::search - ); + } + l.onResponse(stats); + }), client::search); } @Override public void refresh(ActionListener listener) { executeAsyncWithOrigin( - client.threadPool().getThreadContext(), - TRANSFORM_ORIGIN, new RefreshRequest(TransformInternalIndexConstants.LATEST_INDEX_NAME), - ActionListener.wrap(r -> listener.onResponse(true), listener::onFailure), + listener.delegateFailureAndWrap((l, r) -> l.onResponse(true)), client.admin().indices()::refresh ); } + private void executeAsyncWithOrigin( + Request request, + ActionListener listener, + BiConsumer> consumer + ) { + ClientHelper.executeAsyncWithOrigin(client.threadPool().getThreadContext(), TRANSFORM_ORIGIN, request, listener, consumer); + } + + private void executeAsyncWithOrigin( + ActionType action, + Request request, + ActionListener listener + ) { + ClientHelper.executeAsyncWithOrigin(client, TRANSFORM_ORIGIN, action, request, listener); + } + private void parseTransformLenientlyFromSource( BytesReference source, String transformId, @@ -950,51 +899,45 @@ private void recursiveExpandAllTransformIds( ) .request(); - executeAsyncWithOrigin( - client.threadPool().getThreadContext(), - TRANSFORM_ORIGIN, - request, - ActionListener.wrap(searchResponse -> { - long totalHits = total; - 
String idOfLastHit = lastId; - - for (SearchHit hit : searchResponse.getHits().getHits()) { - String id = hit.field(TransformField.ID.getPreferredName()).getValue(); - - // paranoia - if (Strings.isNullOrEmpty(id)) { - continue; - } + executeAsyncWithOrigin(request, listener.delegateFailureAndWrap((l, searchResponse) -> { + long totalHits = total; + String idOfLastHit = lastId; - // only count hits if looking for outdated transforms - if (filterForOutdated && hit.getIndex().equals(TransformInternalIndexConstants.LATEST_INDEX_VERSIONED_NAME)) { - ++totalHits; - } else if (id.equals(idOfLastHit) == false && collectedIds.add(id)) { - ++totalHits; - } - idOfLastHit = id; + for (SearchHit hit : searchResponse.getHits().getHits()) { + String id = hit.field(TransformField.ID.getPreferredName()).getValue(); + + // paranoia + if (Strings.isNullOrEmpty(id)) { + continue; } - if (searchResponse.getHits().getHits().length == page.getSize()) { - PageParams nextPage = new PageParams(page.getFrom() + page.getSize(), maxResultWindow); - - recursiveExpandAllTransformIds( - collectedIds, - totalHits, - filterForOutdated, - maxResultWindow, - idOfLastHit, - nextPage, - timeout, - listener - ); - return; + // only count hits if looking for outdated transforms + if (filterForOutdated && hit.getIndex().equals(TransformInternalIndexConstants.LATEST_INDEX_VERSIONED_NAME)) { + ++totalHits; + } else if (id.equals(idOfLastHit) == false && collectedIds.add(id)) { + ++totalHits; } + idOfLastHit = id; + } - listener.onResponse(new Tuple<>(totalHits, collectedIds)); - }, listener::onFailure), - client::search - ); + if (searchResponse.getHits().getHits().length == page.getSize()) { + PageParams nextPage = new PageParams(page.getFrom() + page.getSize(), maxResultWindow); + + recursiveExpandAllTransformIds( + collectedIds, + totalHits, + filterForOutdated, + maxResultWindow, + idOfLastHit, + nextPage, + timeout, + l + ); + return; + } + + l.onResponse(new Tuple<>(totalHits, collectedIds)); + }), client::search); } private static Tuple getStatusAndReason(final BulkByScrollResponse response) { diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformIndex.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformIndex.java index fe3d4ede898bc..e3d9fa3aff671 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformIndex.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformIndex.java @@ -33,6 +33,7 @@ import org.elasticsearch.xpack.core.transform.transforms.DestAlias; import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; import org.elasticsearch.xpack.core.transform.transforms.TransformDestIndexSettings; +import org.elasticsearch.xpack.core.transform.transforms.TransformEffectiveSettings; import org.elasticsearch.xpack.transform.notifications.TransformAuditor; import java.time.Clock; @@ -128,7 +129,7 @@ public static void createDestinationIndex( // <2> Set up destination index aliases, regardless whether the destination index was created by the transform or by the user ActionListener createDestinationIndexListener = ActionListener.wrap(createdDestinationIndex -> { if (createdDestinationIndex) { - String message = Boolean.FALSE.equals(config.getSettings().getDeduceMappings()) + String message = TransformEffectiveSettings.isDeduceMappingsDisabled(config.getSettings()) ? 
"Created destination index [" + destinationIndex + "]." : "Created destination index [" + destinationIndex + "] with deduced mappings."; auditor.info(config.getId(), message); @@ -139,7 +140,7 @@ public static void createDestinationIndex( if (dest.length == 0) { TransformDestIndexSettings generatedDestIndexSettings = createTransformDestIndexSettings( destIndexSettings, - Boolean.FALSE.equals(config.getSettings().getDeduceMappings()) ? emptyMap() : destIndexMappings, + TransformEffectiveSettings.isDeduceMappingsDisabled(config.getSettings()) ? emptyMap() : destIndexMappings, config.getId(), Clock.systemUTC() ); diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java index 1634f417924c0..c68c73fd71d9e 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java @@ -50,6 +50,7 @@ import org.elasticsearch.xpack.core.transform.transforms.SettingsConfig; import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpoint; import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformEffectiveSettings; import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerPosition; import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerStats; import org.elasticsearch.xpack.core.transform.transforms.TransformProgress; @@ -131,17 +132,12 @@ class ClientTransformIndexer extends TransformIndexer { // TODO: move into context constructor context.setShouldStopAtCheckpoint(shouldStopAtCheckpoint); - if (transformConfig.getSettings().getUsePit() != null) { - disablePit = transformConfig.getSettings().getUsePit() == false; - } + disablePit = TransformEffectiveSettings.isPitDisabled(transformConfig.getSettings()); } @Override public void applyNewSettings(SettingsConfig newSettings) { - if (newSettings.getUsePit() != null) { - disablePit = newSettings.getUsePit() == false; - } - + disablePit = TransformEffectiveSettings.isPitDisabled(newSettings); super.applyNewSettings(newSettings); } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandler.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandler.java index c7e0eda5ca5e6..337d3c5820c07 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandler.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandler.java @@ -17,12 +17,11 @@ import org.elasticsearch.script.ScriptException; import org.elasticsearch.xpack.core.transform.TransformMessages; import org.elasticsearch.xpack.core.transform.transforms.SettingsConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformEffectiveSettings; import org.elasticsearch.xpack.core.transform.utils.ExceptionsHelper; import org.elasticsearch.xpack.transform.notifications.TransformAuditor; import org.elasticsearch.xpack.transform.utils.ExceptionRootCauseFinder; -import java.util.Optional; - import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.core.common.notifications.Level.INFO; 
import static org.elasticsearch.xpack.core.common.notifications.Level.WARNING; @@ -59,32 +58,28 @@ void handleIndexerFailure(Exception exception, SettingsConfig settingsConfig) { // more detailed reporting in the handlers and below logger.atDebug().withThrowable(exception).log("[{}] transform encountered an exception", transformId); Throwable unwrappedException = ExceptionsHelper.findSearchExceptionRootCause(exception); - boolean unattended = Boolean.TRUE.equals(settingsConfig.getUnattended()); + boolean unattended = TransformEffectiveSettings.isUnattended(settingsConfig); + int numFailureRetries = TransformEffectiveSettings.getNumFailureRetries(settingsConfig, context.getNumFailureRetries()); if (unwrappedException instanceof CircuitBreakingException e) { handleCircuitBreakingException(e, unattended); } else if (unwrappedException instanceof ScriptException e) { handleScriptException(e, unattended); } else if (unwrappedException instanceof BulkIndexingException e) { - handleBulkIndexingException(e, unattended, getNumFailureRetries(settingsConfig)); + handleBulkIndexingException(e, unattended, numFailureRetries); } else if (unwrappedException instanceof ClusterBlockException e) { // gh#89802 always retry for a cluster block exception, because a cluster block should be temporary. - retry(e, e.getDetailedMessage(), unattended, getNumFailureRetries(settingsConfig)); + retry(e, e.getDetailedMessage(), unattended, numFailureRetries); } else if (unwrappedException instanceof SearchPhaseExecutionException e) { // The reason of a SearchPhaseExecutionException unfortunately contains a full stack trace. // Instead of displaying that to the user, get the cause's message instead. - retry(e, e.getCause() != null ? e.getCause().getMessage() : null, unattended, getNumFailureRetries(settingsConfig)); + retry(e, e.getCause() != null ? e.getCause().getMessage() : null, unattended, numFailureRetries); } else if (unwrappedException instanceof ElasticsearchException e) { - handleElasticsearchException(e, unattended, getNumFailureRetries(settingsConfig)); + handleElasticsearchException(e, unattended, numFailureRetries); } else if (unwrappedException instanceof IllegalArgumentException e) { handleIllegalArgumentException(e, unattended); } else { - retry( - unwrappedException, - ExceptionRootCauseFinder.getDetailedMessage(unwrappedException), - unattended, - getNumFailureRetries(settingsConfig) - ); + retry(unwrappedException, ExceptionRootCauseFinder.getDetailedMessage(unwrappedException), unattended, numFailureRetries); } } @@ -98,7 +93,7 @@ void handleIndexerFailure(Exception exception, SettingsConfig settingsConfig) { boolean handleStatePersistenceFailure(Exception e, SettingsConfig settingsConfig) { // we use the same setting for retries, however a separate counter, because the failure // counter for search/index gets reset after a successful bulk index request - int numFailureRetries = getNumFailureRetries(settingsConfig); + int numFailureRetries = TransformEffectiveSettings.getNumFailureRetries(settingsConfig, context.getNumFailureRetries()); int failureCount = context.incrementAndGetStatePersistenceFailureCount(e); @@ -273,19 +268,4 @@ private void fail(Throwable exception, String failureMessage) { // note: logging and audit is done as part of context.markAsFailed context.markAsFailed(exception, failureMessage); } - - /** - * Get the number of retries. - *
<p>
    - * The number of retries are read from the config or if not read from the context which is based on a cluster wide - * default. If the transform runs in unattended mode, the number of retries is always indefinite. - * - * @param settingsConfig the setting config - * @return the number of retries or -1 if retries are indefinite - */ - private int getNumFailureRetries(SettingsConfig settingsConfig) { - return Boolean.TRUE.equals(settingsConfig.getUnattended()) - ? -1 - : Optional.ofNullable(settingsConfig.getNumFailureRetries()).orElse(context.getNumFailureRetries()); - } } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformIndexer.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformIndexer.java index 4b2da731351d7..38bd231e3e76a 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformIndexer.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformIndexer.java @@ -36,6 +36,7 @@ import org.elasticsearch.xpack.core.transform.transforms.SettingsConfig; import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpoint; import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformEffectiveSettings; import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerPosition; import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerStats; import org.elasticsearch.xpack.core.transform.transforms.TransformProgress; @@ -333,6 +334,9 @@ protected void onStart(long now, ActionListener listener) { } }, listener::onFailure); + var shouldMaybeCreateDestIndexForUnattended = context.getCheckpoint() == 0 + && TransformEffectiveSettings.isUnattended(transformConfig.getSettings()); + ActionListener> fieldMappingsListener = ActionListener.wrap(destIndexMappings -> { if (destIndexMappings.isEmpty() == false) { // If we managed to fetch destination index mappings, we use them from now on ... @@ -344,9 +348,7 @@ protected void onStart(long now, ActionListener listener) { // Since the unattended transform could not have created the destination index yet, we do it here. // This is important to create the destination index explicitly before indexing first documents. Otherwise, the destination // index aliases may be missing. 
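The refactor above replaces scattered nullable-Boolean checks with calls to a new TransformEffectiveSettings utility, which this excerpt only shows being imported and called. As a minimal sketch, assuming the helpers simply centralise the inline logic they replace (the class and method names come from the call sites in this diff; the bodies, parameter names and comments are reconstructions, not the actual x-pack core source):

// Sketch only: SettingsConfig and TransformConfigVersion are the existing x-pack core classes
// imported elsewhere in this diff; only java.util.Optional is needed beyond those.
import java.util.Optional;

public final class TransformEffectiveSettings {

    private TransformEffectiveSettings() {}

    // Unattended mode is only active when the flag is explicitly set to true.
    public static boolean isUnattended(SettingsConfig settings) {
        return Boolean.TRUE.equals(settings.getUnattended());
    }

    // Point-in-time search stays enabled unless use_pit is explicitly set to false.
    public static boolean isPitDisabled(SettingsConfig settings) {
        return Boolean.FALSE.equals(settings.getUsePit());
    }

    // Mapping deduction is only treated as disabled when deduce_mappings is explicitly false.
    public static boolean isDeduceMappingsDisabled(SettingsConfig settings) {
        return Boolean.FALSE.equals(settings.getDeduceMappings());
    }

    // Unattended transforms retry indefinitely (-1); otherwise use the configured value,
    // falling back to the caller-supplied cluster-wide default.
    public static int getNumFailureRetries(SettingsConfig settings, int defaultNumFailureRetries) {
        return isUnattended(settings)
            ? -1
            : Optional.ofNullable(settings.getNumFailureRetries()).orElse(defaultNumFailureRetries);
    }

    // Transforms created before 7.11 write dates as epoch millis unless the setting says
    // otherwise; the decision depends on the version the transform was created with.
    public static boolean writeDatesAsEpochMillis(SettingsConfig settings, TransformConfigVersion version) {
        return settings.getDatesAsEpochMillis() != null
            ? settings.getDatesAsEpochMillis()
            : version.before(TransformConfigVersion.V_7_11_0);
    }
}

Centralising the "null means default" interpretation keeps the indexer, the failure handler and the mapping-deduction path from re-implementing it in subtly different ways, which is what the surrounding hunks clean up.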
- if (destIndexMappings.isEmpty() - && context.getCheckpoint() == 0 - && Boolean.TRUE.equals(transformConfig.getSettings().getUnattended())) { + if (destIndexMappings.isEmpty() && shouldMaybeCreateDestIndexForUnattended) { doMaybeCreateDestIndex(deducedDestIndexMappings.get(), configurationReadyListener); } else { configurationReadyListener.onResponse(null); @@ -364,7 +366,7 @@ protected void onStart(long now, ActionListener listener) { deducedDestIndexMappings.set(validationResponse.getDestIndexMappings()); if (isContinuous()) { transformsConfigManager.getTransformConfiguration(getJobId(), ActionListener.wrap(config -> { - if (transformConfig.equals(config) && fieldMappings != null) { + if (transformConfig.equals(config) && fieldMappings != null && shouldMaybeCreateDestIndexForUnattended == false) { logger.trace("[{}] transform config has not changed.", getJobId()); configurationReadyListener.onResponse(null); } else { @@ -412,7 +414,7 @@ protected void onStart(long now, ActionListener listener) { hasSourceChanged = true; listener.onFailure(failure); })); - } else if (context.getCheckpoint() == 0 && Boolean.TRUE.equals(transformConfig.getSettings().getUnattended())) { + } else if (context.getCheckpoint() == 0 && TransformEffectiveSettings.isUnattended(transformConfig.getSettings())) { // this transform runs in unattended mode and has never run, to go on validate(changedSourceListener); } else { diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformNodes.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformNodes.java index d282239099d6b..56e5fd5900cfb 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformNodes.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformNodes.java @@ -241,7 +241,10 @@ public static boolean nodeCanRunThisTransform( // does the transform require a remote and remote is enabled? 
if (requiresRemote && node.isRemoteClusterClient() == false) { if (explain != null) { - explain.put(node.getId(), "transform requires a remote connection but remote is disabled"); + explain.put( + node.getId(), + "transform requires a remote connection but the node does not have the remote_cluster_client role" + ); } return false; } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformTask.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformTask.java index b0435a08a4187..8a78be8417020 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformTask.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformTask.java @@ -17,6 +17,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.Strings; import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.core.Predicates; import org.elasticsearch.core.TimeValue; import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; @@ -607,7 +608,7 @@ public static PersistentTask getTransformTask(String transformId, ClusterStat } public static Collection> findAllTransformTasks(ClusterState clusterState) { - return findTransformTasks(task -> true, clusterState); + return findTransformTasks(Predicates.always(), clusterState); } public static Collection> findTransformTasks(Set transformIds, ClusterState clusterState) { @@ -616,7 +617,7 @@ public static Collection> findTransformTasks(Set trans public static Collection> findTransformTasks(String transformIdPattern, ClusterState clusterState) { Predicate> taskMatcher = transformIdPattern == null - || Strings.isAllOrWildcard(transformIdPattern) ? t -> true : t -> { + || Strings.isAllOrWildcard(transformIdPattern) ? Predicates.always() : t -> { TransformTaskParams transformParams = (TransformTaskParams) t.getParams(); return Regex.simpleMatch(transformIdPattern, transformParams.getId()); }; diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/Pivot.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/Pivot.java index 0d4dbcb6c2094..8c134b92c02af 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/Pivot.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/Pivot.java @@ -27,6 +27,7 @@ import org.elasticsearch.xpack.core.transform.TransformMessages; import org.elasticsearch.xpack.core.transform.transforms.SettingsConfig; import org.elasticsearch.xpack.core.transform.transforms.SourceConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformEffectiveSettings; import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerStats; import org.elasticsearch.xpack.core.transform.transforms.TransformProgress; import org.elasticsearch.xpack.core.transform.transforms.pivot.PivotConfig; @@ -132,14 +133,7 @@ protected Stream> extractResults( TransformIndexerStats transformIndexerStats, TransformProgress transformProgress ) { - // defines how dates are written, if not specified in settings - // < 7.11 as epoch millis - // >= 7.11 as string - // note: it depends on the version when the transform has been created, not the version of the code - boolean datesAsEpoch = settings.getDatesAsEpochMillis() != null - ? 
settings.getDatesAsEpochMillis() - : version.before(TransformConfigVersion.V_7_11_0); - + boolean datesAsEpoch = TransformEffectiveSettings.writeDatesAsEpochMillis(settings, version); return AggregationResultUtils.extractCompositeAggregationResults( agg, config.getGroupConfig(), diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtil.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtil.java index 48b156ce39fc2..d5e0351a8822e 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtil.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtil.java @@ -24,6 +24,7 @@ import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.transform.transforms.SettingsConfig; import org.elasticsearch.xpack.core.transform.transforms.SourceConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformEffectiveSettings; import org.elasticsearch.xpack.core.transform.transforms.pivot.PivotConfig; import java.math.BigDecimal; @@ -167,7 +168,7 @@ public static void deduceMappings( sourceMappings -> listener.onResponse( resolveMappings( transformId, - Boolean.FALSE.equals(settingsConfig.getDeduceMappings()) == false, + TransformEffectiveSettings.isDeduceMappingsDisabled(settingsConfig), aggregationSourceFieldNames, aggregationTypes, fieldNamesForGrouping, @@ -207,7 +208,7 @@ public static void getDestinationFieldMappings( private static Map resolveMappings( String transformId, - boolean deduceMappings, + boolean deduceMappingsDisabled, Map aggregationSourceFieldNames, Map aggregationTypes, Map fieldNamesForGrouping, @@ -244,7 +245,7 @@ private static Map resolveMappings( targetMapping.put(targetFieldName, destinationMapping); } else { logger.log( - deduceMappings ? Level.WARN : Level.INFO, + deduceMappingsDisabled ? Level.INFO : Level.WARN, "[{}] Failed to deduce mapping for [{}], fall back to dynamic mapping. " + "Create the destination index with complete mappings first to avoid deducing the mappings", transformId, @@ -260,7 +261,7 @@ private static Map resolveMappings( targetMapping.put(targetFieldName, destinationMapping); } else { logger.log( - deduceMappings ? Level.WARN : Level.INFO, + deduceMappingsDisabled ? Level.INFO : Level.WARN, "[{}] Failed to deduce mapping for [{}], fall back to keyword. 
" + "Create the destination index with complete mappings first to avoid deducing the mappings", transformId, diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransformConfigLinterTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransformConfigLinterTests.java index 30b86c71f473b..288ec8fc7a3d7 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransformConfigLinterTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransformConfigLinterTests.java @@ -27,11 +27,9 @@ import org.elasticsearch.xpack.transform.transforms.pivot.Pivot; import java.util.Collections; -import java.util.HashMap; import java.util.Map; import static java.util.Collections.singletonList; -import static java.util.Collections.singletonMap; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.is; @@ -44,7 +42,7 @@ public void testGetWarnings_Pivot_WithScriptBasedRuntimeFields() { AggregationConfigTests.randomAggregationConfig(), null ); - Function function = new Pivot(pivotConfig, new SettingsConfig(), TransformConfigVersion.CURRENT, Collections.emptySet()); + Function function = new Pivot(pivotConfig, SettingsConfig.EMPTY, TransformConfigVersion.CURRENT, Collections.emptySet()); SourceConfig sourceConfig = SourceConfigTests.randomSourceConfig(); assertThat(TransformConfigLinter.getWarnings(function, sourceConfig, null), is(empty())); @@ -52,13 +50,14 @@ public void testGetWarnings_Pivot_WithScriptBasedRuntimeFields() { assertThat(TransformConfigLinter.getWarnings(function, sourceConfig, syncConfig), is(empty())); - Map runtimeMappings = new HashMap<>() { - { - put("rt-field-A", singletonMap("type", "keyword")); - put("rt-field-B", singletonMap("script", "some script")); - put("rt-field-C", singletonMap("script", "some other script")); - } - }; + Map runtimeMappings = Map.of( + "rt-field-A", + Map.of("type", "keyword"), + "rt-field-B", + Map.of("script", "some script"), + "rt-field-C", + Map.of("script", "some other script") + ); sourceConfig = new SourceConfig( generateRandomStringArray(10, 10, false, false), QueryConfigTests.randomQueryConfig(), @@ -81,13 +80,14 @@ public void testGetWarnings_Latest_WithScriptBasedRuntimeFields() { SyncConfig syncConfig = new TimeSyncConfig("rt-field-C", null); - Map runtimeMappings = new HashMap<>() { - { - put("rt-field-A", singletonMap("type", "keyword")); - put("rt-field-B", singletonMap("script", "some script")); - put("rt-field-C", singletonMap("script", "some other script")); - } - }; + Map runtimeMappings = Map.of( + "rt-field-A", + Map.of("type", "keyword"), + "rt-field-B", + Map.of("script", "some script"), + "rt-field-C", + Map.of("script", "some other script") + ); sourceConfig = new SourceConfig( generateRandomStringArray(10, 10, false, false), QueryConfigTests.randomQueryConfig(), @@ -117,7 +117,7 @@ public void testGetWarnings_Pivot_CouldNotFindAnyOptimization() { AggregationConfigTests.randomAggregationConfig(), null ); - Function function = new Pivot(pivotConfig, new SettingsConfig(), TransformConfigVersion.CURRENT, Collections.emptySet()); + Function function = new Pivot(pivotConfig, SettingsConfig.EMPTY, TransformConfigVersion.CURRENT, Collections.emptySet()); SourceConfig sourceConfig = SourceConfigTests.randomSourceConfig(); SyncConfig syncConfig = TimeSyncConfigTests.randomTimeSyncConfig(); assertThat( diff --git 
a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/TransformIndexTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/TransformIndexTests.java index 9db4ba1fc73b6..87b65978f667e 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/TransformIndexTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/TransformIndexTests.java @@ -45,7 +45,7 @@ import java.util.concurrent.TimeUnit; import static java.util.Collections.emptyMap; -import static java.util.Collections.singletonMap; +import static java.util.Map.entry; import static org.elasticsearch.common.xcontent.support.XContentMapValues.extractValue; import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.contains; @@ -220,68 +220,45 @@ public void testSetUpDestinationAliases() { public void testCreateMappingsFromStringMap() { assertThat(TransformIndex.createMappingsFromStringMap(emptyMap()), is(anEmptyMap())); + assertThat(TransformIndex.createMappingsFromStringMap(Map.of("a", "long")), equalTo(Map.of("a", Map.of("type", "long")))); assertThat( - TransformIndex.createMappingsFromStringMap(singletonMap("a", "long")), - is(equalTo(singletonMap("a", singletonMap("type", "long")))) + TransformIndex.createMappingsFromStringMap(Map.of("a", "long", "b", "keyword")), + equalTo(Map.of("a", Map.of("type", "long"), "b", Map.of("type", "keyword"))) + ); + assertThat( + TransformIndex.createMappingsFromStringMap(Map.of("a", "long", "a.b", "keyword")), + equalTo(Map.of("a", Map.of("type", "long"), "a.b", Map.of("type", "keyword"))) + ); + assertThat( + TransformIndex.createMappingsFromStringMap(Map.of("a", "long", "a.b", "text", "a.b.c", "keyword")), + equalTo(Map.of("a", Map.of("type", "long"), "a.b", Map.of("type", "text"), "a.b.c", Map.of("type", "keyword"))) + ); + assertThat( + TransformIndex.createMappingsFromStringMap( + Map.ofEntries( + entry("a", "object"), + entry("a.b", "long"), + entry("c", "nested"), + entry("c.d", "boolean"), + entry("f", "object"), + entry("f.g", "object"), + entry("f.g.h", "text"), + entry("f.g.h.i", "text") + ) + ), + equalTo( + Map.ofEntries( + entry("a", Map.of("type", "object")), + entry("a.b", Map.of("type", "long")), + entry("c", Map.of("type", "nested")), + entry("c.d", Map.of("type", "boolean")), + entry("f", Map.of("type", "object")), + entry("f.g", Map.of("type", "object")), + entry("f.g.h", Map.of("type", "text")), + entry("f.g.h.i", Map.of("type", "text")) + ) + ) ); - assertThat(TransformIndex.createMappingsFromStringMap(new HashMap<>() { - { - put("a", "long"); - put("b", "keyword"); - } - }), is(equalTo(new HashMap<>() { - { - put("a", singletonMap("type", "long")); - put("b", singletonMap("type", "keyword")); - } - }))); - assertThat(TransformIndex.createMappingsFromStringMap(new HashMap<>() { - { - put("a", "long"); - put("a.b", "keyword"); - } - }), is(equalTo(new HashMap<>() { - { - put("a", singletonMap("type", "long")); - put("a.b", singletonMap("type", "keyword")); - } - }))); - assertThat(TransformIndex.createMappingsFromStringMap(new HashMap<>() { - { - put("a", "long"); - put("a.b", "text"); - put("a.b.c", "keyword"); - } - }), is(equalTo(new HashMap<>() { - { - put("a", singletonMap("type", "long")); - put("a.b", singletonMap("type", "text")); - put("a.b.c", singletonMap("type", "keyword")); - } - }))); - assertThat(TransformIndex.createMappingsFromStringMap(new HashMap<>() { - { - put("a", "object"); - 
put("a.b", "long"); - put("c", "nested"); - put("c.d", "boolean"); - put("f", "object"); - put("f.g", "object"); - put("f.g.h", "text"); - put("f.g.h.i", "text"); - } - }), is(equalTo(new HashMap<>() { - { - put("a", singletonMap("type", "object")); - put("a.b", singletonMap("type", "long")); - put("c", singletonMap("type", "nested")); - put("c.d", singletonMap("type", "boolean")); - put("f", singletonMap("type", "object")); - put("f.g", singletonMap("type", "object")); - put("f.g.h", singletonMap("type", "text")); - put("f.g.h.i", singletonMap("type", "text")); - } - }))); } @SuppressWarnings("unchecked") diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java index 43a8f35cfeafe..017fe3d289b0c 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java @@ -47,6 +47,7 @@ import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpoint; import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; import org.elasticsearch.xpack.core.transform.transforms.TransformConfigTests; +import org.elasticsearch.xpack.core.transform.transforms.TransformEffectiveSettings; import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerPosition; import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerStats; import org.elasticsearch.xpack.core.transform.transforms.TransformProgress; @@ -309,7 +310,7 @@ public void testDisablePit() throws InterruptedException { } TransformConfig config = configBuilder.build(); - boolean pitEnabled = config.getSettings().getUsePit() == null || config.getSettings().getUsePit(); + boolean pitEnabled = TransformEffectiveSettings.isPitDisabled(config.getSettings()) == false; try (var threadPool = createThreadPool()) { final var client = new PitMockClient(threadPool, true); diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutorTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutorTests.java index b32ec235fcc6f..b927a248faf31 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutorTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutorTests.java @@ -166,7 +166,7 @@ public void testNodeAssignmentProblems() { equalTo( "Not starting transform [new-task-id], reasons [" + "current-data-node-with-0-tasks-transform-remote-disabled:" - + "transform requires a remote connection but remote is disabled" + + "transform requires a remote connection but the node does not have the remote_cluster_client role" + "]" ) ); @@ -195,7 +195,7 @@ public void testNodeAssignmentProblems() { equalTo( "Not starting transform [new-task-id], reasons [" + "current-data-node-with-0-tasks-transform-remote-disabled:" - + "transform requires a remote connection but remote is disabled" + + "transform requires a remote connection but the node does not have the remote_cluster_client role" + "|" + "current-data-node-with-transform-disabled:not a transform node" + "]" diff --git 
a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/common/DocumentConversionUtilsTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/common/DocumentConversionUtilsTests.java index c6f2a33240471..b4d38ab517bb7 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/common/DocumentConversionUtilsTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/common/DocumentConversionUtilsTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.test.ESTestCase; import java.util.Collections; -import java.util.HashMap; import java.util.Map; import static java.util.Map.entry; @@ -87,21 +86,16 @@ public void testRemoveInternalFields() { } public void testExtractFieldMappings() { - FieldCapabilitiesResponse response = new FieldCapabilitiesResponse(new String[] { "some-index" }, new HashMap<>() { - { - put("field-1", new HashMap<>() { - { - put("keyword", createFieldCapabilities("field-1", "keyword")); - } - }); - put("field-2", new HashMap<>() { - { - put("long", createFieldCapabilities("field-2", "long")); - put("keyword", createFieldCapabilities("field-2", "keyword")); - } - }); - } - }); + FieldCapabilitiesResponse response = new FieldCapabilitiesResponse( + new String[] { "some-index" }, + Map.ofEntries( + entry("field-1", Map.of("keyword", createFieldCapabilities("field-1", "keyword"))), + entry( + "field-2", + Map.of("long", createFieldCapabilities("field-2", "long"), "keyword", createFieldCapabilities("field-2", "keyword")) + ) + ) + ); assertThat( DocumentConversionUtils.extractFieldMappings(response), diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationSchemaAndResultTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationSchemaAndResultTests.java index 5943a9007fb7c..1eb86b813f260 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationSchemaAndResultTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationSchemaAndResultTests.java @@ -153,7 +153,7 @@ public void testBasic() throws InterruptedException { client, emptyMap(), "my-transform", - new SettingsConfig(), + SettingsConfig.EMPTY, pivotConfig, new SourceConfig(new String[] { "source-index" }), listener @@ -233,7 +233,7 @@ public void testNested() throws InterruptedException { client, emptyMap(), "my-transform", - new SettingsConfig(), + SettingsConfig.EMPTY, pivotConfig, new SourceConfig(new String[] { "source-index" }), listener diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java index 5d58ac9904482..0a030d26016f7 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java @@ -125,14 +125,14 @@ protected NamedXContentRegistry xContentRegistry() { public void testValidateExistingIndex() throws Exception { SourceConfig source = new SourceConfig("existing_source_index"); - Function pivot = new Pivot(getValidPivotConfig(), new SettingsConfig(), TransformConfigVersion.CURRENT, Collections.emptySet()); + Function pivot = 
new Pivot(getValidPivotConfig(), SettingsConfig.EMPTY, TransformConfigVersion.CURRENT, Collections.emptySet()); assertValidTransform(client, source, pivot); } public void testValidateNonExistingIndex() throws Exception { SourceConfig source = new SourceConfig("non_existing_source_index"); - Function pivot = new Pivot(getValidPivotConfig(), new SettingsConfig(), TransformConfigVersion.CURRENT, Collections.emptySet()); + Function pivot = new Pivot(getValidPivotConfig(), SettingsConfig.EMPTY, TransformConfigVersion.CURRENT, Collections.emptySet()); assertInvalidTransform(client, source, pivot); } @@ -142,7 +142,7 @@ public void testInitialPageSize() throws Exception { Function pivot = new Pivot( new PivotConfig(GroupConfigTests.randomGroupConfig(), getValidAggregationConfig(), expectedPageSize), - new SettingsConfig(), + SettingsConfig.EMPTY, TransformConfigVersion.CURRENT, Collections.emptySet() ); @@ -150,7 +150,7 @@ public void testInitialPageSize() throws Exception { pivot = new Pivot( new PivotConfig(GroupConfigTests.randomGroupConfig(), getValidAggregationConfig(), null), - new SettingsConfig(), + SettingsConfig.EMPTY, TransformConfigVersion.CURRENT, Collections.emptySet() ); @@ -164,7 +164,7 @@ public void testSearchFailure() throws Exception { // search has failures although they might just be temporary SourceConfig source = new SourceConfig("existing_source_index_with_failing_shards"); - Function pivot = new Pivot(getValidPivotConfig(), new SettingsConfig(), TransformConfigVersion.CURRENT, Collections.emptySet()); + Function pivot = new Pivot(getValidPivotConfig(), SettingsConfig.EMPTY, TransformConfigVersion.CURRENT, Collections.emptySet()); assertInvalidTransform(client, source, pivot); } @@ -177,7 +177,7 @@ public void testValidateAllSupportedAggregations() throws Exception { Function pivot = new Pivot( getValidPivotConfig(aggregationConfig), - new SettingsConfig(), + SettingsConfig.EMPTY, TransformConfigVersion.CURRENT, Collections.emptySet() ); @@ -191,7 +191,7 @@ public void testValidateAllUnsupportedAggregations() throws Exception { Function pivot = new Pivot( getValidPivotConfig(aggregationConfig), - new SettingsConfig(), + SettingsConfig.EMPTY, TransformConfigVersion.CURRENT, Collections.emptySet() ); @@ -233,7 +233,7 @@ public void testGetPerformanceCriticalFields() throws IOException { assertThat(groupConfig.validate(null), is(nullValue())); PivotConfig pivotConfig = new PivotConfig(groupConfig, AggregationConfigTests.randomAggregationConfig(), null); - Function pivot = new Pivot(pivotConfig, new SettingsConfig(), TransformConfigVersion.CURRENT, Collections.emptySet()); + Function pivot = new Pivot(pivotConfig, SettingsConfig.EMPTY, TransformConfigVersion.CURRENT, Collections.emptySet()); assertThat(pivot.getPerformanceCriticalFields(), contains("field-A", "field-B", "field-C")); } diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtilTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtilTests.java index f6846bc065976..212942a09e40e 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtilTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtilTests.java @@ -46,9 +46,11 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static java.util.Collections.singletonMap; +import static 
org.elasticsearch.test.MapMatcher.matchesMap; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.Matchers.aMapWithSize; import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.is; @@ -56,29 +58,26 @@ public class SchemaUtilTests extends ESTestCase { public void testInsertNestedObjectMappings() { - Map fieldMappings = new HashMap<>() { - { - // creates: a.b, a - put("a.b.c", "long"); - put("a.b.d", "double"); - // creates: c.b, c - put("c.b.a", "double"); - // creates: c.d - put("c.d.e", "object"); - put("d", "long"); - put("e.f.g", "long"); - // cc: already there - put("e.f", "object"); - // cc: already there but different type (should not be possible) - put("e", "long"); - // cc: start with . (should not be possible) - put(".x", "long"); - // cc: start and ends with . (should not be possible), creates: .y - put(".y.", "long"); - // cc: ends with . (should not be possible), creates: .z - put(".z.", "long"); - } - }; + Map fieldMappings = new HashMap<>(); + // creates: a.b, a + fieldMappings.put("a.b.c", "long"); + fieldMappings.put("a.b.d", "double"); + // creates: c.b, c + fieldMappings.put("c.b.a", "double"); + // creates: c.d + fieldMappings.put("c.d.e", "object"); + fieldMappings.put("d", "long"); + fieldMappings.put("e.f.g", "long"); + // cc: already there + fieldMappings.put("e.f", "object"); + // cc: already there but different type (should not be possible) + fieldMappings.put("e", "long"); + // cc: start with . (should not be possible) + fieldMappings.put(".x", "long"); + // cc: start and ends with . (should not be possible), creates: .y + fieldMappings.put(".y.", "long"); + // cc: ends with . 
(should not be possible), creates: .z + fieldMappings.put(".z.", "long"); SchemaUtil.insertNestedObjectMappings(fieldMappings); @@ -122,10 +121,7 @@ public void testGetSourceFieldMappings() throws InterruptedException { null, listener ), - mappings -> { - assertNotNull(mappings); - assertTrue(mappings.isEmpty()); - } + mappings -> assertThat(mappings, anEmptyMap()) ); // fields is empty @@ -137,10 +133,7 @@ public void testGetSourceFieldMappings() throws InterruptedException { new String[] {}, listener ), - mappings -> { - assertNotNull(mappings); - assertTrue(mappings.isEmpty()); - } + mappings -> assertThat(mappings, anEmptyMap()) ); // good use @@ -152,23 +145,13 @@ public void testGetSourceFieldMappings() throws InterruptedException { new String[] { "field-1", "field-2" }, listener ), - mappings -> { - assertNotNull(mappings); - assertEquals(2, mappings.size()); - assertEquals("long", mappings.get("field-1")); - assertEquals("long", mappings.get("field-2")); - } + mappings -> assertThat(mappings, matchesMap(Map.of("field-1", "long", "field-2", "long"))) ); } } public void testGetSourceFieldMappingsWithRuntimeMappings() throws InterruptedException { - Map runtimeMappings = new HashMap<>() { - { - put("field-2", singletonMap("type", "keyword")); - put("field-3", singletonMap("type", "boolean")); - } - }; + Map runtimeMappings = Map.of("field-2", Map.of("type", "keyword"), "field-3", Map.of("type", "boolean")); try (var threadPool = createThreadPool()) { final var client = new FieldCapsMockClient(threadPool, emptySet()); this.>assertAsync( diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java index ccd9023f745bb..e5f4091ca89eb 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java @@ -15,6 +15,8 @@ import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; import org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule; +import java.util.Arrays; + import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; @@ -24,6 +26,7 @@ import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.templateRequest; import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; public class RejectedExecutionTests extends AbstractWatcherIntegrationTestCase { @@ -39,6 +42,7 @@ public void testHistoryOnRejection() throws Exception { prepareIndex("idx").setSource("field", "a").get(); refresh(); WatcherSearchTemplateRequest request = templateRequest(searchSource().query(termQuery("field", "a")), "idx"); + // The following watch will get rejected because we have configured the watcher thread pool queue size to be 0: new PutWatchRequestBuilder(client()).setId(randomAlphaOfLength(5)) .setSource( watchBuilder().trigger(schedule(interval(1, 
IntervalSchedule.Interval.Unit.SECONDS))) @@ -47,25 +51,31 @@ public void testHistoryOnRejection() throws Exception { .addAction("_logger", loggingAction("_logging").setCategory("_category")) ) .get(); - + // Now we make sure that we get a watcher history record for the failed watch (it is written on a different thread pool) assertBusy(() -> { flushAndRefresh(".watcher-history-*"); - assertResponse( - prepareSearch(".watcher-history-*"), - searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, greaterThanOrEqualTo(2L)) - ); + assertResponse(prepareSearch(".watcher-history-*"), searchResponse -> { + assertThat("Watcher history not found", searchResponse.getHits().getTotalHits().value, greaterThanOrEqualTo(2L)); + assertThat( + "Did not find watcher history for rejected watch", + Arrays.stream(searchResponse.getHits().getHits()) + .anyMatch( + hit -> hit.getSourceAsMap() != null + && hit.getSourceAsMap().get("messages") != null + && hit.getSourceAsMap().get("messages").toString().contains("due to thread pool capacity") + ), + equalTo(true) + ); + }); }); } @Override protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { - return Settings.builder() .put(super.nodeSettings(nodeOrdinal, otherSettings)) .put(XPackSettings.SECURITY_ENABLED.getKey(), false) .put(LicenseSettings.SELF_GENERATED_LICENSE_TYPE.getKey(), "trial") - .put("thread_pool.write.size", 1) - .put("thread_pool.write.queue_size", 1) .put("xpack.watcher.thread_pool.size", 1) .put("xpack.watcher.thread_pool.queue_size", 0) .build(); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java index 571e8912b43b2..f6e34ccb243c8 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java @@ -166,7 +166,9 @@ public void clusterChanged(ClusterChangedEvent event) { if (watcherService.validate(event.state())) { previousShardRoutings.set(localAffectedShardRoutings); if (state.get() == WatcherState.STARTED) { - watcherService.reload(event.state(), "new local watcher shard allocation ids"); + watcherService.reload(event.state(), "new local watcher shard allocation ids", (exception) -> { + clearAllocationIds(); // will cause reload again + }); } else if (isStoppedOrStopping) { this.state.set(WatcherState.STARTING); watcherService.start(event.state(), () -> this.state.set(WatcherState.STARTED), (exception) -> { diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java index a067b99c6bff0..5389f34212270 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java @@ -201,7 +201,7 @@ void stopExecutor() { * Reload the watcher service, does not switch the state from stopped to started, just keep going * @param state cluster state, which is needed to find out about local shards */ - void reload(ClusterState state, String reason) { + void reload(ClusterState state, String reason, Consumer exceptionConsumer) { boolean hasValidWatcherTemplates = WatcherIndexTemplateRegistry.validate(state); if (hasValidWatcherTemplates == false) { logger.warn("missing 
watcher index templates"); @@ -221,7 +221,10 @@ void reload(ClusterState state, String reason) { int cancelledTaskCount = executionService.clearExecutionsAndQueue(() -> {}); logger.info("reloading watcher, reason [{}], cancelled [{}] queued tasks", reason, cancelledTaskCount); - executor.execute(wrapWatcherService(() -> reloadInner(state, reason, false), e -> logger.error("error reloading watcher", e))); + executor.execute(wrapWatcherService(() -> reloadInner(state, reason, false), e -> { + logger.error("error reloading watcher", e); + exceptionConsumer.accept(e); + })); } /** diff --git a/x-pack/plugin/watcher/src/main/resources/org/elasticsearch/xpack/watcher/painless_whitelist.txt b/x-pack/plugin/watcher/src/main/resources/org/elasticsearch/xpack/watcher/painless_whitelist.txt index 89e313875c18e..2dc9b41bbba23 100644 --- a/x-pack/plugin/watcher/src/main/resources/org/elasticsearch/xpack/watcher/painless_whitelist.txt +++ b/x-pack/plugin/watcher/src/main/resources/org/elasticsearch/xpack/watcher/painless_whitelist.txt @@ -8,6 +8,7 @@ class java.lang.String { String org.elasticsearch.painless.api.Augmentation sha1() String org.elasticsearch.painless.api.Augmentation sha256() + String org.elasticsearch.painless.api.Augmentation sha512() } class org.elasticsearch.painless.api.Json { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java index 57ec168728171..365b072a418ef 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java @@ -258,6 +258,91 @@ public void testExceptionOnStart() { assertThat(lifeCycleService.getState().get(), equalTo(WatcherState.STARTED)); } + public void testReloadWithIdenticalRoutingTable() { + /* + * This tests that the identical routing table causes reload only once. + */ + startWatcher(); + + ClusterChangedEvent[] events = masterChangeScenario(); + assertThat(events[1].previousState(), equalTo(events[0].state())); + assertFalse(events[1].routingTableChanged()); + + for (ClusterChangedEvent event : events) { + when(watcherService.validate(event.state())).thenReturn(true); + lifeCycleService.clusterChanged(event); + } + // reload should occur on the first event + verify(watcherService).reload(eq(events[0].state()), anyString(), any()); + // but it shouldn't on the second event unless routing table changes + verify(watcherService, never()).reload(eq(events[1].state()), anyString(), any()); + } + + public void testReloadWithIdenticalRoutingTableAfterException() { + /* + * This tests that even the identical routing table causes reload again if some exception (for example a timeout while loading + * watches) interrupted the previous one. 
+ */ + startWatcher(); + + ClusterChangedEvent[] events = masterChangeScenario(); + assertThat(events[1].previousState(), equalTo(events[0].state())); + assertFalse(events[1].routingTableChanged()); + + // simulate exception on the first event + doAnswer(invocation -> { + Consumer exceptionConsumer = invocation.getArgument(2); + exceptionConsumer.accept(new ElasticsearchTimeoutException(new TimeoutException("Artificial timeout"))); + return null; + }).when(watcherService).reload(eq(events[0].state()), anyString(), any()); + + for (ClusterChangedEvent event : events) { + when(watcherService.validate(event.state())).thenReturn(true); + lifeCycleService.clusterChanged(event); + } + // reload should occur on the first event but it fails + verify(watcherService).reload(eq(events[0].state()), anyString(), any()); + // reload should occur again on the second event because the previous one failed + verify(watcherService).reload(eq(events[1].state()), anyString(), any()); + } + + private ClusterChangedEvent[] masterChangeScenario() { + DiscoveryNodes nodes = new DiscoveryNodes.Builder().localNodeId("node_1").add(newNode("node_1")).add(newNode("node_2")).build(); + + Index index = new Index(Watch.INDEX, "uuid"); + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index); + indexRoutingTableBuilder.addShard( + TestShardRouting.newShardRouting(new ShardId(index, 0), "node_1", true, ShardRoutingState.STARTED) + ); + RoutingTable routingTable = RoutingTable.builder().add(indexRoutingTableBuilder.build()).build(); + + IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(Watch.INDEX) + .settings(settings(IndexVersion.current()).put(IndexMetadata.INDEX_FORMAT_SETTING.getKey(), 6)) // the internal index format, + // required + .numberOfShards(1) + .numberOfReplicas(0); + Metadata metadata = Metadata.builder() + .put(IndexTemplateMetadata.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(indexMetadataBuilder) + .build(); + + ClusterState emptyState = ClusterState.builder(new ClusterName("my-cluster")).nodes(nodes).metadata(metadata).build(); + ClusterState stateWithMasterNode1 = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(nodes.withMasterNodeId("node_1")) + .metadata(metadata) + .routingTable(routingTable) + .build(); + ClusterState stateWithMasterNode2 = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(nodes.withMasterNodeId("node_2")) + .metadata(metadata) + .routingTable(routingTable) + .build(); + + return new ClusterChangedEvent[] { + new ClusterChangedEvent("any", stateWithMasterNode1, emptyState), + new ClusterChangedEvent("any", stateWithMasterNode2, stateWithMasterNode1) }; + } + public void testNoLocalShards() { Index watchIndex = new Index(Watch.INDEX, "foo"); ShardId shardId = new ShardId(watchIndex, 0); @@ -301,7 +386,7 @@ public void testNoLocalShards() { when(watcherService.validate(eq(clusterStateWithLocalShards))).thenReturn(true); when(watcherService.validate(eq(clusterStateWithoutLocalShards))).thenReturn(false); lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterStateWithLocalShards, clusterStateWithoutLocalShards)); - verify(watcherService, times(1)).reload(eq(clusterStateWithLocalShards), eq("new local watcher shard allocation ids")); + verify(watcherService, times(1)).reload(eq(clusterStateWithLocalShards), eq("new local watcher shard allocation ids"), any()); verify(watcherService, times(1)).validate(eq(clusterStateWithLocalShards)); verifyNoMoreInteractions(watcherService); 
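The WatcherService.reload signature change above threads a failure callback from WatcherLifeCycleService into the asynchronous reload: on error the lifecycle service clears its cached allocation ids, so the next cluster-state event triggers another reload even when the routing table is identical, which is exactly what the two new lifecycle tests above assert. A minimal sketch of that pattern in isolation, with illustrative names rather than the actual Watcher classes:

import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;

class ReloadOnFailureDemo {
    private final AtomicReference<List<String>> previousShardRoutings = new AtomicReference<List<String>>(List.of());

    // Called for every cluster-state update.
    void clusterChanged(List<String> currentShardRoutings) {
        if (currentShardRoutings.equals(previousShardRoutings.get())) {
            return; // unchanged since the last successful reload, nothing to do
        }
        previousShardRoutings.set(currentShardRoutings);
        reload("new local watcher shard allocation ids", e -> {
            // Forget what we thought we had loaded so the next (possibly identical)
            // cluster-state event causes another reload attempt.
            previousShardRoutings.set(List.of());
        });
    }

    void reload(String reason, Consumer<Exception> onFailure) {
        try {
            loadWatches(); // stand-in for the asynchronous watch loading
        } catch (Exception e) {
            onFailure.accept(e); // surfaced to the caller instead of only being logged
        }
    }

    void loadWatches() {
        // illustrative no-op
    }
}

Without the callback, a reload that failed (for example on a timeout while loading watches) left the cached routings in place, and an identical routing table on the next event meant the reload was never retried.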
@@ -380,12 +465,12 @@ public void testReplicaWasAddedOrRemoved() { when(watcherService.validate(eq(firstEvent.state()))).thenReturn(true); lifeCycleService.clusterChanged(firstEvent); - verify(watcherService).reload(eq(firstEvent.state()), anyString()); + verify(watcherService).reload(eq(firstEvent.state()), anyString(), any()); reset(watcherService); when(watcherService.validate(eq(secondEvent.state()))).thenReturn(true); lifeCycleService.clusterChanged(secondEvent); - verify(watcherService).reload(eq(secondEvent.state()), anyString()); + verify(watcherService).reload(eq(secondEvent.state()), anyString(), any()); } // make sure that cluster state changes can be processed on nodes that do not hold data @@ -425,7 +510,7 @@ public void testNonDataNode() { lifeCycleService.clusterChanged(new ClusterChangedEvent("any", currentState, previousState)); verify(watcherService, times(0)).pauseExecution(any()); - verify(watcherService, times(0)).reload(any(), any()); + verify(watcherService, times(0)).reload(any(), any(), any()); } public void testThatMissingWatcherIndexMetadataOnlyResetsOnce() { @@ -452,7 +537,7 @@ public void testThatMissingWatcherIndexMetadataOnlyResetsOnce() { // first add the shard allocation ids, by going from empty cs to CS with watcher index lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterStateWithWatcherIndex, clusterStateWithoutWatcherIndex)); - verify(watcherService).reload(eq(clusterStateWithWatcherIndex), anyString()); + verify(watcherService).reload(eq(clusterStateWithWatcherIndex), anyString(), any()); // now remove watches index, and ensure that pausing is only called once, no matter how often called (i.e. each CS update) lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterStateWithoutWatcherIndex, clusterStateWithWatcherIndex)); @@ -577,7 +662,7 @@ public void testWatcherReloadsOnNodeOutageWithWatcherShard() { when(watcherService.validate(any())).thenReturn(true); ClusterChangedEvent event = new ClusterChangedEvent("whatever", currentState, previousState); lifeCycleService.clusterChanged(event); - verify(watcherService).reload(eq(event.state()), anyString()); + verify(watcherService).reload(eq(event.state()), anyString(), any()); } private void startWatcher() { @@ -609,7 +694,7 @@ private void startWatcher() { lifeCycleService.clusterChanged(new ClusterChangedEvent("foo", state, emptyState)); assertThat(lifeCycleService.getState().get(), is(WatcherState.STARTED)); - verify(watcherService, times(1)).reload(eq(state), anyString()); + verify(watcherService, times(1)).reload(eq(state), anyString(), any()); assertThat(lifeCycleService.shardRoutings(), hasSize(1)); // reset the mock, the user has to mock everything themselves again diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java index 64bf5b5d99fdb..bee2d6aa22355 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.SlowLogFieldProvider; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.engine.InternalEngineFactory; import 
org.elasticsearch.indices.SystemIndexDescriptor; @@ -66,7 +67,8 @@ public void testWatcherDisabledTests() throws Exception { Collections.emptyMap(), () -> true, TestIndexNameExpressionResolver.newInstance(), - Collections.emptyMap() + Collections.emptyMap(), + mock(SlowLogFieldProvider.class) ); // this will trip an assertion if the watcher indexing operation listener is null (which it is) but we try to add it watcher.onIndexModule(indexModule); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java index 19bac967c576a..24a4eede1b20d 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java @@ -77,6 +77,7 @@ import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -349,12 +350,38 @@ void stopExecutor() {} ClusterState.Builder csBuilder = new ClusterState.Builder(new ClusterName("_name")); csBuilder.metadata(Metadata.builder()); - service.reload(csBuilder.build(), "whatever"); + service.reload(csBuilder.build(), "whatever", exception -> {}); verify(executionService).clearExecutionsAndQueue(any()); verify(executionService, never()).pause(any()); verify(triggerService).pauseExecution(); } + // the trigger service should not start unless watches are loaded successfully + public void testReloadingWatcherDoesNotStartTriggerServiceIfFailingToLoadWatches() { + ExecutionService executionService = mock(ExecutionService.class); + TriggerService triggerService = mock(TriggerService.class); + WatcherService service = new WatcherService( + Settings.EMPTY, + triggerService, + mock(TriggeredWatchStore.class), + executionService, + mock(WatchParser.class), + client, + EsExecutors.DIRECT_EXECUTOR_SERVICE + ) { + @Override + void stopExecutor() {} + }; + + ClusterState.Builder csBuilder = new ClusterState.Builder(new ClusterName("_name")); + Metadata metadata = spy(Metadata.builder().build()); + when(metadata.getIndicesLookup()).thenThrow(RuntimeException.class); // simulate exception in WatcherService's private loadWatches() + + service.reload(csBuilder.metadata(metadata).build(), "whatever", exception -> {}); + verify(triggerService).pauseExecution(); + verify(triggerService, never()).start(any()); + } + private static DiscoveryNode newNode() { return DiscoveryNodeUtils.create("node"); } diff --git a/x-pack/plugin/write-load-forecaster/src/main/java/org/elasticsearch/xpack/writeloadforecaster/LicensedWriteLoadForecaster.java b/x-pack/plugin/write-load-forecaster/src/main/java/org/elasticsearch/xpack/writeloadforecaster/LicensedWriteLoadForecaster.java index c1126df228cfe..d4a85ce859b2b 100644 --- a/x-pack/plugin/write-load-forecaster/src/main/java/org/elasticsearch/xpack/writeloadforecaster/LicensedWriteLoadForecaster.java +++ b/x-pack/plugin/write-load-forecaster/src/main/java/org/elasticsearch/xpack/writeloadforecaster/LicensedWriteLoadForecaster.java @@ -76,7 +76,13 @@ public Metadata.Builder withWriteLoadForecastForWriteIndex(String dataStreamName clearPreviousForecast(dataStream, metadata); - final List indicesWriteLoadWithinMaxAgeRange = getIndicesWithinMaxAgeRange(dataStream, metadata).stream() + final List 
indicesWriteLoadWithinMaxAgeRange = DataStream.getIndicesWithinMaxAgeRange( + dataStream, + metadata::getSafe, + maxIndexAge, + threadPool::absoluteTimeInMillis + ) + .stream() .filter(index -> index.equals(dataStream.getWriteIndex()) == false) .map(metadata::getSafe) .map(IndexMetadata::getStats) @@ -134,25 +140,6 @@ static OptionalDouble forecastIndexWriteLoad(List indicesWriteLo return totalShardUptime == 0 ? OptionalDouble.empty() : OptionalDouble.of(totalWeightedWriteLoad / totalShardUptime); } - // Visible for testing - List getIndicesWithinMaxAgeRange(DataStream dataStream, Metadata.Builder metadata) { - final List dataStreamIndices = dataStream.getIndices(); - final long currentTimeMillis = threadPool.absoluteTimeInMillis(); - // Consider at least 1 index (including the write index) for cases where rollovers happen less often than maxIndexAge - int firstIndexWithinAgeRange = Math.max(dataStreamIndices.size() - 2, 0); - for (int i = 0; i < dataStreamIndices.size(); i++) { - Index index = dataStreamIndices.get(i); - final IndexMetadata indexMetadata = metadata.getSafe(index); - final long indexAge = currentTimeMillis - indexMetadata.getCreationDate(); - if (indexAge < maxIndexAge.getMillis()) { - // We need to consider the previous index too in order to cover the entire max-index-age range. - firstIndexWithinAgeRange = i == 0 ? 0 : i - 1; - break; - } - } - return dataStreamIndices.subList(firstIndexWithinAgeRange, dataStreamIndices.size()); - } - @Override @SuppressForbidden(reason = "This is the only place where IndexMetadata#getForecastedWriteLoad is allowed to be used") public OptionalDouble getForecastedWriteLoad(IndexMetadata indexMetadata) { diff --git a/x-pack/plugin/write-load-forecaster/src/test/java/org/elasticsearch/xpack/writeloadforecaster/LicensedWriteLoadForecasterTests.java b/x-pack/plugin/write-load-forecaster/src/test/java/org/elasticsearch/xpack/writeloadforecaster/LicensedWriteLoadForecasterTests.java index 38e754c802983..c7efb27509ef7 100644 --- a/x-pack/plugin/write-load-forecaster/src/test/java/org/elasticsearch/xpack/writeloadforecaster/LicensedWriteLoadForecasterTests.java +++ b/x-pack/plugin/write-load-forecaster/src/test/java/org/elasticsearch/xpack/writeloadforecaster/LicensedWriteLoadForecasterTests.java @@ -287,65 +287,6 @@ public void testWriteLoadForecast() { } } - public void testGetIndicesWithinMaxAgeRange() { - final TimeValue maxIndexAge = TimeValue.timeValueDays(7); - final LicensedWriteLoadForecaster writeLoadForecaster = new LicensedWriteLoadForecaster(() -> true, threadPool, maxIndexAge); - - final Metadata.Builder metadataBuilder = Metadata.builder(); - final int numberOfBackingIndicesOlderThanMinAge = randomIntBetween(0, 10); - final int numberOfBackingIndicesWithinMinAnge = randomIntBetween(0, 10); - final int numberOfShards = 1; - final List backingIndices = new ArrayList<>(); - final String dataStreamName = "logs-es"; - final List backingIndicesOlderThanMinAge = new ArrayList<>(); - for (int i = 0; i < numberOfBackingIndicesOlderThanMinAge; i++) { - long creationDate = System.currentTimeMillis() - maxIndexAge.millis() * 2; - final IndexMetadata indexMetadata = createIndexMetadata( - DataStream.getDefaultBackingIndexName(dataStreamName, backingIndices.size(), creationDate), - numberOfShards, - randomIndexWriteLoad(numberOfShards), - creationDate - ); - backingIndices.add(indexMetadata.getIndex()); - backingIndicesOlderThanMinAge.add(indexMetadata.getIndex()); - metadataBuilder.put(indexMetadata, false); - } - - final List 
backingIndicesWithinMinAge = new ArrayList<>(); - for (int i = 0; i < numberOfBackingIndicesWithinMinAnge; i++) { - final long createdAt = System.currentTimeMillis() - (maxIndexAge.getMillis() / 2); - final IndexMetadata indexMetadata = createIndexMetadata( - DataStream.getDefaultBackingIndexName(dataStreamName, backingIndices.size(), createdAt), - numberOfShards, - randomIndexWriteLoad(numberOfShards), - createdAt - ); - backingIndices.add(indexMetadata.getIndex()); - backingIndicesWithinMinAge.add(indexMetadata.getIndex()); - metadataBuilder.put(indexMetadata, false); - } - - final String writeIndexName = DataStream.getDefaultBackingIndexName(dataStreamName, backingIndices.size()); - final IndexMetadata writeIndexMetadata = createIndexMetadata(writeIndexName, numberOfShards, null, System.currentTimeMillis()); - backingIndices.add(writeIndexMetadata.getIndex()); - metadataBuilder.put(writeIndexMetadata, false); - - final DataStream dataStream = createDataStream(dataStreamName, backingIndices); - - metadataBuilder.put(dataStream); - - final List indicesWithinMaxAgeRange = writeLoadForecaster.getIndicesWithinMaxAgeRange(dataStream, metadataBuilder); - - final List expectedIndicesWithinMaxAgeRange = new ArrayList<>(); - if (numberOfBackingIndicesOlderThanMinAge > 0) { - expectedIndicesWithinMaxAgeRange.add(backingIndicesOlderThanMinAge.get(backingIndicesOlderThanMinAge.size() - 1)); - } - expectedIndicesWithinMaxAgeRange.addAll(backingIndicesWithinMinAge); - expectedIndicesWithinMaxAgeRange.add(writeIndexMetadata.getIndex()); - - assertThat(indicesWithinMaxAgeRange, is(equalTo(expectedIndicesWithinMaxAgeRange))); - } - private IndexWriteLoad randomIndexWriteLoad(int numberOfShards) { IndexWriteLoad.Builder builder = IndexWriteLoad.builder(numberOfShards); for (int shardId = 0; shardId < numberOfShards; shardId++) { diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java index b3a0d91d583a9..484e2ed3ac9c3 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java @@ -10,7 +10,6 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import org.apache.http.util.EntityUtils; -import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; @@ -37,7 +36,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103808") public class MLModelDeploymentFullClusterRestartIT extends AbstractXpackFullClusterRestartTestCase { // See PyTorchModelIT for how this model was created diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/110_enrich.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/110_enrich.yml index e072e034aebf6..2bafa1142683a 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/110_enrich.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/110_enrich.yml @@ -3,11 +3,8 @@ - skip: 
version: " - 7.8.99" reason: "Privilege change of enrich stats is backported to 7.9.0" - features: node_selector - do: - node_selector: - version: "7.9.0 - " enrich.stats: {} - length: { coordinator_stats: 3 } diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/120_api_key.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/120_api_key.yml index 334a85625a328..8cb9b33a1d0fe 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/120_api_key.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/120_api_key.yml @@ -23,14 +23,35 @@ --- -"Create API key with metadata in a mixed cluster": +"Create API key with metadata in a mixed cluster (pre 7.13.0)": - skip: features: [headers, node_selector] + version: "7.13.0 - " + reason: "Support metadata on API keys introduced in 7.13.0" - do: node_selector: - version: " 7.13.0 - " + version: "current" + security.create_api_key: + body: > + { + "name": "my-mixed-api-key-2", + "metadata": {"foo": "bar"} + } + - match: { name: "my-mixed-api-key-2" } + - is_true: id + - is_true: api_key + +--- +"Create API key with metadata in a mixed cluster": + + - skip: + features: [headers] + version: " - 7.12.99" + reason: "Support metadata on API keys introduced in 7.13.0" + + - do: security.create_api_key: body: > { diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/140_user_profile.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/140_user_profile.yml index c667c9266b8d5..486f067310511 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/140_user_profile.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/140_user_profile.yml @@ -1,14 +1,14 @@ --- -"Test User Profile feature will work in a mixed cluster": +"Test User Profile feature will work in a mixed cluster (pre 8.5.0)": - skip: features: node_selector - version: " - 7.99.99" - reason: "https://github.com/elastic/elasticsearch/issues/86373" + version: " - 7.99.99, 8.5.0 - " + reason: "Does not work pre 8.0 (#86373) and response format changed after 8.5 (#89023)" - do: node_selector: - version: " 8.5.0 - " + version: "current" security.activate_user_profile: body: > { @@ -22,7 +22,35 @@ - do: node_selector: - version: " 8.5.0 - " + version: "current" + security.get_user_profile: + uid: "$profile_uid" + + - length: { profiles : 1 } + - set: { profiles.0 : profile } + - match: { $profile.uid : "$profile_uid" } + - match: { $profile.user.username : "test_user" } + +--- +"Test User Profile feature will work in a mixed cluster": + + - skip: + version: " - 8.4.99" + reason: "response format is changed to support multiple UIDs #89023" + + - do: + security.activate_user_profile: + body: > + { + "grant_type": "password", + "username": "test_user", + "password" : "x-pack-test-password" + } + - is_true: uid + - match: { "user.username" : "test_user" } + - set: { uid: profile_uid } + + - do: security.get_user_profile: uid: "$profile_uid" diff --git a/x-pack/qa/saml-idp-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java b/x-pack/qa/saml-idp-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java index 6e6939084cdd3..e8caf004e043b 100644 --- 
a/x-pack/qa/saml-idp-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java +++ b/x-pack/qa/saml-idp-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java @@ -251,7 +251,7 @@ public void setupNativeUser() throws IOException { *

* Uses that token to verify the user details
  • * */ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103595") + // @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103595") public void testLoginUserWithSamlRoleMapping() throws Exception { final Tuple authTokens = loginViaSaml("shibboleth"); verifyElasticsearchAccessTokenForRoleMapping(authTokens.v1()); @@ -262,7 +262,7 @@ public void testLoginUserWithSamlRoleMapping() throws Exception { verifyElasticsearchAccessTokenInvalidated(accessToken); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103595") + // @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103595") public void testLoginUserWithAuthorizingRealm() throws Exception { final Tuple authTokens = loginViaSaml("shibboleth_native"); verifyElasticsearchAccessTokenForAuthorizingRealms(authTokens.v1()); @@ -273,7 +273,7 @@ public void testLoginUserWithAuthorizingRealm() throws Exception { verifyElasticsearchAccessTokenInvalidated(accessToken); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103595") + // @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103595") public void testLoginWithWrongRealmFails() throws Exception { final BasicHttpContext context = new BasicHttpContext(); try (CloseableHttpClient client = getHttpClient()) { diff --git a/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/enrollment/tool/CreateEnrollmentTokenToolTests.java b/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/enrollment/tool/CreateEnrollmentTokenToolTests.java index 27affc69f5fbc..e86e709a662c6 100644 --- a/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/enrollment/tool/CreateEnrollmentTokenToolTests.java +++ b/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/enrollment/tool/CreateEnrollmentTokenToolTests.java @@ -11,6 +11,7 @@ import com.google.common.jimfs.Configuration; import com.google.common.jimfs.Jimfs; +import com.unboundid.util.Base64; import org.elasticsearch.cli.Command; import org.elasticsearch.cli.CommandTestCase; @@ -57,6 +58,8 @@ @SuppressWarnings("unchecked") public class CreateEnrollmentTokenToolTests extends CommandTestCase { + private static final String KIBANA_API_KEY = "DR6CzXkBDf8amV_48yYX:x3YqU_rqQwm-ESrkExcnOg"; + private static final String NODE_API_KEY = "DR6CzXkBDf8amV_48yYX:4BhUk-mkFm-AwvRFg90KJ"; static FileSystem jimfs; String pathHomeParameter; @@ -126,15 +129,13 @@ public void setup() throws Exception { this.externalEnrollmentTokenGenerator = mock(ExternalEnrollmentTokenGenerator.class); EnrollmentToken kibanaToken = new EnrollmentToken( - "DR6CzXkBDf8amV_48yYX:x3YqU_rqQwm-ESrkExcnOg", + KIBANA_API_KEY, "ce480d53728605674fcfd8ffb51000d8a33bf32de7c7f1e26b4d428f8a91362d", - "8.0.0", Arrays.asList("[192.168.0.1:9201, 172.16.254.1:9202") ); EnrollmentToken nodeToken = new EnrollmentToken( - "DR6CzXkBDf8amV_48yYX:4BhUk-mkFm-AwvRFg90KJ", + NODE_API_KEY, "ce480d53728605674fcfd8ffb51000d8a33bf32de7c7f1e26b4d428f8a91362d", - "8.0.0", Arrays.asList("[192.168.0.1:9201, 172.16.254.1:9202") ); when(externalEnrollmentTokenGenerator.createKibanaEnrollmentToken(anyString(), any(SecureString.class), any(URL.class))).thenReturn( @@ -153,14 +154,15 @@ public static void closeJimfs() throws IOException { } } - public void testCreateToken() throws Exception { - String scope = randomBoolean() ? 
"node" : "kibana"; - String output = execute("--scope", scope); - if (scope.equals("kibana")) { - assertThat(output, containsString("1WXzQ4eVlYOngzWXFVX3JxUXdtLUVTcmtFeGNuT2cifQ==")); - } else { - assertThat(output, containsString("4YW1WXzQ4eVlYOjRCaFVrLW1rRm0tQXd2UkZnOTBLSiJ9")); - } + public void testCreateKibanaToken() throws Exception { + String kibanaToken = Base64.decodeToString(execute("--scope", "kibana").trim()); + assertThat(kibanaToken, containsString(KIBANA_API_KEY)); + + } + + public void testCreateNodeToken() throws Exception { + String nodeToken = Base64.decodeToString(execute("--scope", "node").trim()); + assertThat(nodeToken, containsString(NODE_API_KEY)); } public void testInvalidScope() throws Exception { @@ -189,7 +191,6 @@ public void testUserCanPassUrl() throws Exception { EnrollmentToken kibanaToken = new EnrollmentToken( "DR6CzXkBDf8amV_48yYX:x3YqU_rqQwm-ESrkExcnOg", "ce480d53728605674fcfd8ffb51000d8a33bf32de7c7f1e26b4d428f8a91362d", - "8.0.0", Arrays.asList("[192.168.0.1:9201, 172.16.254.1:9202") ); when( @@ -200,7 +201,7 @@ public void testUserCanPassUrl() throws Exception { ) ).thenReturn(kibanaToken); String output = execute("--scope", "kibana", "--url", "http://localhost:9204"); - assertThat(output, containsString("1WXzQ4eVlYOngzWXFVX3JxUXdtLUVTcmtFeGNuT2cifQ==")); + assertThat(Base64.decodeToString(output.trim()), containsString(KIBANA_API_KEY)); } @@ -227,9 +228,9 @@ public void testUnhealthyClusterWithForce() throws Exception { String scope = randomBoolean() ? "node" : "kibana"; String output = execute("--scope", scope); if (scope.equals("kibana")) { - assertThat(output, containsString("1WXzQ4eVlYOngzWXFVX3JxUXdtLUVTcmtFeGNuT2cifQ==")); + assertThat(Base64.decodeToString(output.trim()), containsString(KIBANA_API_KEY)); } else { - assertThat(output, containsString("4YW1WXzQ4eVlYOjRCaFVrLW1rRm0tQXd2UkZnOTBLSiJ9")); + assertThat(Base64.decodeToString(output.trim()), containsString(NODE_API_KEY)); } } diff --git a/x-pack/test/idp-fixture/build.gradle b/x-pack/test/idp-fixture/build.gradle index 3fd39dd9a18a8..2ef03bf7747cc 100644 --- a/x-pack/test/idp-fixture/build.gradle +++ b/x-pack/test/idp-fixture/build.gradle @@ -12,23 +12,22 @@ dependencies { api "junit:junit:${versions.junit}" } +tasks.withType(DockerBuildTask).configureEach { + noCache = BuildParams.isCi() + push = true //BuildParams.isCi() + getPlatforms().addAll( Architecture.values().collect{ it.dockerPlatform } ) +} + tasks.register("deployIdpFixtureDockerImages", DockerBuildTask) { dockerContext.fileValue(file("src/main/resources/idp")) baseImages = ["openjdk:11.0.16-jre"] - noCache = BuildParams.isCi() - tags = ["docker.elastic.co/elasticsearch-dev/idp-fixture:1.0"] - push = BuildParams.isCi() - getPlatforms().addAll( Architecture.values().collect{ it.dockerPlatform } ) + tags = ["docker.elastic.co/elasticsearch-dev/idp-fixture:1.1"] } - tasks.register("deployOpenLdapFixtureDockerImages", DockerBuildTask) { dockerContext.fileValue(file("src/main/resources/openldap")) baseImages = ["osixia/openldap:1.4.0"] - noCache = BuildParams.isCi() tags = ["docker.elastic.co/elasticsearch-dev/openldap-fixture:1.0"] - push = BuildParams.isCi() - getPlatforms().addAll( Architecture.values().collect{ it.dockerPlatform } ) } tasks.register("deployFixtureDockerImages") { diff --git a/x-pack/test/idp-fixture/src/main/java/org/elasticsearch/test/fixtures/idp/IdpTestContainer.java b/x-pack/test/idp-fixture/src/main/java/org/elasticsearch/test/fixtures/idp/IdpTestContainer.java index d76ca5741d8b3..32c8e693ef3f7 100644 
--- a/x-pack/test/idp-fixture/src/main/java/org/elasticsearch/test/fixtures/idp/IdpTestContainer.java +++ b/x-pack/test/idp-fixture/src/main/java/org/elasticsearch/test/fixtures/idp/IdpTestContainer.java @@ -20,7 +20,7 @@ public final class IdpTestContainer extends DockerEnvironmentAwareTestContainer { - private static final String DOCKER_BASE_IMAGE = "docker.elastic.co/elasticsearch-dev/idp-fixture:1.0"; + private static final String DOCKER_BASE_IMAGE = "docker.elastic.co/elasticsearch-dev/idp-fixture:1.1"; private final TemporaryFolder temporaryFolder = new TemporaryFolder(); private Path certsPath; diff --git a/x-pack/test/idp-fixture/src/main/resources/idp/bin/run-jetty.sh b/x-pack/test/idp-fixture/src/main/resources/idp/bin/run-jetty.sh index 0160cc613407d..421deaa49d2ff 100644 --- a/x-pack/test/idp-fixture/src/main/resources/idp/bin/run-jetty.sh +++ b/x-pack/test/idp-fixture/src/main/resources/idp/bin/run-jetty.sh @@ -12,7 +12,7 @@ sed -i "s/^-Xmx.*$/-Xmx$JETTY_MAX_HEAP/g" /opt/shib-jetty-base/start.ini # For some reason, this container always immediately (in less than 1 second) exits with code 0 when starting for the first time # Even with a health check, docker-compose will immediately report the container as unhealthy when using --wait instead of waiting for it to become healthy -# So, let's just start it a second time if it exits quickly +# So, let's just start it a second time set +e start_time=$(date +%s) /opt/jetty-home/bin/jetty.sh run @@ -20,9 +20,13 @@ exit_code=$? end_time=$(date +%s) duration=$((end_time - start_time)) -if [ $duration -lt 10 ]; then - /opt/jetty-home/bin/jetty.sh run - exit_code=$? +echo "Duration for initial idp run was $duration seconds." + +if [ $duration -lt 60 ]; then + echo "Restarting idp." + + /opt/jetty-home/bin/jetty.sh run + exit_code=$? fi exit $exit_code
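For reference, the index-selection rule that the write-load-forecaster change delegates to DataStream.getIndicesWithinMaxAgeRange (previously implemented locally as LicensedWriteLoadForecaster#getIndicesWithinMaxAgeRange, removed above) can be sketched standalone. This is a hedged illustration, not the actual DataStream API: the BackingIndex record and the helper below are made up for the example, `nowMillis` stands in for `threadPool::absoluteTimeInMillis`, and `maxIndexAgeMillis` for `maxIndexAge.getMillis()`.

```java
import java.util.List;
import java.util.function.LongSupplier;

public class MaxAgeRangeExample {

    // Minimal stand-in for a backing index with a creation timestamp (illustrative only).
    record BackingIndex(String name, long creationDateMillis) {}

    /**
     * Returns the trailing sublist of backing indices whose age is within maxIndexAgeMillis,
     * plus one older index, and always at least the two newest indices, so the whole
     * max-index-age window is covered even when rollovers happen less often than the window.
     */
    static List<BackingIndex> indicesWithinMaxAgeRange(List<BackingIndex> indices, long maxIndexAgeMillis, LongSupplier nowMillis) {
        final long now = nowMillis.getAsLong();
        // Consider at least the write index and its predecessor.
        int firstIndexWithinAgeRange = Math.max(indices.size() - 2, 0);
        for (int i = 0; i < indices.size(); i++) {
            final long indexAge = now - indices.get(i).creationDateMillis();
            if (indexAge < maxIndexAgeMillis) {
                // Include the previous (older) index too so the full window is covered.
                firstIndexWithinAgeRange = i == 0 ? 0 : i - 1;
                break;
            }
        }
        return indices.subList(firstIndexWithinAgeRange, indices.size());
    }

    public static void main(String[] args) {
        final long day = 24L * 60 * 60 * 1000;
        final long now = System.currentTimeMillis();
        // Hypothetical backing indices of a "logs-es" data stream, oldest first; the last one is the write index.
        List<BackingIndex> indices = List.of(
            new BackingIndex(".ds-logs-es-000001", now - 20 * day),
            new BackingIndex(".ds-logs-es-000002", now - 9 * day),
            new BackingIndex(".ds-logs-es-000003", now - 3 * day),
            new BackingIndex(".ds-logs-es-000004", now)
        );
        // With a 7-day max index age this keeps 000002 (the one older index), 000003, and 000004.
        indicesWithinMaxAgeRange(indices, 7 * day, System::currentTimeMillis).forEach(i -> System.out.println(i.name()));
    }
}
```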